]> git.ipfire.org Git - thirdparty/glibc.git/blame - malloc/malloc.c
malloc: Refactor TAG_ macros to avoid indirection
[thirdparty/glibc.git] / malloc / malloc.c
CommitLineData
56137dbc 1/* Malloc implementation for multiple threads without lock contention.
2b778ceb 2 Copyright (C) 1996-2021 Free Software Foundation, Inc.
f65fd747 3 This file is part of the GNU C Library.
fa8d436c
UD
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
f65fd747
UD
6
7 The GNU C Library is free software; you can redistribute it and/or
cc7375ce
RM
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
fa8d436c 10 License, or (at your option) any later version.
f65fd747
UD
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
cc7375ce 15 Lesser General Public License for more details.
f65fd747 16
cc7375ce 17 You should have received a copy of the GNU Lesser General Public
59ba27a6 18 License along with the GNU C Library; see the file COPYING.LIB. If
5a82c748 19 not, see <https://www.gnu.org/licenses/>. */
f65fd747 20
fa8d436c
UD
21/*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
bb2ce416 25 There have been substantial changes made after the integration into
da2d2fb6
UD
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
fa8d436c 29* Version ptmalloc2-20011215
fa8d436c
UD
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
f65fd747 32
fa8d436c 33* Quickstart
f65fd747 34
fa8d436c
UD
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
f65fd747
UD
43
44* Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
fa8d436c
UD
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71* Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
22a89187
UD
76 free(void* p);
77 realloc(void* p, size_t n);
fa8d436c
UD
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
22a89187
UD
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
fa8d436c 86 pvalloc(size_t n);
fa8d436c 87 malloc_trim(size_t pad);
22a89187 88 malloc_usable_size(void* p);
fa8d436c 89 malloc_stats();
f65fd747
UD
90
91* Vital statistics:
92
fa8d436c 93 Supported pointer representation: 4 or 8 bytes
a9177ff5 94 Supported size_t representation: 4 or 8 bytes
f65fd747 95 Note that size_t is allowed to be 4 bytes even if pointers are 8.
fa8d436c
UD
96 You can adjust this by defining INTERNAL_SIZE_T
97
98 Alignment: 2 * sizeof(size_t) (default)
99 (i.e., 8 byte alignment with 4byte size_t). This suffices for
100 nearly all current machines and C compilers. However, you can
101 define MALLOC_ALIGNMENT to be wider than this if necessary.
f65fd747 102
fa8d436c
UD
103 Minimum overhead per allocated chunk: 4 or 8 bytes
104 Each malloced chunk has a hidden word of overhead holding size
f65fd747
UD
105 and status information.
106
107 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
72f90263 108 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
f65fd747
UD
109
110 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
111 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
fa8d436c
UD
112 needed; 4 (8) for a trailing size field and 8 (16) bytes for
113 free list pointers. Thus, the minimum allocatable size is
114 16/24/32 bytes.
f65fd747
UD
115
116 Even a request for zero bytes (i.e., malloc(0)) returns a
117 pointer to something of the minimum allocatable size.
118
fa8d436c
UD
119 The maximum overhead wastage (i.e., number of extra bytes
120 allocated than were requested in malloc) is less than or equal
121 to the minimum size, except for requests >= mmap_threshold that
122 are serviced via mmap(), where the worst case wastage is 2 *
123 sizeof(size_t) bytes plus the remainder from a system page (the
124 minimal mmap unit); typically 4096 or 8192 bytes.
f65fd747 125
a9177ff5 126 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
72f90263 127 8-byte size_t: 2^64 minus about two pages
fa8d436c
UD
128
129 It is assumed that (possibly signed) size_t values suffice to
f65fd747
UD
130 represent chunk sizes. `Possibly signed' is due to the fact
131 that `size_t' may be defined on a system as either a signed or
fa8d436c
UD
132 an unsigned type. The ISO C standard says that it must be
133 unsigned, but a few systems are known not to adhere to this.
134 Additionally, even when size_t is unsigned, sbrk (which is by
135 default used to obtain memory from system) accepts signed
136 arguments, and may not be able to handle size_t-wide arguments
137 with negative sign bit. Generally, values that would
138 appear as negative after accounting for overhead and alignment
139 are supported only via mmap(), which does not have this
140 limitation.
141
142 Requests for sizes outside the allowed range will perform an optional
143 failure action and then return null. (Requests may also
144 also fail because a system is out of memory.)
145
22a89187 146 Thread-safety: thread-safe
fa8d436c
UD
147
148 Compliance: I believe it is compliant with the 1997 Single Unix Specification
2b0fba75 149 Also SVID/XPG, ANSI C, and probably others as well.
f65fd747
UD
150
151* Synopsis of compile-time options:
152
153 People have reported using previous versions of this malloc on all
154 versions of Unix, sometimes by tweaking some of the defines
22a89187 155 below. It has been tested most extensively on Solaris and Linux.
fa8d436c
UD
156 People also report using it in stand-alone embedded systems.
157
158 The implementation is in straight, hand-tuned ANSI C. It is not
159 at all modular. (Sorry!) It uses a lot of macros. To be at all
160 usable, this code should be compiled using an optimizing compiler
161 (for example gcc -O3) that can simplify expressions and control
162 paths. (FAQ: some macros import variables as arguments rather than
163 declare locals because people reported that some debuggers
164 otherwise get confused.)
165
166 OPTION DEFAULT VALUE
167
168 Compilation Environment options:
169
2a26ef3a 170 HAVE_MREMAP 0
fa8d436c
UD
171
172 Changing default word sizes:
173
174 INTERNAL_SIZE_T size_t
fa8d436c
UD
175
176 Configuration and functionality options:
177
fa8d436c
UD
178 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
179 USE_MALLOC_LOCK NOT defined
180 MALLOC_DEBUG NOT defined
181 REALLOC_ZERO_BYTES_FREES 1
fa8d436c
UD
182 TRIM_FASTBINS 0
183
184 Options for customizing MORECORE:
185
186 MORECORE sbrk
187 MORECORE_FAILURE -1
a9177ff5 188 MORECORE_CONTIGUOUS 1
fa8d436c
UD
189 MORECORE_CANNOT_TRIM NOT defined
190 MORECORE_CLEARS 1
a9177ff5 191 MMAP_AS_MORECORE_SIZE (1024 * 1024)
fa8d436c
UD
192
193 Tuning options that are also dynamically changeable via mallopt:
194
425ce2ed 195 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
fa8d436c
UD
196 DEFAULT_TRIM_THRESHOLD 128 * 1024
197 DEFAULT_TOP_PAD 0
198 DEFAULT_MMAP_THRESHOLD 128 * 1024
199 DEFAULT_MMAP_MAX 65536
200
201 There are several other #defined constants and macros that you
202 probably don't want to touch unless you are extending or adapting malloc. */
f65fd747
UD
203
204/*
22a89187 205 void* is the pointer type that malloc should say it returns
f65fd747
UD
206*/
207
22a89187
UD
208#ifndef void
209#define void void
210#endif /*void*/
f65fd747 211
fa8d436c
UD
212#include <stddef.h> /* for size_t */
213#include <stdlib.h> /* for getenv(), abort() */
2a26ef3a 214#include <unistd.h> /* for __libc_enable_secure */
f65fd747 215
425ce2ed 216#include <atomic.h>
eb96ffb0 217#include <_itoa.h>
e404fb16 218#include <bits/wordsize.h>
425ce2ed 219#include <sys/sysinfo.h>
c56da3a3 220
02d46fc4
UD
221#include <ldsodefs.h>
222
fa8d436c 223#include <unistd.h>
fa8d436c 224#include <stdio.h> /* needed for malloc_stats */
8e58439c 225#include <errno.h>
406e7a0a 226#include <assert.h>
f65fd747 227
66274218
AJ
228#include <shlib-compat.h>
229
5d78bb43
UD
230/* For uintptr_t. */
231#include <stdint.h>
f65fd747 232
3e030bd5
UD
233/* For va_arg, va_start, va_end. */
234#include <stdarg.h>
235
070906ff
RM
236/* For MIN, MAX, powerof2. */
237#include <sys/param.h>
238
ca6be165 239/* For ALIGN_UP et. al. */
9090848d 240#include <libc-pointer-arith.h>
8a35c3fe 241
d5c3fafc
DD
242/* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
243#include <libc-diag.h>
244
3784dfc0
RE
245/* For memory tagging. */
246#include <libc-mtag.h>
247
29d79486 248#include <malloc/malloc-internal.h>
c0f62c56 249
6d43de4b
WD
250/* For SINGLE_THREAD_P. */
251#include <sysdep-cancel.h>
252
29a4db29
FW
253#include <libc-internal.h>
254
fa8d436c
UD
255/*
256 Debugging:
257
258 Because freed chunks may be overwritten with bookkeeping fields, this
259 malloc will often die when freed memory is overwritten by user
260 programs. This can be very effective (albeit in an annoying way)
261 in helping track down dangling pointers.
262
263 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
264 enabled that will catch more memory errors. You probably won't be
265 able to make much sense of the actual assertion errors, but they
266 should help you locate incorrectly overwritten memory. The checking
267 is fairly extensive, and will slow down execution
268 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
269 will attempt to check every non-mmapped allocated and free chunk in
270 the course of computing the summmaries. (By nature, mmapped regions
271 cannot be checked very much automatically.)
272
273 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
274 this code. The assertions in the check routines spell out in more
275 detail the assumptions and invariants underlying the algorithms.
276
277 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
278 checking that all accesses to malloced memory stay within their
279 bounds. However, there are several add-ons and adaptations of this
280 or other mallocs available that do this.
f65fd747
UD
281*/
282
439bda32
WN
283#ifndef MALLOC_DEBUG
284#define MALLOC_DEBUG 0
285#endif
286
406e7a0a
ST
287#ifndef NDEBUG
288# define __assert_fail(assertion, file, line, function) \
289 __malloc_assert(assertion, file, line, function)
72f90263
UD
290
291extern const char *__progname;
292
293static void
294__malloc_assert (const char *assertion, const char *file, unsigned int line,
295 const char *function)
296{
297 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
298 __progname, __progname[0] ? ": " : "",
299 file, line,
300 function ? function : "", function ? ": " : "",
301 assertion);
302 fflush (stderr);
303 abort ();
304}
305#endif
f65fd747 306
d5c3fafc
DD
307#if USE_TCACHE
308/* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
309# define TCACHE_MAX_BINS 64
310# define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
311
312/* Only used to pre-fill the tunables. */
313# define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
314
315/* When "x" is from chunksize(). */
316# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
317/* When "x" is a user-provided size. */
318# define usize2tidx(x) csize2tidx (request2size (x))
319
320/* With rounding and alignment, the bins are...
321 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
322 idx 1 bytes 25..40 or 13..20
323 idx 2 bytes 41..56 or 21..28
324 etc. */
325
326/* This is another arbitrary limit, which tunables can change. Each
327 tcache bin will hold at most this number of chunks. */
328# define TCACHE_FILL_COUNT 7
1f50f2ad
WD
329
330/* Maximum chunks in tcache bins for tunables. This value must fit the range
331 of tcache->counts[] entries, else they may overflow. */
332# define MAX_TCACHE_COUNT UINT16_MAX
d5c3fafc
DD
333#endif
334
a1a486d7
EI
335/* Safe-Linking:
336 Use randomness from ASLR (mmap_base) to protect single-linked lists
337 of Fast-Bins and TCache. That is, mask the "next" pointers of the
338 lists' chunks, and also perform allocation alignment checks on them.
339 This mechanism reduces the risk of pointer hijacking, as was done with
340 Safe-Unlinking in the double-linked lists of Small-Bins.
341 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
342 larger pages provide less entropy, although the pointer mangling
343 still works. */
344#define PROTECT_PTR(pos, ptr) \
345 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
346#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
f65fd747 347
fa8d436c
UD
348/*
349 REALLOC_ZERO_BYTES_FREES should be set if a call to
350 realloc with zero bytes should be the same as a call to free.
351 This is required by the C standard. Otherwise, since this malloc
352 returns a unique pointer for malloc(0), so does realloc(p, 0).
353*/
354
355#ifndef REALLOC_ZERO_BYTES_FREES
356#define REALLOC_ZERO_BYTES_FREES 1
357#endif
358
359/*
360 TRIM_FASTBINS controls whether free() of a very small chunk can
361 immediately lead to trimming. Setting to true (1) can reduce memory
362 footprint, but will almost always slow down programs that use a lot
363 of small chunks.
364
365 Define this only if you are willing to give up some speed to more
366 aggressively reduce system-level memory footprint when releasing
367 memory in programs that use many small chunks. You can get
368 essentially the same effect by setting MXFAST to 0, but this can
369 lead to even greater slowdowns in programs using many small chunks.
370 TRIM_FASTBINS is an in-between compile-time option, that disables
371 only those chunks bordering topmost memory from being placed in
372 fastbins.
373*/
374
375#ifndef TRIM_FASTBINS
376#define TRIM_FASTBINS 0
377#endif
378
379
3b49edc0 380/* Definition for getting more memory from the OS. */
fa8d436c
UD
381#define MORECORE (*__morecore)
382#define MORECORE_FAILURE 0
22a89187
UD
383void * __default_morecore (ptrdiff_t);
384void *(*__morecore)(ptrdiff_t) = __default_morecore;
f65fd747 385
3784dfc0
RE
386/* Memory tagging. */
387
388/* Some systems support the concept of tagging (sometimes known as
389 coloring) memory locations on a fine grained basis. Each memory
390 location is given a color (normally allocated randomly) and
391 pointers are also colored. When the pointer is dereferenced, the
392 pointer's color is checked against the memory's color and if they
393 differ the access is faulted (sometimes lazily).
394
395 We use this in glibc by maintaining a single color for the malloc
396 data structures that are interleaved with the user data and then
397 assigning separate colors for each block allocation handed out. In
398 this way simple buffer overruns will be rapidly detected. When
399 memory is freed, the memory is recolored back to the glibc default
400 so that simple use-after-free errors can also be detected.
401
402 If memory is reallocated the buffer is recolored even if the
403 address remains the same. This has a performance impact, but
404 guarantees that the old pointer cannot mistakenly be reused (code
405 that compares old against new will see a mismatch and will then
406 need to behave as though realloc moved the data to a new location).
407
408 Internal API for memory tagging support.
409
410 The aim is to keep the code for memory tagging support as close to
411 the normal APIs in glibc as possible, so that if tagging is not
412 enabled in the library, or is disabled at runtime then standard
413 operations can continue to be used. Support macros are used to do
414 this:
415
0c719cf4 416 void *tag_new_memset (void *ptr, int, val, size_t size)
3784dfc0
RE
417
418 Has the same interface as memset(), but additionally allocates a
419 new tag, colors the memory with that tag and returns a pointer that
420 is correctly colored for that location. The non-tagging version
421 will simply call memset.
422
0c719cf4 423 void *tag_region (void *ptr, size_t size)
3784dfc0
RE
424
425 Color the region of memory pointed to by PTR and size SIZE with
426 the color of PTR. Returns the original pointer.
427
0c719cf4 428 void *tag_new_usable (void *ptr)
3784dfc0
RE
429
430 Allocate a new random color and use it to color the user region of
431 a chunk; this may include data from the subsequent chunk's header
432 if tagging is sufficiently fine grained. Returns PTR suitably
433 recolored for accessing the memory there.
434
0c719cf4 435 void *tag_at (void *ptr)
3784dfc0
RE
436
437 Read the current color of the memory at the address pointed to by
438 PTR (ignoring it's current color) and return PTR recolored to that
439 color. PTR must be valid address in all other respects. When
440 tagging is not enabled, it simply returns the original pointer.
441*/
442
443#ifdef USE_MTAG
444
445/* Default implementaions when memory tagging is supported, but disabled. */
446static void *
447__default_tag_region (void *ptr, size_t size)
448{
449 return ptr;
450}
451
452static void *
453__default_tag_nop (void *ptr)
454{
455 return ptr;
456}
457
0c719cf4
SN
458static int mtag_mmap_flags = 0;
459static size_t mtag_granule_mask = ~(size_t)0;
3784dfc0 460
0c719cf4
SN
461static void *(*tag_new_memset)(void *, int, size_t) = memset;
462static void *(*tag_region)(void *, size_t) = __default_tag_region;
463static void *(*tag_new_usable)(void *) = __default_tag_nop;
464static void *(*tag_at)(void *) = __default_tag_nop;
3784dfc0 465
3784dfc0 466#else
0c719cf4
SN
467# define mtag_mmap_flags 0
468# define tag_new_memset(ptr, val, size) memset (ptr, val, size)
469# define tag_region(ptr, size) (ptr)
470# define tag_new_usable(ptr) (ptr)
471# define tag_at(ptr) (ptr)
3784dfc0 472#endif
f65fd747 473
22a89187 474#include <string.h>
f65fd747 475
fa8d436c
UD
476/*
477 MORECORE-related declarations. By default, rely on sbrk
478*/
09f5e163 479
f65fd747 480
fa8d436c
UD
481/*
482 MORECORE is the name of the routine to call to obtain more memory
483 from the system. See below for general guidance on writing
484 alternative MORECORE functions, as well as a version for WIN32 and a
485 sample version for pre-OSX macos.
486*/
f65fd747 487
fa8d436c
UD
488#ifndef MORECORE
489#define MORECORE sbrk
490#endif
f65fd747 491
fa8d436c
UD
492/*
493 MORECORE_FAILURE is the value returned upon failure of MORECORE
494 as well as mmap. Since it cannot be an otherwise valid memory address,
495 and must reflect values of standard sys calls, you probably ought not
496 try to redefine it.
497*/
09f5e163 498
fa8d436c
UD
499#ifndef MORECORE_FAILURE
500#define MORECORE_FAILURE (-1)
501#endif
502
503/*
504 If MORECORE_CONTIGUOUS is true, take advantage of fact that
505 consecutive calls to MORECORE with positive arguments always return
506 contiguous increasing addresses. This is true of unix sbrk. Even
507 if not defined, when regions happen to be contiguous, malloc will
508 permit allocations spanning regions obtained from different
509 calls. But defining this when applicable enables some stronger
510 consistency checks and space efficiencies.
511*/
f65fd747 512
fa8d436c
UD
513#ifndef MORECORE_CONTIGUOUS
514#define MORECORE_CONTIGUOUS 1
f65fd747
UD
515#endif
516
fa8d436c
UD
517/*
518 Define MORECORE_CANNOT_TRIM if your version of MORECORE
519 cannot release space back to the system when given negative
520 arguments. This is generally necessary only if you are using
521 a hand-crafted MORECORE function that cannot handle negative arguments.
522*/
523
524/* #define MORECORE_CANNOT_TRIM */
f65fd747 525
fa8d436c
UD
526/* MORECORE_CLEARS (default 1)
527 The degree to which the routine mapped to MORECORE zeroes out
528 memory: never (0), only for newly allocated space (1) or always
529 (2). The distinction between (1) and (2) is necessary because on
530 some systems, if the application first decrements and then
531 increments the break value, the contents of the reallocated space
532 are unspecified.
6c8dbf00 533 */
fa8d436c
UD
534
535#ifndef MORECORE_CLEARS
6c8dbf00 536# define MORECORE_CLEARS 1
7cabd57c
UD
537#endif
538
fa8d436c 539
a9177ff5 540/*
fa8d436c 541 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
22a89187
UD
542 sbrk fails, and mmap is used as a backup. The value must be a
543 multiple of page size. This backup strategy generally applies only
544 when systems have "holes" in address space, so sbrk cannot perform
545 contiguous expansion, but there is still space available on system.
546 On systems for which this is known to be useful (i.e. most linux
547 kernels), this occurs only when programs allocate huge amounts of
548 memory. Between this, and the fact that mmap regions tend to be
549 limited, the size should be large, to avoid too many mmap calls and
550 thus avoid running out of kernel resources. */
fa8d436c
UD
551
552#ifndef MMAP_AS_MORECORE_SIZE
553#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
f65fd747
UD
554#endif
555
556/*
557 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
2a26ef3a 558 large blocks.
f65fd747
UD
559*/
560
561#ifndef HAVE_MREMAP
fa8d436c 562#define HAVE_MREMAP 0
f65fd747
UD
563#endif
564
2ba3cfa1
FW
565/* We may need to support __malloc_initialize_hook for backwards
566 compatibility. */
567
568#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
569# define HAVE_MALLOC_INIT_HOOK 1
570#else
571# define HAVE_MALLOC_INIT_HOOK 0
572#endif
573
f65fd747 574
f65fd747 575/*
f65fd747 576 This version of malloc supports the standard SVID/XPG mallinfo
fa8d436c
UD
577 routine that returns a struct containing usage properties and
578 statistics. It should work on any SVID/XPG compliant system that has
579 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
580 install such a thing yourself, cut out the preliminary declarations
581 as described above and below and save them in a malloc.h file. But
582 there's no compelling reason to bother to do this.)
f65fd747
UD
583
584 The main declaration needed is the mallinfo struct that is returned
585 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
fa8d436c
UD
586 bunch of fields that are not even meaningful in this version of
587 malloc. These fields are are instead filled by mallinfo() with
588 other numbers that might be of interest.
f65fd747
UD
589*/
590
f65fd747 591
fa8d436c 592/* ---------- description of public routines ------------ */
f65fd747
UD
593
594/*
fa8d436c
UD
595 malloc(size_t n)
596 Returns a pointer to a newly allocated chunk of at least n bytes, or null
597 if no space is available. Additionally, on failure, errno is
598 set to ENOMEM on ANSI C systems.
599
862897d2 600 If n is zero, malloc returns a minimum-sized chunk. (The minimum
fa8d436c
UD
601 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
602 systems.) On most systems, size_t is an unsigned type, so calls
603 with negative arguments are interpreted as requests for huge amounts
604 of space, which will often fail. The maximum supported value of n
605 differs across systems, but is in all cases less than the maximum
606 representable value of a size_t.
f65fd747 607*/
3b49edc0
UD
608void* __libc_malloc(size_t);
609libc_hidden_proto (__libc_malloc)
f65fd747 610
fa8d436c 611/*
22a89187 612 free(void* p)
fa8d436c
UD
613 Releases the chunk of memory pointed to by p, that had been previously
614 allocated using malloc or a related routine such as realloc.
615 It has no effect if p is null. It can have arbitrary (i.e., bad!)
616 effects if p has already been freed.
617
618 Unless disabled (using mallopt), freeing very large spaces will
619 when possible, automatically trigger operations that give
620 back unused memory to the system, thus reducing program footprint.
621*/
3b49edc0
UD
622void __libc_free(void*);
623libc_hidden_proto (__libc_free)
f65fd747 624
fa8d436c
UD
625/*
626 calloc(size_t n_elements, size_t element_size);
627 Returns a pointer to n_elements * element_size bytes, with all locations
628 set to zero.
629*/
3b49edc0 630void* __libc_calloc(size_t, size_t);
f65fd747
UD
631
632/*
22a89187 633 realloc(void* p, size_t n)
fa8d436c
UD
634 Returns a pointer to a chunk of size n that contains the same data
635 as does chunk p up to the minimum of (n, p's size) bytes, or null
a9177ff5 636 if no space is available.
f65fd747 637
fa8d436c
UD
638 The returned pointer may or may not be the same as p. The algorithm
639 prefers extending p when possible, otherwise it employs the
640 equivalent of a malloc-copy-free sequence.
f65fd747 641
a9177ff5 642 If p is null, realloc is equivalent to malloc.
f65fd747 643
fa8d436c
UD
644 If space is not available, realloc returns null, errno is set (if on
645 ANSI) and p is NOT freed.
f65fd747 646
fa8d436c
UD
647 if n is for fewer bytes than already held by p, the newly unused
648 space is lopped off and freed if possible. Unless the #define
649 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
650 zero (re)allocates a minimum-sized chunk.
f65fd747 651
3b5f801d
DD
652 Large chunks that were internally obtained via mmap will always be
653 grown using malloc-copy-free sequences unless the system supports
654 MREMAP (currently only linux).
f65fd747 655
fa8d436c
UD
656 The old unix realloc convention of allowing the last-free'd chunk
657 to be used as an argument to realloc is not supported.
f65fd747 658*/
3b49edc0
UD
659void* __libc_realloc(void*, size_t);
660libc_hidden_proto (__libc_realloc)
f65fd747 661
fa8d436c
UD
662/*
663 memalign(size_t alignment, size_t n);
664 Returns a pointer to a newly allocated chunk of n bytes, aligned
665 in accord with the alignment argument.
666
667 The alignment argument should be a power of two. If the argument is
668 not a power of two, the nearest greater power is used.
669 8-byte alignment is guaranteed by normal malloc calls, so don't
670 bother calling memalign with an argument of 8 or less.
671
672 Overreliance on memalign is a sure way to fragment space.
673*/
3b49edc0
UD
674void* __libc_memalign(size_t, size_t);
675libc_hidden_proto (__libc_memalign)
f65fd747
UD
676
677/*
fa8d436c
UD
678 valloc(size_t n);
679 Equivalent to memalign(pagesize, n), where pagesize is the page
680 size of the system. If the pagesize is unknown, 4096 is used.
681*/
3b49edc0 682void* __libc_valloc(size_t);
fa8d436c 683
f65fd747 684
f65fd747 685
fa8d436c
UD
686/*
687 mallopt(int parameter_number, int parameter_value)
688 Sets tunable parameters The format is to provide a
689 (parameter-number, parameter-value) pair. mallopt then sets the
690 corresponding parameter to the argument value if it can (i.e., so
691 long as the value is meaningful), and returns 1 if successful else
692 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
693 normally defined in malloc.h. Only one of these (M_MXFAST) is used
694 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
695 so setting them has no effect. But this malloc also supports four
696 other options in mallopt. See below for details. Briefly, supported
697 parameters are as follows (listed defaults are for "typical"
698 configurations).
699
700 Symbol param # default allowed param values
701 M_MXFAST 1 64 0-80 (0 disables fastbins)
702 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
a9177ff5 703 M_TOP_PAD -2 0 any
fa8d436c
UD
704 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
705 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
706*/
3b49edc0
UD
707int __libc_mallopt(int, int);
708libc_hidden_proto (__libc_mallopt)
fa8d436c
UD
709
710
711/*
712 mallinfo()
713 Returns (by copy) a struct containing various summary statistics:
714
a9177ff5
RM
715 arena: current total non-mmapped bytes allocated from system
716 ordblks: the number of free chunks
fa8d436c 717 smblks: the number of fastbin blocks (i.e., small chunks that
72f90263 718 have been freed but not use resused or consolidated)
a9177ff5
RM
719 hblks: current number of mmapped regions
720 hblkhd: total bytes held in mmapped regions
ca135f82 721 usmblks: always 0
a9177ff5 722 fsmblks: total bytes held in fastbin blocks
fa8d436c 723 uordblks: current total allocated space (normal or mmapped)
a9177ff5 724 fordblks: total free space
fa8d436c 725 keepcost: the maximum number of bytes that could ideally be released
72f90263
UD
726 back to system via malloc_trim. ("ideally" means that
727 it ignores page restrictions etc.)
fa8d436c
UD
728
729 Because these fields are ints, but internal bookkeeping may
a9177ff5 730 be kept as longs, the reported values may wrap around zero and
fa8d436c
UD
731 thus be inaccurate.
732*/
e3960d1c 733struct mallinfo2 __libc_mallinfo2(void);
cdf64542 734libc_hidden_proto (__libc_mallinfo2)
e3960d1c 735
3b49edc0 736struct mallinfo __libc_mallinfo(void);
88764ae2 737
f65fd747 738
fa8d436c
UD
739/*
740 pvalloc(size_t n);
741 Equivalent to valloc(minimum-page-that-holds(n)), that is,
742 round up n to nearest pagesize.
743 */
3b49edc0 744void* __libc_pvalloc(size_t);
fa8d436c
UD
745
746/*
747 malloc_trim(size_t pad);
748
749 If possible, gives memory back to the system (via negative
750 arguments to sbrk) if there is unused memory at the `high' end of
751 the malloc pool. You can call this after freeing large blocks of
752 memory to potentially reduce the system-level memory requirements
753 of a program. However, it cannot guarantee to reduce memory. Under
754 some allocation patterns, some large free blocks of memory will be
755 locked between two used chunks, so they cannot be given back to
756 the system.
a9177ff5 757
fa8d436c
UD
758 The `pad' argument to malloc_trim represents the amount of free
759 trailing space to leave untrimmed. If this argument is zero,
760 only the minimum amount of memory to maintain internal data
761 structures will be left (one page or less). Non-zero arguments
762 can be supplied to maintain enough trailing space to service
763 future expected allocations without having to re-obtain memory
764 from the system.
a9177ff5 765
fa8d436c
UD
766 Malloc_trim returns 1 if it actually released any memory, else 0.
767 On systems that do not support "negative sbrks", it will always
c958a6a4 768 return 0.
fa8d436c 769*/
3b49edc0 770int __malloc_trim(size_t);
fa8d436c
UD
771
772/*
22a89187 773 malloc_usable_size(void* p);
fa8d436c
UD
774
775 Returns the number of bytes you can actually use in
776 an allocated chunk, which may be more than you requested (although
777 often not) due to alignment and minimum size constraints.
778 You can use this many bytes without worrying about
779 overwriting other allocated objects. This is not a particularly great
780 programming practice. malloc_usable_size can be more useful in
781 debugging and assertions, for example:
782
783 p = malloc(n);
784 assert(malloc_usable_size(p) >= 256);
785
786*/
3b49edc0 787size_t __malloc_usable_size(void*);
fa8d436c
UD
788
789/*
790 malloc_stats();
791 Prints on stderr the amount of space obtained from the system (both
792 via sbrk and mmap), the maximum amount (which may be more than
793 current if malloc_trim and/or munmap got called), and the current
794 number of bytes allocated via malloc (or realloc, etc) but not yet
795 freed. Note that this is the number of bytes allocated, not the
796 number requested. It will be larger than the number requested
797 because of alignment and bookkeeping overhead. Because it includes
798 alignment wastage as being in use, this figure may be greater than
799 zero even when no user-level chunks are allocated.
800
801 The reported current and maximum system memory can be inaccurate if
802 a program makes other calls to system memory allocation functions
803 (normally sbrk) outside of malloc.
804
805 malloc_stats prints only the most commonly interesting statistics.
806 More information can be obtained by calling mallinfo.
807
808*/
3b49edc0 809void __malloc_stats(void);
f65fd747 810
f7ddf3d3
UD
811/*
812 posix_memalign(void **memptr, size_t alignment, size_t size);
813
814 POSIX wrapper like memalign(), checking for validity of size.
815*/
816int __posix_memalign(void **, size_t, size_t);
f7ddf3d3 817
fa8d436c
UD
818/* mallopt tuning options */
819
f65fd747 820/*
fa8d436c
UD
821 M_MXFAST is the maximum request size used for "fastbins", special bins
822 that hold returned chunks without consolidating their spaces. This
823 enables future requests for chunks of the same size to be handled
824 very quickly, but can increase fragmentation, and thus increase the
825 overall memory footprint of a program.
826
827 This malloc manages fastbins very conservatively yet still
828 efficiently, so fragmentation is rarely a problem for values less
829 than or equal to the default. The maximum supported value of MXFAST
830 is 80. You wouldn't want it any higher than this anyway. Fastbins
831 are designed especially for use with many small structs, objects or
832 strings -- the default handles structs/objects/arrays with sizes up
833 to 8 4byte fields, or small strings representing words, tokens,
834 etc. Using fastbins for larger objects normally worsens
835 fragmentation without improving speed.
836
837 M_MXFAST is set in REQUEST size units. It is internally used in
838 chunksize units, which adds padding and alignment. You can reduce
839 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
840 algorithm to be a closer approximation of fifo-best-fit in all cases,
841 not just for larger requests, but will generally cause it to be
842 slower.
f65fd747
UD
843*/
844
845
fa8d436c
UD
846/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
847#ifndef M_MXFAST
a9177ff5 848#define M_MXFAST 1
fa8d436c 849#endif
f65fd747 850
fa8d436c 851#ifndef DEFAULT_MXFAST
425ce2ed 852#define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
10dc2a90
UD
853#endif
854
10dc2a90 855
fa8d436c
UD
856/*
857 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
858 to keep before releasing via malloc_trim in free().
859
860 Automatic trimming is mainly useful in long-lived programs.
861 Because trimming via sbrk can be slow on some systems, and can
862 sometimes be wasteful (in cases where programs immediately
863 afterward allocate more large chunks) the value should be high
864 enough so that your overall system performance would improve by
865 releasing this much memory.
866
867 The trim threshold and the mmap control parameters (see below)
868 can be traded off with one another. Trimming and mmapping are
869 two different ways of releasing unused memory back to the
870 system. Between these two, it is often possible to keep
871 system-level demands of a long-lived program down to a bare
872 minimum. For example, in one test suite of sessions measuring
873 the XF86 X server on Linux, using a trim threshold of 128K and a
874 mmap threshold of 192K led to near-minimal long term resource
875 consumption.
876
877 If you are using this malloc in a long-lived program, it should
878 pay to experiment with these values. As a rough guide, you
879 might set to a value close to the average size of a process
880 (program) running on your system. Releasing this much memory
881 would allow such a process to run in memory. Generally, it's
882 worth it to tune for trimming rather tham memory mapping when a
883 program undergoes phases where several large chunks are
884 allocated and released in ways that can reuse each other's
885 storage, perhaps mixed with phases where there are no such
886 chunks at all. And in well-behaved long-lived programs,
887 controlling release of large blocks via trimming versus mapping
888 is usually faster.
889
890 However, in most programs, these parameters serve mainly as
891 protection against the system-level effects of carrying around
892 massive amounts of unneeded memory. Since frequent calls to
893 sbrk, mmap, and munmap otherwise degrade performance, the default
894 parameters are set to relatively high values that serve only as
895 safeguards.
896
897 The trim value It must be greater than page size to have any useful
a9177ff5 898 effect. To disable trimming completely, you can set to
fa8d436c
UD
899 (unsigned long)(-1)
900
901 Trim settings interact with fastbin (MXFAST) settings: Unless
902 TRIM_FASTBINS is defined, automatic trimming never takes place upon
903 freeing a chunk with size less than or equal to MXFAST. Trimming is
904 instead delayed until subsequent freeing of larger chunks. However,
905 you can still force an attempted trim by calling malloc_trim.
906
907 Also, trimming is not generally possible in cases where
908 the main arena is obtained via mmap.
909
910 Note that the trick some people use of mallocing a huge space and
911 then freeing it at program startup, in an attempt to reserve system
912 memory, doesn't have the intended effect under automatic trimming,
913 since that memory will immediately be returned to the system.
914*/
915
916#define M_TRIM_THRESHOLD -1
917
918#ifndef DEFAULT_TRIM_THRESHOLD
919#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
920#endif
921
922/*
923 M_TOP_PAD is the amount of extra `padding' space to allocate or
924 retain whenever sbrk is called. It is used in two ways internally:
925
926 * When sbrk is called to extend the top of the arena to satisfy
927 a new malloc request, this much padding is added to the sbrk
928 request.
929
930 * When malloc_trim is called automatically from free(),
931 it is used as the `pad' argument.
932
933 In both cases, the actual amount of padding is rounded
934 so that the end of the arena is always a system page boundary.
935
936 The main reason for using padding is to avoid calling sbrk so
937 often. Having even a small pad greatly reduces the likelihood
938 that nearly every malloc request during program start-up (or
939 after trimming) will invoke sbrk, which needlessly wastes
940 time.
941
942 Automatic rounding-up to page-size units is normally sufficient
943 to avoid measurable overhead, so the default is 0. However, in
944 systems where sbrk is relatively slow, it can pay to increase
945 this value, at the expense of carrying around more memory than
946 the program needs.
947*/
10dc2a90 948
fa8d436c 949#define M_TOP_PAD -2
10dc2a90 950
fa8d436c
UD
951#ifndef DEFAULT_TOP_PAD
952#define DEFAULT_TOP_PAD (0)
953#endif
f65fd747 954
1d05c2fb
UD
955/*
956 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
957 adjusted MMAP_THRESHOLD.
958*/
959
960#ifndef DEFAULT_MMAP_THRESHOLD_MIN
961#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
962#endif
963
964#ifndef DEFAULT_MMAP_THRESHOLD_MAX
e404fb16
UD
965 /* For 32-bit platforms we cannot increase the maximum mmap
966 threshold much because it is also the minimum value for the
bd2c2341
UD
967 maximum heap size and its alignment. Going above 512k (i.e., 1M
968 for new heaps) wastes too much address space. */
e404fb16 969# if __WORDSIZE == 32
bd2c2341 970# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
e404fb16 971# else
bd2c2341 972# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
e404fb16 973# endif
1d05c2fb
UD
974#endif
975
fa8d436c
UD
976/*
977 M_MMAP_THRESHOLD is the request size threshold for using mmap()
978 to service a request. Requests of at least this size that cannot
979 be allocated using already-existing space will be serviced via mmap.
980 (If enough normal freed space already exists it is used instead.)
981
982 Using mmap segregates relatively large chunks of memory so that
983 they can be individually obtained and released from the host
984 system. A request serviced through mmap is never reused by any
985 other request (at least not directly; the system may just so
986 happen to remap successive requests to the same locations).
987
988 Segregating space in this way has the benefits that:
989
a9177ff5
RM
990 1. Mmapped space can ALWAYS be individually released back
991 to the system, which helps keep the system level memory
992 demands of a long-lived program low.
fa8d436c
UD
993 2. Mapped memory can never become `locked' between
994 other chunks, as can happen with normally allocated chunks, which
995 means that even trimming via malloc_trim would not release them.
996 3. On some systems with "holes" in address spaces, mmap can obtain
997 memory that sbrk cannot.
998
999 However, it has the disadvantages that:
1000
1001 1. The space cannot be reclaimed, consolidated, and then
1002 used to service later requests, as happens with normal chunks.
1003 2. It can lead to more wastage because of mmap page alignment
1004 requirements
1005 3. It causes malloc performance to be more dependent on host
1006 system memory management support routines which may vary in
1007 implementation quality and may impose arbitrary
1008 limitations. Generally, servicing a request via normal
1009 malloc steps is faster than going through a system's mmap.
1010
1011 The advantages of mmap nearly always outweigh disadvantages for
1012 "large" chunks, but the value of "large" varies across systems. The
1013 default is an empirically derived value that works well in most
1014 systems.
1d05c2fb
UD
1015
1016
1017 Update in 2006:
1018 The above was written in 2001. Since then the world has changed a lot.
1019 Memory got bigger. Applications got bigger. The virtual address space
1020 layout in 32 bit linux changed.
1021
1022 In the new situation, brk() and mmap space is shared and there are no
1023 artificial limits on brk size imposed by the kernel. What is more,
1024 applications have started using transient allocations larger than the
1025 128Kb as was imagined in 2001.
1026
1027 The price for mmap is also high now; each time glibc mmaps from the
1028 kernel, the kernel is forced to zero out the memory it gives to the
1029 application. Zeroing memory is expensive and eats a lot of cache and
1030 memory bandwidth. This has nothing to do with the efficiency of the
1031 virtual memory system, by doing mmap the kernel just has no choice but
1032 to zero.
1033
1034 In 2001, the kernel had a maximum size for brk() which was about 800
1035 megabytes on 32 bit x86, at that point brk() would hit the first
1036 mmaped shared libaries and couldn't expand anymore. With current 2.6
1037 kernels, the VA space layout is different and brk() and mmap
1038 both can span the entire heap at will.
1039
1040 Rather than using a static threshold for the brk/mmap tradeoff,
1041 we are now using a simple dynamic one. The goal is still to avoid
1042 fragmentation. The old goals we kept are
1043 1) try to get the long lived large allocations to use mmap()
1044 2) really large allocations should always use mmap()
1045 and we're adding now:
1046 3) transient allocations should use brk() to avoid forcing the kernel
1047 having to zero memory over and over again
1048
1049 The implementation works with a sliding threshold, which is by default
1050 limited to go between 128Kb and 32Mb (64Mb for 64 bitmachines) and starts
1051 out at 128Kb as per the 2001 default.
1052
1053 This allows us to satisfy requirement 1) under the assumption that long
1054 lived allocations are made early in the process' lifespan, before it has
1055 started doing dynamic allocations of the same size (which will
1056 increase the threshold).
1057
1058 The upperbound on the threshold satisfies requirement 2)
1059
1060 The threshold goes up in value when the application frees memory that was
1061 allocated with the mmap allocator. The idea is that once the application
1062 starts freeing memory of a certain size, it's highly probable that this is
1063 a size the application uses for transient allocations. This estimator
1064 is there to satisfy the new third requirement.
1065
f65fd747
UD
1066*/
1067
fa8d436c 1068#define M_MMAP_THRESHOLD -3
f65fd747 1069
fa8d436c 1070#ifndef DEFAULT_MMAP_THRESHOLD
1d05c2fb 1071#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
fa8d436c
UD
1072#endif
1073
1074/*
1075 M_MMAP_MAX is the maximum number of requests to simultaneously
1076 service using mmap. This parameter exists because
1077 some systems have a limited number of internal tables for
1078 use by mmap, and using more than a few of them may degrade
1079 performance.
1080
1081 The default is set to a value that serves only as a safeguard.
22a89187 1082 Setting to 0 disables use of mmap for servicing large requests.
fa8d436c 1083*/
f65fd747 1084
fa8d436c
UD
1085#define M_MMAP_MAX -4
1086
1087#ifndef DEFAULT_MMAP_MAX
fa8d436c 1088#define DEFAULT_MMAP_MAX (65536)
f65fd747
UD
1089#endif
1090
100351c3 1091#include <malloc.h>
f65fd747 1092
fa8d436c
UD
1093#ifndef RETURN_ADDRESS
1094#define RETURN_ADDRESS(X_) (NULL)
9ae6fc54 1095#endif
431c33c0 1096
fa8d436c
UD
1097/* Forward declarations. */
1098struct malloc_chunk;
1099typedef struct malloc_chunk* mchunkptr;
431c33c0 1100
fa8d436c 1101/* Internal routines. */
f65fd747 1102
22a89187 1103static void* _int_malloc(mstate, size_t);
425ce2ed 1104static void _int_free(mstate, mchunkptr, int);
22a89187 1105static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
6e4b2107 1106 INTERNAL_SIZE_T);
22a89187 1107static void* _int_memalign(mstate, size_t, size_t);
10ad46bc
OB
1108static void* _mid_memalign(size_t, size_t, void *);
1109
ac3ed168 1110static void malloc_printerr(const char *str) __attribute__ ((noreturn));
fa8d436c 1111
0c71122c
FW
1112static void* mem2mem_check(void *p, size_t sz);
1113static void top_check(void);
1114static void munmap_chunk(mchunkptr p);
a9177ff5 1115#if HAVE_MREMAP
0c71122c 1116static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
a9177ff5 1117#endif
fa8d436c 1118
22a89187
UD
1119static void* malloc_check(size_t sz, const void *caller);
1120static void free_check(void* mem, const void *caller);
1121static void* realloc_check(void* oldmem, size_t bytes,
1122 const void *caller);
1123static void* memalign_check(size_t alignment, size_t bytes,
1124 const void *caller);
f65fd747 1125
fa8d436c 1126/* ------------------ MMAP support ------------------ */
f65fd747 1127
f65fd747 1128
fa8d436c 1129#include <fcntl.h>
fa8d436c 1130#include <sys/mman.h>
f65fd747 1131
fa8d436c
UD
1132#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1133# define MAP_ANONYMOUS MAP_ANON
1134#endif
f65fd747 1135
fa8d436c 1136#ifndef MAP_NORESERVE
3b49edc0 1137# define MAP_NORESERVE 0
f65fd747
UD
1138#endif
1139
fa8d436c 1140#define MMAP(addr, size, prot, flags) \
3b49edc0 1141 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
f65fd747 1142
f65fd747
UD
1143
1144/*
fa8d436c 1145 ----------------------- Chunk representations -----------------------
f65fd747
UD
1146*/
1147
1148
fa8d436c
UD
1149/*
1150 This struct declaration is misleading (but accurate and necessary).
1151 It declares a "view" into memory allowing access to necessary
1152 fields at known offsets from a given base. See explanation below.
1153*/
1154
1155struct malloc_chunk {
1156
e9c4fe93
FW
1157 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1158 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
fa8d436c
UD
1159
1160 struct malloc_chunk* fd; /* double links -- used only if free. */
f65fd747 1161 struct malloc_chunk* bk;
7ecfbd38
UD
1162
1163 /* Only used for large blocks: pointer to next larger size. */
1164 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1165 struct malloc_chunk* bk_nextsize;
f65fd747
UD
1166};
1167
f65fd747
UD
1168
1169/*
f65fd747
UD
1170 malloc_chunk details:
1171
1172 (The following includes lightly edited explanations by Colin Plumb.)
1173
1174 Chunks of memory are maintained using a `boundary tag' method as
1175 described in e.g., Knuth or Standish. (See the paper by Paul
1176 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1177 survey of such techniques.) Sizes of free chunks are stored both
1178 in the front of each chunk and at the end. This makes
1179 consolidating fragmented chunks into bigger chunks very fast. The
1180 size fields also hold bits representing whether chunks are free or
1181 in use.
1182
1183 An allocated chunk looks like this:
1184
1185
1186 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
ae9166f2 1187 | Size of previous chunk, if unallocated (P clear) |
72f90263 1188 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
ae9166f2 1189 | Size of chunk, in bytes |A|M|P|
f65fd747 1190 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72f90263
UD
1191 | User data starts here... .
1192 . .
1193 . (malloc_usable_size() bytes) .
1194 . |
f65fd747 1195nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
ae9166f2
FW
1196 | (size of chunk, but used for application data) |
1197 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1198 | Size of next chunk, in bytes |A|0|1|
72f90263 1199 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
f65fd747
UD
1200
1201 Where "chunk" is the front of the chunk for the purpose of most of
1202 the malloc code, but "mem" is the pointer that is returned to the
1203 user. "Nextchunk" is the beginning of the next contiguous chunk.
1204
6f65e668 1205 Chunks always begin on even word boundaries, so the mem portion
f65fd747 1206 (which is returned to the user) is also on an even word boundary, and
fa8d436c 1207 thus at least double-word aligned.
f65fd747
UD
1208
1209 Free chunks are stored in circular doubly-linked lists, and look like this:
1210
1211 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
ae9166f2 1212 | Size of previous chunk, if unallocated (P clear) |
72f90263 1213 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
ae9166f2 1214 `head:' | Size of chunk, in bytes |A|0|P|
f65fd747 1215 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72f90263
UD
1216 | Forward pointer to next chunk in list |
1217 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1218 | Back pointer to previous chunk in list |
1219 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1220 | Unused space (may be 0 bytes long) .
1221 . .
1222 . |
f65fd747
UD
1223nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1224 `foot:' | Size of chunk, in bytes |
72f90263 1225 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
ae9166f2
FW
1226 | Size of next chunk, in bytes |A|0|0|
1227 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
f65fd747
UD
1228
1229 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1230 chunk size (which is always a multiple of two words), is an in-use
1231 bit for the *previous* chunk. If that bit is *clear*, then the
1232 word before the current chunk size contains the previous chunk
1233 size, and can be used to find the front of the previous chunk.
fa8d436c
UD
1234 The very first chunk allocated always has this bit set,
1235 preventing access to non-existent (or non-owned) memory. If
1236 prev_inuse is set for any given chunk, then you CANNOT determine
1237 the size of the previous chunk, and might even get a memory
1238 addressing fault when trying to do so.
f65fd747 1239
ae9166f2
FW
1240 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1241 main arena, described by the main_arena variable. When additional
1242 threads are spawned, each thread receives its own arena (up to a
1243 configurable limit, after which arenas are reused for multiple
1244 threads), and the chunks in these arenas have the A bit set. To
1245 find the arena for a chunk on such a non-main arena, heap_for_ptr
1246 performs a bit mask operation and indirection through the ar_ptr
1247 member of the per-heap header heap_info (see arena.c).
1248
f65fd747 1249 Note that the `foot' of the current chunk is actually represented
fa8d436c
UD
1250 as the prev_size of the NEXT chunk. This makes it easier to
1251 deal with alignments etc but can be very confusing when trying
1252 to extend or adapt this code.
f65fd747 1253
ae9166f2 1254 The three exceptions to all this are:
f65fd747 1255
fa8d436c 1256 1. The special chunk `top' doesn't bother using the
72f90263
UD
1257 trailing size field since there is no next contiguous chunk
1258 that would have to index off it. After initialization, `top'
1259 is forced to always exist. If it would become less than
1260 MINSIZE bytes long, it is replenished.
f65fd747
UD
1261
1262 2. Chunks allocated via mmap, which have the second-lowest-order
72f90263 1263 bit M (IS_MMAPPED) set in their size fields. Because they are
ae9166f2
FW
1264 allocated one-by-one, each must contain its own trailing size
1265 field. If the M bit is set, the other bits are ignored
1266 (because mmapped chunks are neither in an arena, nor adjacent
1267 to a freed chunk). The M bit is also used for chunks which
1268 originally came from a dumped heap via malloc_set_state in
1269 hooks.c.
1270
1271 3. Chunks in fastbins are treated as allocated chunks from the
1272 point of view of the chunk allocator. They are consolidated
1273 with their neighbors only in bulk, in malloc_consolidate.
f65fd747
UD
1274*/
1275
1276/*
fa8d436c
UD
1277 ---------- Size and alignment checks and conversions ----------
1278*/
f65fd747 1279
3784dfc0
RE
1280/* Conversion from malloc headers to user pointers, and back. When
1281 using memory tagging the user data and the malloc data structure
1282 headers have distinct tags. Converting fully from one to the other
1283 involves extracting the tag at the other address and creating a
1284 suitable pointer using it. That can be quite expensive. There are
1285 many occasions, though when the pointer will not be dereferenced
1286 (for example, because we only want to assert that the pointer is
1287 correctly aligned). In these cases it is more efficient not
1288 to extract the tag, since the answer will be the same either way.
1289 chunk2rawmem() can be used in these cases.
1290 */
1291
1292/* The chunk header is two SIZE_SZ elements, but this is used widely, so
1293 we define it here for clarity later. */
1294#define CHUNK_HDR_SZ (2 * SIZE_SZ)
1295
1296/* Convert a user mem pointer to a chunk address without correcting
1297 the tag. */
1298#define chunk2rawmem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
f65fd747 1299
3784dfc0
RE
1300/* Convert between user mem pointers and chunk pointers, updating any
1301 memory tags on the pointer to respect the tag value at that
1302 location. */
0c719cf4
SN
1303#define chunk2mem(p) ((void *)tag_at (((char*)(p) + CHUNK_HDR_SZ)))
1304#define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
f65fd747 1305
fa8d436c 1306/* The smallest possible chunk */
7ecfbd38 1307#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
f65fd747 1308
fa8d436c 1309/* The smallest size we can malloc is an aligned minimal chunk */
f65fd747 1310
fa8d436c
UD
1311#define MINSIZE \
1312 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
f65fd747 1313
fa8d436c 1314/* Check if m has acceptable alignment */
f65fd747 1315
073f560e
UD
1316#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1317
1318#define misaligned_chunk(p) \
3784dfc0 1319 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
073f560e 1320 & MALLOC_ALIGN_MASK)
f65fd747 1321
fa8d436c 1322/* pad request bytes into a usable size -- internal version */
3784dfc0
RE
1323/* Note: This must be a macro that evaluates to a compile time constant
1324 if passed a literal constant. */
fa8d436c
UD
1325#define request2size(req) \
1326 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1327 MINSIZE : \
1328 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
f65fd747 1329
3784dfc0
RE
1330/* Available size of chunk. This is the size of the real usable data
1331 in the chunk, plus the chunk header. */
1332#ifdef USE_MTAG
1333#define CHUNK_AVAILABLE_SIZE(p) \
1334 ((chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ)) \
0c719cf4 1335 & mtag_granule_mask)
3784dfc0
RE
1336#else
1337#define CHUNK_AVAILABLE_SIZE(p) \
1338 (chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1339#endif
1340
9bf8e29c
AZ
1341/* Check if REQ overflows when padded and aligned and if the resulting value
1342 is less than PTRDIFF_T. Returns TRUE and the requested size or MINSIZE in
1343 case the value is less than MINSIZE on SZ or false if any of the previous
1344 check fail. */
1345static inline bool
1346checked_request2size (size_t req, size_t *sz) __nonnull (1)
1347{
1348 if (__glibc_unlikely (req > PTRDIFF_MAX))
1349 return false;
3784dfc0
RE
1350
1351#ifdef USE_MTAG
1352 /* When using tagged memory, we cannot share the end of the user
1353 block with the header for the next chunk, so ensure that we
1354 allocate blocks that are rounded up to the granule size. Take
1355 care not to overflow from close to MAX_SIZE_T to a small
1356 number. Ideally, this would be part of request2size(), but that
1357 must be a macro that produces a compile time constant if passed
1358 a constant literal. */
0c719cf4 1359 req = (req + ~mtag_granule_mask) & mtag_granule_mask;
3784dfc0
RE
1360#endif
1361
9bf8e29c
AZ
1362 *sz = request2size (req);
1363 return true;
1364}
f65fd747
UD
1365
1366/*
6c8dbf00
OB
1367 --------------- Physical chunk operations ---------------
1368 */
f65fd747 1369
10dc2a90 1370
fa8d436c
UD
1371/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1372#define PREV_INUSE 0x1
f65fd747 1373
fa8d436c 1374/* extract inuse bit of previous chunk */
e9c4fe93 1375#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
f65fd747 1376
f65fd747 1377
fa8d436c
UD
1378/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1379#define IS_MMAPPED 0x2
f65fd747 1380
fa8d436c 1381/* check for mmap()'ed chunk */
e9c4fe93 1382#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
f65fd747 1383
f65fd747 1384
fa8d436c
UD
1385/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1386 from a non-main arena. This is only set immediately before handing
1387 the chunk to the user, if necessary. */
1388#define NON_MAIN_ARENA 0x4
f65fd747 1389
ae9166f2 1390/* Check for chunk from main arena. */
e9c4fe93
FW
1391#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1392
1393/* Mark a chunk as not being on the main arena. */
1394#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
f65fd747
UD
1395
1396
a9177ff5 1397/*
6c8dbf00 1398 Bits to mask off when extracting size
f65fd747 1399
6c8dbf00
OB
1400 Note: IS_MMAPPED is intentionally not masked off from size field in
1401 macros for which mmapped chunks should never be seen. This should
1402 cause helpful core dumps to occur if it is tried by accident by
1403 people extending or adapting this malloc.
1404 */
1405#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
f65fd747 1406
fa8d436c 1407/* Get size, ignoring use bits */
e9c4fe93 1408#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
f65fd747 1409
e9c4fe93
FW
1410/* Like chunksize, but do not mask SIZE_BITS. */
1411#define chunksize_nomask(p) ((p)->mchunk_size)
f65fd747 1412
fa8d436c 1413/* Ptr to next physical malloc_chunk. */
e9c4fe93
FW
1414#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1415
229855e5 1416/* Size of the chunk below P. Only valid if !prev_inuse (P). */
e9c4fe93
FW
1417#define prev_size(p) ((p)->mchunk_prev_size)
1418
229855e5 1419/* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
e9c4fe93 1420#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
f65fd747 1421
229855e5 1422/* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
e9c4fe93 1423#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
f65fd747 1424
fa8d436c 1425/* Treat space at ptr + offset as a chunk */
6c8dbf00 1426#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
fa8d436c
UD
1427
1428/* extract p's inuse bit */
6c8dbf00 1429#define inuse(p) \
e9c4fe93 1430 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
f65fd747 1431
fa8d436c 1432/* set/clear chunk as being inuse without otherwise disturbing */
6c8dbf00 1433#define set_inuse(p) \
e9c4fe93 1434 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
f65fd747 1435
6c8dbf00 1436#define clear_inuse(p) \
e9c4fe93 1437 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
f65fd747
UD
1438
1439
fa8d436c 1440/* check/set/clear inuse bits in known places */
6c8dbf00 1441#define inuse_bit_at_offset(p, s) \
e9c4fe93 1442 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
f65fd747 1443
6c8dbf00 1444#define set_inuse_bit_at_offset(p, s) \
e9c4fe93 1445 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
f65fd747 1446
6c8dbf00 1447#define clear_inuse_bit_at_offset(p, s) \
e9c4fe93 1448 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
f65fd747 1449
f65fd747 1450
fa8d436c 1451/* Set size at head, without disturbing its use bit */
e9c4fe93 1452#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
f65fd747 1453
fa8d436c 1454/* Set size/use field */
e9c4fe93 1455#define set_head(p, s) ((p)->mchunk_size = (s))
f65fd747 1456
fa8d436c 1457/* Set size at footer (only when chunk is not in use) */
e9c4fe93 1458#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
f65fd747 1459
e9c4fe93
FW
1460#pragma GCC poison mchunk_size
1461#pragma GCC poison mchunk_prev_size
1462
fa8d436c 1463/*
6c8dbf00 1464 -------------------- Internal data structures --------------------
fa8d436c
UD
1465
1466 All internal state is held in an instance of malloc_state defined
1467 below. There are no other static variables, except in two optional
a9177ff5 1468 cases:
6c8dbf00
OB
1469 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1470 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
22a89187 1471 for mmap.
fa8d436c
UD
1472
1473 Beware of lots of tricks that minimize the total bookkeeping space
1474 requirements. The result is a little over 1K bytes (for 4byte
1475 pointers and size_t.)
6c8dbf00 1476 */
f65fd747
UD
1477
1478/*
6c8dbf00 1479 Bins
fa8d436c
UD
1480
1481 An array of bin headers for free chunks. Each bin is doubly
1482 linked. The bins are approximately proportionally (log) spaced.
1483 There are a lot of these bins (128). This may look excessive, but
1484 works very well in practice. Most bins hold sizes that are
1485 unusual as malloc request sizes, but are more usual for fragments
1486 and consolidated sets of chunks, which is what these bins hold, so
1487 they can be found quickly. All procedures maintain the invariant
1488 that no consolidated chunk physically borders another one, so each
1489 chunk in a list is known to be preceeded and followed by either
1490 inuse chunks or the ends of memory.
1491
1492 Chunks in bins are kept in size order, with ties going to the
1493 approximately least recently used chunk. Ordering isn't needed
1494 for the small bins, which all contain the same-sized chunks, but
1495 facilitates best-fit allocation for larger chunks. These lists
1496 are just sequential. Keeping them in order almost never requires
1497 enough traversal to warrant using fancier ordered data
a9177ff5 1498 structures.
fa8d436c
UD
1499
1500 Chunks of the same size are linked with the most
1501 recently freed at the front, and allocations are taken from the
1502 back. This results in LRU (FIFO) allocation order, which tends
1503 to give each chunk an equal opportunity to be consolidated with
1504 adjacent freed chunks, resulting in larger free chunks and less
1505 fragmentation.
1506
1507 To simplify use in double-linked lists, each bin header acts
1508 as a malloc_chunk. This avoids special-casing for headers.
1509 But to conserve space and improve locality, we allocate
1510 only the fd/bk pointers of bins, and then use repositioning tricks
a9177ff5 1511 to treat these as the fields of a malloc_chunk*.
6c8dbf00 1512 */
f65fd747 1513
6c8dbf00 1514typedef struct malloc_chunk *mbinptr;
f65fd747 1515
fa8d436c 1516/* addressing -- note that bin_at(0) does not exist */
41999a1a
UD
1517#define bin_at(m, i) \
1518 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
6c8dbf00 1519 - offsetof (struct malloc_chunk, fd))
f65fd747 1520
fa8d436c 1521/* analog of ++bin */
6c8dbf00 1522#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
f65fd747 1523
fa8d436c
UD
1524/* Reminders about list directionality within bins */
1525#define first(b) ((b)->fd)
1526#define last(b) ((b)->bk)
f65fd747 1527
fa8d436c 1528/*
6c8dbf00 1529 Indexing
fa8d436c
UD
1530
1531 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1532 8 bytes apart. Larger bins are approximately logarithmically spaced:
f65fd747 1533
fa8d436c
UD
1534 64 bins of size 8
1535 32 bins of size 64
1536 16 bins of size 512
1537 8 bins of size 4096
1538 4 bins of size 32768
1539 2 bins of size 262144
1540 1 bin of size what's left
f65fd747 1541
fa8d436c
UD
1542 There is actually a little bit of slop in the numbers in bin_index
1543 for the sake of speed. This makes no difference elsewhere.
f65fd747 1544
fa8d436c
UD
1545 The bins top out around 1MB because we expect to service large
1546 requests via mmap.
b5a2bbe6
L
1547
1548 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1549 a valid chunk size the small bins are bumped up one.
6c8dbf00 1550 */
f65fd747 1551
fa8d436c
UD
1552#define NBINS 128
1553#define NSMALLBINS 64
1d47e92f 1554#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
3784dfc0 1555#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
b5a2bbe6 1556#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
f65fd747 1557
fa8d436c 1558#define in_smallbin_range(sz) \
6c8dbf00 1559 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
f65fd747 1560
1d47e92f 1561#define smallbin_index(sz) \
6c8dbf00 1562 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
b5a2bbe6 1563 + SMALLBIN_CORRECTION)
f65fd747 1564
1d47e92f 1565#define largebin_index_32(sz) \
6c8dbf00
OB
1566 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1567 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1568 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1569 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1570 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1571 126)
f65fd747 1572
b5a2bbe6 1573#define largebin_index_32_big(sz) \
6c8dbf00
OB
1574 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1575 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1576 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1577 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1578 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1579 126)
b5a2bbe6 1580
1d47e92f
UD
1581// XXX It remains to be seen whether it is good to keep the widths of
1582// XXX the buckets the same or whether it should be scaled by a factor
1583// XXX of two as well.
1584#define largebin_index_64(sz) \
6c8dbf00
OB
1585 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1586 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1587 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1588 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1589 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1590 126)
1d47e92f
UD
1591
1592#define largebin_index(sz) \
b5a2bbe6
L
1593 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1594 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1595 : largebin_index_32 (sz))
1d47e92f 1596
fa8d436c 1597#define bin_index(sz) \
6c8dbf00 1598 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
f65fd747 1599
1ecba1fa
FW
1600/* Take a chunk off a bin list. */
1601static void
1602unlink_chunk (mstate av, mchunkptr p)
1603{
1604 if (chunksize (p) != prev_size (next_chunk (p)))
1605 malloc_printerr ("corrupted size vs. prev_size");
1606
1607 mchunkptr fd = p->fd;
1608 mchunkptr bk = p->bk;
1609
1610 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1611 malloc_printerr ("corrupted double-linked list");
1612
1613 fd->bk = bk;
1614 bk->fd = fd;
1615 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1616 {
1617 if (p->fd_nextsize->bk_nextsize != p
1618 || p->bk_nextsize->fd_nextsize != p)
1619 malloc_printerr ("corrupted double-linked list (not small)");
1620
1621 if (fd->fd_nextsize == NULL)
1622 {
1623 if (p->fd_nextsize == p)
1624 fd->fd_nextsize = fd->bk_nextsize = fd;
1625 else
1626 {
1627 fd->fd_nextsize = p->fd_nextsize;
1628 fd->bk_nextsize = p->bk_nextsize;
1629 p->fd_nextsize->bk_nextsize = fd;
1630 p->bk_nextsize->fd_nextsize = fd;
1631 }
1632 }
1633 else
1634 {
1635 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1636 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1637 }
1638 }
1639}
f65fd747
UD
1640
1641/*
6c8dbf00 1642 Unsorted chunks
fa8d436c
UD
1643
1644 All remainders from chunk splits, as well as all returned chunks,
1645 are first placed in the "unsorted" bin. They are then placed
1646 in regular bins after malloc gives them ONE chance to be used before
1647 binning. So, basically, the unsorted_chunks list acts as a queue,
1648 with chunks being placed on it in free (and malloc_consolidate),
1649 and taken off (to be either used or placed in bins) in malloc.
1650
1651 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1652 does not have to be taken into account in size comparisons.
6c8dbf00 1653 */
f65fd747 1654
fa8d436c 1655/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
6c8dbf00 1656#define unsorted_chunks(M) (bin_at (M, 1))
f65fd747 1657
fa8d436c 1658/*
6c8dbf00 1659 Top
fa8d436c
UD
1660
1661 The top-most available chunk (i.e., the one bordering the end of
1662 available memory) is treated specially. It is never included in
1663 any bin, is used only if no other chunk is available, and is
1664 released back to the system if it is very large (see
1665 M_TRIM_THRESHOLD). Because top initially
1666 points to its own bin with initial zero size, thus forcing
1667 extension on the first malloc request, we avoid having any special
1668 code in malloc to check whether it even exists yet. But we still
1669 need to do so when getting memory from system, so we make
1670 initial_top treat the bin as a legal but unusable chunk during the
1671 interval between initialization and the first call to
3b49edc0 1672 sysmalloc. (This is somewhat delicate, since it relies on
fa8d436c 1673 the 2 preceding words to be zero during this interval as well.)
6c8dbf00 1674 */
f65fd747 1675
fa8d436c 1676/* Conveniently, the unsorted bin can be used as dummy top on first call */
6c8dbf00 1677#define initial_top(M) (unsorted_chunks (M))
f65fd747 1678
fa8d436c 1679/*
6c8dbf00 1680 Binmap
f65fd747 1681
fa8d436c
UD
1682 To help compensate for the large number of bins, a one-level index
1683 structure is used for bin-by-bin searching. `binmap' is a
1684 bitvector recording whether bins are definitely empty so they can
1685 be skipped over during during traversals. The bits are NOT always
1686 cleared as soon as bins are empty, but instead only
1687 when they are noticed to be empty during traversal in malloc.
6c8dbf00 1688 */
f65fd747 1689
fa8d436c
UD
1690/* Conservatively use 32 bits per map word, even if on 64bit system */
1691#define BINMAPSHIFT 5
1692#define BITSPERMAP (1U << BINMAPSHIFT)
1693#define BINMAPSIZE (NBINS / BITSPERMAP)
f65fd747 1694
fa8d436c 1695#define idx2block(i) ((i) >> BINMAPSHIFT)
6c8dbf00 1696#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
f65fd747 1697
6c8dbf00
OB
1698#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1699#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1700#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
f65fd747 1701
fa8d436c 1702/*
6c8dbf00 1703 Fastbins
fa8d436c
UD
1704
1705 An array of lists holding recently freed small chunks. Fastbins
1706 are not doubly linked. It is faster to single-link them, and
1707 since chunks are never removed from the middles of these lists,
1708 double linking is not necessary. Also, unlike regular bins, they
1709 are not even processed in FIFO order (they use faster LIFO) since
1710 ordering doesn't much matter in the transient contexts in which
1711 fastbins are normally used.
1712
1713 Chunks in fastbins keep their inuse bit set, so they cannot
1714 be consolidated with other free chunks. malloc_consolidate
1715 releases all chunks in fastbins and consolidates them with
a9177ff5 1716 other free chunks.
6c8dbf00 1717 */
f65fd747 1718
6c8dbf00 1719typedef struct malloc_chunk *mfastbinptr;
425ce2ed 1720#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
f65fd747 1721
fa8d436c 1722/* offset 2 to use otherwise unindexable first 2 bins */
425ce2ed 1723#define fastbin_index(sz) \
6c8dbf00 1724 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
425ce2ed 1725
f65fd747 1726
fa8d436c 1727/* The maximum fastbin request size we support */
425ce2ed 1728#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
f65fd747 1729
6c8dbf00 1730#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
f65fd747
UD
1731
1732/*
6c8dbf00
OB
1733 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1734 that triggers automatic consolidation of possibly-surrounding
1735 fastbin chunks. This is a heuristic, so the exact value should not
1736 matter too much. It is defined at half the default trim threshold as a
1737 compromise heuristic to only attempt consolidation if it is likely
1738 to lead to trimming. However, it is not dynamically tunable, since
1739 consolidation reduces fragmentation surrounding large chunks even
1740 if trimming is not used.
1741 */
f65fd747 1742
fa8d436c 1743#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
f65fd747 1744
f65fd747 1745/*
6c8dbf00
OB
1746 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1747 regions. Otherwise, contiguity is exploited in merging together,
1748 when possible, results from consecutive MORECORE calls.
f65fd747 1749
6c8dbf00
OB
1750 The initial value comes from MORECORE_CONTIGUOUS, but is
1751 changed dynamically if mmap is ever used as an sbrk substitute.
1752 */
f65fd747 1753
fa8d436c 1754#define NONCONTIGUOUS_BIT (2U)
f65fd747 1755
6c8dbf00
OB
1756#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1757#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1758#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
9bf248c6 1759#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
f65fd747 1760
eac43cbb
FW
1761/* Maximum size of memory handled in fastbins. */
1762static INTERNAL_SIZE_T global_max_fast;
1763
a9177ff5
RM
1764/*
1765 Set value of max_fast.
fa8d436c 1766 Use impossibly small value if 0.
3381be5c
WD
1767 Precondition: there are no existing fastbin chunks in the main arena.
1768 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1769 before changing max_fast. Note other arenas will leak their fast bin
1770 entries if max_fast is reduced.
6c8dbf00 1771 */
f65fd747 1772
9bf248c6 1773#define set_max_fast(s) \
b9cde4e3 1774 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
ff12e0fb 1775 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
f65fd747 1776
eac43cbb
FW
1777static inline INTERNAL_SIZE_T
1778get_max_fast (void)
1779{
1780 /* Tell the GCC optimizers that global_max_fast is never larger
1781 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1782 _int_malloc after constant propagation of the size parameter.
1783 (The code never executes because malloc preserves the
1784 global_max_fast invariant, but the optimizers may not recognize
1785 this.) */
1786 if (global_max_fast > MAX_FAST_SIZE)
1787 __builtin_unreachable ();
1788 return global_max_fast;
1789}
f65fd747
UD
1790
1791/*
fa8d436c 1792 ----------- Internal state representation and initialization -----------
6c8dbf00 1793 */
f65fd747 1794
e956075a
WD
1795/*
1796 have_fastchunks indicates that there are probably some fastbin chunks.
1797 It is set true on entering a chunk into any fastbin, and cleared early in
1798 malloc_consolidate. The value is approximate since it may be set when there
1799 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1800 available. Given it's sole purpose is to reduce number of redundant calls to
1801 malloc_consolidate, it does not affect correctness. As a result we can safely
1802 use relaxed atomic accesses.
1803 */
1804
1805
6c8dbf00
OB
1806struct malloc_state
1807{
fa8d436c 1808 /* Serialize access. */
cbb47fa1 1809 __libc_lock_define (, mutex);
9bf248c6
UD
1810
1811 /* Flags (formerly in max_fast). */
1812 int flags;
f65fd747 1813
e956075a 1814 /* Set if the fastbin chunks contain recently inserted free blocks. */
2c2245b9
WD
1815 /* Note this is a bool but not all targets support atomics on booleans. */
1816 int have_fastchunks;
e956075a 1817
fa8d436c 1818 /* Fastbins */
6c8dbf00 1819 mfastbinptr fastbinsY[NFASTBINS];
f65fd747 1820
fa8d436c 1821 /* Base of the topmost chunk -- not otherwise kept in a bin */
6c8dbf00 1822 mchunkptr top;
f65fd747 1823
fa8d436c 1824 /* The remainder from the most recent split of a small request */
6c8dbf00 1825 mchunkptr last_remainder;
f65fd747 1826
fa8d436c 1827 /* Normal bins packed as described above */
6c8dbf00 1828 mchunkptr bins[NBINS * 2 - 2];
f65fd747 1829
fa8d436c 1830 /* Bitmap of bins */
6c8dbf00 1831 unsigned int binmap[BINMAPSIZE];
f65fd747 1832
fa8d436c
UD
1833 /* Linked list */
1834 struct malloc_state *next;
f65fd747 1835
a62719ba 1836 /* Linked list for free arenas. Access to this field is serialized
90c400bd 1837 by free_list_lock in arena.c. */
425ce2ed 1838 struct malloc_state *next_free;
425ce2ed 1839
a62719ba 1840 /* Number of threads attached to this arena. 0 if the arena is on
90c400bd
FW
1841 the free list. Access to this field is serialized by
1842 free_list_lock in arena.c. */
a62719ba
FW
1843 INTERNAL_SIZE_T attached_threads;
1844
fa8d436c
UD
1845 /* Memory allocated from the system in this arena. */
1846 INTERNAL_SIZE_T system_mem;
1847 INTERNAL_SIZE_T max_system_mem;
1848};
f65fd747 1849
6c8dbf00
OB
1850struct malloc_par
1851{
fa8d436c 1852 /* Tunable parameters */
6c8dbf00
OB
1853 unsigned long trim_threshold;
1854 INTERNAL_SIZE_T top_pad;
1855 INTERNAL_SIZE_T mmap_threshold;
1856 INTERNAL_SIZE_T arena_test;
1857 INTERNAL_SIZE_T arena_max;
fa8d436c
UD
1858
1859 /* Memory map support */
6c8dbf00
OB
1860 int n_mmaps;
1861 int n_mmaps_max;
1862 int max_n_mmaps;
1d05c2fb
UD
1863 /* the mmap_threshold is dynamic, until the user sets
1864 it manually, at which point we need to disable any
1865 dynamic behavior. */
6c8dbf00 1866 int no_dyn_threshold;
fa8d436c 1867
fa8d436c 1868 /* Statistics */
6c8dbf00 1869 INTERNAL_SIZE_T mmapped_mem;
6c8dbf00 1870 INTERNAL_SIZE_T max_mmapped_mem;
fa8d436c
UD
1871
1872 /* First address handed out by MORECORE/sbrk. */
6c8dbf00 1873 char *sbrk_base;
d5c3fafc
DD
1874
1875#if USE_TCACHE
1876 /* Maximum number of buckets to use. */
1877 size_t tcache_bins;
1878 size_t tcache_max_bytes;
1879 /* Maximum number of chunks in each bucket. */
1880 size_t tcache_count;
1881 /* Maximum number of chunks to remove from the unsorted list, which
1882 aren't used to prefill the cache. */
1883 size_t tcache_unsorted_limit;
1884#endif
fa8d436c 1885};
f65fd747 1886
fa8d436c
UD
1887/* There are several instances of this struct ("arenas") in this
1888 malloc. If you are adapting this malloc in a way that does NOT use
1889 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1890 before using. This malloc relies on the property that malloc_state
1891 is initialized to all zeroes (as is true of C statics). */
f65fd747 1892
02d46fc4 1893static struct malloc_state main_arena =
6c8dbf00 1894{
400e1226 1895 .mutex = _LIBC_LOCK_INITIALIZER,
a62719ba
FW
1896 .next = &main_arena,
1897 .attached_threads = 1
6c8dbf00 1898};
f65fd747 1899
4cf6c72f
FW
1900/* These variables are used for undumping support. Chunked are marked
1901 as using mmap, but we leave them alone if they fall into this
1e8a8875
FW
1902 range. NB: The chunk size for these chunks only includes the
1903 initial size field (of SIZE_SZ bytes), there is no trailing size
1904 field (unlike with regular mmapped chunks). */
4cf6c72f
FW
1905static mchunkptr dumped_main_arena_start; /* Inclusive. */
1906static mchunkptr dumped_main_arena_end; /* Exclusive. */
1907
1908/* True if the pointer falls into the dumped arena. Use this after
1909 chunk_is_mmapped indicates a chunk is mmapped. */
1910#define DUMPED_MAIN_ARENA_CHUNK(p) \
1911 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1912
fa8d436c 1913/* There is only one instance of the malloc parameters. */
f65fd747 1914
02d46fc4 1915static struct malloc_par mp_ =
6c8dbf00
OB
1916{
1917 .top_pad = DEFAULT_TOP_PAD,
1918 .n_mmaps_max = DEFAULT_MMAP_MAX,
1919 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1920 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1921#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1922 .arena_test = NARENAS_FROM_NCORES (1)
d5c3fafc
DD
1923#if USE_TCACHE
1924 ,
1925 .tcache_count = TCACHE_FILL_COUNT,
1926 .tcache_bins = TCACHE_MAX_BINS,
1927 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1928 .tcache_unsorted_limit = 0 /* No limit. */
1929#endif
6c8dbf00 1930};
f65fd747 1931
fa8d436c 1932/*
6c8dbf00 1933 Initialize a malloc_state struct.
f65fd747 1934
3381be5c
WD
1935 This is called from ptmalloc_init () or from _int_new_arena ()
1936 when creating a new arena.
6c8dbf00 1937 */
f65fd747 1938
6c8dbf00
OB
1939static void
1940malloc_init_state (mstate av)
fa8d436c 1941{
6c8dbf00 1942 int i;
fa8d436c 1943 mbinptr bin;
a9177ff5 1944
fa8d436c 1945 /* Establish circular links for normal bins */
6c8dbf00
OB
1946 for (i = 1; i < NBINS; ++i)
1947 {
1948 bin = bin_at (av, i);
1949 bin->fd = bin->bk = bin;
1950 }
f65fd747 1951
fa8d436c
UD
1952#if MORECORE_CONTIGUOUS
1953 if (av != &main_arena)
1954#endif
6c8dbf00 1955 set_noncontiguous (av);
9bf248c6 1956 if (av == &main_arena)
6c8dbf00 1957 set_max_fast (DEFAULT_MXFAST);
e956075a 1958 atomic_store_relaxed (&av->have_fastchunks, false);
f65fd747 1959
6c8dbf00 1960 av->top = initial_top (av);
fa8d436c 1961}
e9b3e3c5 1962
a9177ff5 1963/*
fa8d436c 1964 Other internal utilities operating on mstates
6c8dbf00 1965 */
f65fd747 1966
6c8dbf00
OB
1967static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1968static int systrim (size_t, mstate);
1969static void malloc_consolidate (mstate);
7e3be507 1970
404d4cef
RM
1971
1972/* -------------- Early definitions for debugging hooks ---------------- */
1973
1974/* Define and initialize the hook variables. These weak definitions must
1975 appear before any use of the variables in a function (arena.c uses one). */
1976#ifndef weak_variable
404d4cef
RM
1977/* In GNU libc we want the hook variables to be weak definitions to
1978 avoid a problem with Emacs. */
22a89187 1979# define weak_variable weak_function
404d4cef
RM
1980#endif
1981
1982/* Forward declarations. */
6c8dbf00
OB
1983static void *malloc_hook_ini (size_t sz,
1984 const void *caller) __THROW;
1985static void *realloc_hook_ini (void *ptr, size_t sz,
1986 const void *caller) __THROW;
1987static void *memalign_hook_ini (size_t alignment, size_t sz,
1988 const void *caller) __THROW;
404d4cef 1989
2ba3cfa1 1990#if HAVE_MALLOC_INIT_HOOK
0923f74a 1991void (*__malloc_initialize_hook) (void) __attribute__ ((nocommon));
92e1ab0e
FW
1992compat_symbol (libc, __malloc_initialize_hook,
1993 __malloc_initialize_hook, GLIBC_2_0);
2ba3cfa1
FW
1994#endif
1995
a222d91a 1996void weak_variable (*__free_hook) (void *__ptr,
6c8dbf00 1997 const void *) = NULL;
a222d91a 1998void *weak_variable (*__malloc_hook)
6c8dbf00 1999 (size_t __size, const void *) = malloc_hook_ini;
a222d91a 2000void *weak_variable (*__realloc_hook)
6c8dbf00
OB
2001 (void *__ptr, size_t __size, const void *)
2002 = realloc_hook_ini;
a222d91a 2003void *weak_variable (*__memalign_hook)
6c8dbf00
OB
2004 (size_t __alignment, size_t __size, const void *)
2005 = memalign_hook_ini;
06d6611a 2006void weak_variable (*__after_morecore_hook) (void) = NULL;
404d4cef 2007
0a947e06
FW
2008/* This function is called from the arena shutdown hook, to free the
2009 thread cache (if it exists). */
2010static void tcache_thread_shutdown (void);
404d4cef 2011
854278df
UD
2012/* ------------------ Testing support ----------------------------------*/
2013
2014static int perturb_byte;
2015
af102d95 2016static void
e8349efd
OB
2017alloc_perturb (char *p, size_t n)
2018{
2019 if (__glibc_unlikely (perturb_byte))
2020 memset (p, perturb_byte ^ 0xff, n);
2021}
2022
af102d95 2023static void
e8349efd
OB
2024free_perturb (char *p, size_t n)
2025{
2026 if (__glibc_unlikely (perturb_byte))
2027 memset (p, perturb_byte, n);
2028}
2029
854278df
UD
2030
2031
3ea5be54
AO
2032#include <stap-probe.h>
2033
fa8d436c
UD
2034/* ------------------- Support for multiple arenas -------------------- */
2035#include "arena.c"
f65fd747 2036
fa8d436c 2037/*
6c8dbf00 2038 Debugging support
f65fd747 2039
6c8dbf00
OB
2040 These routines make a number of assertions about the states
2041 of data structures that should be true at all times. If any
2042 are not true, it's very likely that a user program has somehow
2043 trashed memory. (It's also possible that there is a coding error
2044 in malloc. In which case, please report it!)
2045 */
ee74a442 2046
6c8dbf00 2047#if !MALLOC_DEBUG
d8f00d46 2048
6c8dbf00
OB
2049# define check_chunk(A, P)
2050# define check_free_chunk(A, P)
2051# define check_inuse_chunk(A, P)
2052# define check_remalloced_chunk(A, P, N)
2053# define check_malloced_chunk(A, P, N)
2054# define check_malloc_state(A)
d8f00d46 2055
fa8d436c 2056#else
ca34d7a7 2057
6c8dbf00
OB
2058# define check_chunk(A, P) do_check_chunk (A, P)
2059# define check_free_chunk(A, P) do_check_free_chunk (A, P)
2060# define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2061# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2062# define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2063# define check_malloc_state(A) do_check_malloc_state (A)
ca34d7a7 2064
fa8d436c 2065/*
6c8dbf00
OB
2066 Properties of all chunks
2067 */
ca34d7a7 2068
6c8dbf00
OB
2069static void
2070do_check_chunk (mstate av, mchunkptr p)
ca34d7a7 2071{
6c8dbf00 2072 unsigned long sz = chunksize (p);
fa8d436c 2073 /* min and max possible addresses assuming contiguous allocation */
6c8dbf00
OB
2074 char *max_address = (char *) (av->top) + chunksize (av->top);
2075 char *min_address = max_address - av->system_mem;
fa8d436c 2076
6c8dbf00
OB
2077 if (!chunk_is_mmapped (p))
2078 {
2079 /* Has legal address ... */
2080 if (p != av->top)
2081 {
2082 if (contiguous (av))
2083 {
2084 assert (((char *) p) >= min_address);
2085 assert (((char *) p + sz) <= ((char *) (av->top)));
2086 }
2087 }
2088 else
2089 {
2090 /* top size is always at least MINSIZE */
2091 assert ((unsigned long) (sz) >= MINSIZE);
2092 /* top predecessor always marked inuse */
2093 assert (prev_inuse (p));
2094 }
fa8d436c 2095 }
4cf6c72f 2096 else if (!DUMPED_MAIN_ARENA_CHUNK (p))
6c8dbf00
OB
2097 {
2098 /* address is outside main heap */
2099 if (contiguous (av) && av->top != initial_top (av))
2100 {
2101 assert (((char *) p) < min_address || ((char *) p) >= max_address);
2102 }
2103 /* chunk is page-aligned */
e9c4fe93 2104 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
6c8dbf00 2105 /* mem is aligned */
3784dfc0 2106 assert (aligned_OK (chunk2rawmem (p)));
fa8d436c 2107 }
eb406346
UD
2108}
2109
fa8d436c 2110/*
6c8dbf00
OB
2111 Properties of free chunks
2112 */
ee74a442 2113
6c8dbf00
OB
2114static void
2115do_check_free_chunk (mstate av, mchunkptr p)
67c94753 2116{
3381be5c 2117 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
6c8dbf00 2118 mchunkptr next = chunk_at_offset (p, sz);
67c94753 2119
6c8dbf00 2120 do_check_chunk (av, p);
67c94753 2121
fa8d436c 2122 /* Chunk must claim to be free ... */
6c8dbf00
OB
2123 assert (!inuse (p));
2124 assert (!chunk_is_mmapped (p));
67c94753 2125
fa8d436c 2126 /* Unless a special marker, must have OK fields */
6c8dbf00
OB
2127 if ((unsigned long) (sz) >= MINSIZE)
2128 {
2129 assert ((sz & MALLOC_ALIGN_MASK) == 0);
3784dfc0 2130 assert (aligned_OK (chunk2rawmem (p)));
6c8dbf00 2131 /* ... matching footer field */
3381be5c 2132 assert (prev_size (next_chunk (p)) == sz);
6c8dbf00
OB
2133 /* ... and is fully consolidated */
2134 assert (prev_inuse (p));
2135 assert (next == av->top || inuse (next));
2136
2137 /* ... and has minimally sane links */
2138 assert (p->fd->bk == p);
2139 assert (p->bk->fd == p);
2140 }
fa8d436c 2141 else /* markers are always of size SIZE_SZ */
6c8dbf00 2142 assert (sz == SIZE_SZ);
67c94753 2143}
67c94753 2144
fa8d436c 2145/*
6c8dbf00
OB
2146 Properties of inuse chunks
2147 */
fa8d436c 2148
6c8dbf00
OB
2149static void
2150do_check_inuse_chunk (mstate av, mchunkptr p)
f65fd747 2151{
fa8d436c 2152 mchunkptr next;
f65fd747 2153
6c8dbf00 2154 do_check_chunk (av, p);
f65fd747 2155
6c8dbf00 2156 if (chunk_is_mmapped (p))
fa8d436c 2157 return; /* mmapped chunks have no next/prev */
ca34d7a7 2158
fa8d436c 2159 /* Check whether it claims to be in use ... */
6c8dbf00 2160 assert (inuse (p));
10dc2a90 2161
6c8dbf00 2162 next = next_chunk (p);
10dc2a90 2163
fa8d436c 2164 /* ... and is surrounded by OK chunks.
6c8dbf00
OB
2165 Since more things can be checked with free chunks than inuse ones,
2166 if an inuse chunk borders them and debug is on, it's worth doing them.
2167 */
2168 if (!prev_inuse (p))
2169 {
2170 /* Note that we cannot even look at prev unless it is not inuse */
2171 mchunkptr prv = prev_chunk (p);
2172 assert (next_chunk (prv) == p);
2173 do_check_free_chunk (av, prv);
2174 }
fa8d436c 2175
6c8dbf00
OB
2176 if (next == av->top)
2177 {
2178 assert (prev_inuse (next));
2179 assert (chunksize (next) >= MINSIZE);
2180 }
2181 else if (!inuse (next))
2182 do_check_free_chunk (av, next);
10dc2a90
UD
2183}
2184
fa8d436c 2185/*
6c8dbf00
OB
2186 Properties of chunks recycled from fastbins
2187 */
fa8d436c 2188
6c8dbf00
OB
2189static void
2190do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2191{
3381be5c 2192 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
fa8d436c 2193
6c8dbf00
OB
2194 if (!chunk_is_mmapped (p))
2195 {
2196 assert (av == arena_for_chunk (p));
e9c4fe93 2197 if (chunk_main_arena (p))
6c8dbf00 2198 assert (av == &main_arena);
e9c4fe93
FW
2199 else
2200 assert (av != &main_arena);
6c8dbf00 2201 }
fa8d436c 2202
6c8dbf00 2203 do_check_inuse_chunk (av, p);
fa8d436c
UD
2204
2205 /* Legal size ... */
6c8dbf00
OB
2206 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2207 assert ((unsigned long) (sz) >= MINSIZE);
fa8d436c 2208 /* ... and alignment */
3784dfc0 2209 assert (aligned_OK (chunk2rawmem (p)));
fa8d436c 2210 /* chunk is less than MINSIZE more than request */
6c8dbf00
OB
2211 assert ((long) (sz) - (long) (s) >= 0);
2212 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
10dc2a90
UD
2213}
2214
fa8d436c 2215/*
6c8dbf00
OB
2216 Properties of nonrecycled chunks at the point they are malloced
2217 */
fa8d436c 2218
6c8dbf00
OB
2219static void
2220do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2221{
fa8d436c 2222 /* same as recycled case ... */
6c8dbf00 2223 do_check_remalloced_chunk (av, p, s);
10dc2a90 2224
fa8d436c 2225 /*
6c8dbf00
OB
2226 ... plus, must obey implementation invariant that prev_inuse is
2227 always true of any allocated chunk; i.e., that each allocated
2228 chunk borders either a previously allocated and still in-use
2229 chunk, or the base of its memory arena. This is ensured
2230 by making all allocations from the `lowest' part of any found
2231 chunk. This does not necessarily hold however for chunks
2232 recycled via fastbins.
2233 */
2234
2235 assert (prev_inuse (p));
fa8d436c 2236}
10dc2a90 2237
f65fd747 2238
fa8d436c 2239/*
6c8dbf00 2240 Properties of malloc_state.
f65fd747 2241
6c8dbf00
OB
2242 This may be useful for debugging malloc, as well as detecting user
2243 programmer errors that somehow write into malloc_state.
f65fd747 2244
6c8dbf00
OB
2245 If you are extending or experimenting with this malloc, you can
2246 probably figure out how to hack this routine to print out or
2247 display chunk addresses, sizes, bins, and other instrumentation.
2248 */
f65fd747 2249
6c8dbf00
OB
2250static void
2251do_check_malloc_state (mstate av)
fa8d436c
UD
2252{
2253 int i;
2254 mchunkptr p;
2255 mchunkptr q;
2256 mbinptr b;
fa8d436c
UD
2257 unsigned int idx;
2258 INTERNAL_SIZE_T size;
2259 unsigned long total = 0;
2260 int max_fast_bin;
f65fd747 2261
fa8d436c 2262 /* internal size_t must be no wider than pointer type */
6c8dbf00 2263 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
f65fd747 2264
fa8d436c 2265 /* alignment is a power of 2 */
6c8dbf00 2266 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
f65fd747 2267
3381be5c
WD
2268 /* Check the arena is initialized. */
2269 assert (av->top != 0);
2270
2271 /* No memory has been allocated yet, so doing more tests is not possible. */
2272 if (av->top == initial_top (av))
fa8d436c 2273 return;
f65fd747 2274
fa8d436c 2275 /* pagesize is a power of 2 */
8a35c3fe 2276 assert (powerof2(GLRO (dl_pagesize)));
f65fd747 2277
fa8d436c 2278 /* A contiguous main_arena is consistent with sbrk_base. */
6c8dbf00
OB
2279 if (av == &main_arena && contiguous (av))
2280 assert ((char *) mp_.sbrk_base + av->system_mem ==
2281 (char *) av->top + chunksize (av->top));
fa8d436c
UD
2282
2283 /* properties of fastbins */
2284
2285 /* max_fast is in allowed range */
6c8dbf00
OB
2286 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2287
2288 max_fast_bin = fastbin_index (get_max_fast ());
2289
2290 for (i = 0; i < NFASTBINS; ++i)
2291 {
2292 p = fastbin (av, i);
2293
2294 /* The following test can only be performed for the main arena.
2295 While mallopt calls malloc_consolidate to get rid of all fast
2296 bins (especially those larger than the new maximum) this does
2297 only happen for the main arena. Trying to do this for any
2298 other arena would mean those arenas have to be locked and
2299 malloc_consolidate be called for them. This is excessive. And
2300 even if this is acceptable to somebody it still cannot solve
2301 the problem completely since if the arena is locked a
2302 concurrent malloc call might create a new arena which then
2303 could use the newly invalid fast bins. */
2304
2305 /* all bins past max_fast are empty */
2306 if (av == &main_arena && i > max_fast_bin)
2307 assert (p == 0);
2308
2309 while (p != 0)
2310 {
49c3c376 2311 if (__glibc_unlikely (misaligned_chunk (p)))
768358b6 2312 malloc_printerr ("do_check_malloc_state(): "
a1a486d7 2313 "unaligned fastbin chunk detected");
6c8dbf00
OB
2314 /* each chunk claims to be inuse */
2315 do_check_inuse_chunk (av, p);
2316 total += chunksize (p);
2317 /* chunk belongs in this bin */
2318 assert (fastbin_index (chunksize (p)) == i);
a1a486d7 2319 p = REVEAL_PTR (p->fd);
6c8dbf00 2320 }
fa8d436c 2321 }
fa8d436c 2322
fa8d436c 2323 /* check normal bins */
6c8dbf00
OB
2324 for (i = 1; i < NBINS; ++i)
2325 {
2326 b = bin_at (av, i);
2327
2328 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2329 if (i >= 2)
2330 {
2331 unsigned int binbit = get_binmap (av, i);
2332 int empty = last (b) == b;
2333 if (!binbit)
2334 assert (empty);
2335 else if (!empty)
2336 assert (binbit);
2337 }
2338
2339 for (p = last (b); p != b; p = p->bk)
2340 {
2341 /* each chunk claims to be free */
2342 do_check_free_chunk (av, p);
2343 size = chunksize (p);
2344 total += size;
2345 if (i >= 2)
2346 {
2347 /* chunk belongs in bin */
2348 idx = bin_index (size);
2349 assert (idx == i);
2350 /* lists are sorted */
2351 assert (p->bk == b ||
2352 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2353
2354 if (!in_smallbin_range (size))
2355 {
2356 if (p->fd_nextsize != NULL)
2357 {
2358 if (p->fd_nextsize == p)
2359 assert (p->bk_nextsize == p);
2360 else
2361 {
2362 if (p->fd_nextsize == first (b))
2363 assert (chunksize (p) < chunksize (p->fd_nextsize));
2364 else
2365 assert (chunksize (p) > chunksize (p->fd_nextsize));
2366
2367 if (p == first (b))
2368 assert (chunksize (p) > chunksize (p->bk_nextsize));
2369 else
2370 assert (chunksize (p) < chunksize (p->bk_nextsize));
2371 }
2372 }
2373 else
2374 assert (p->bk_nextsize == NULL);
2375 }
2376 }
2377 else if (!in_smallbin_range (size))
2378 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2379 /* chunk is followed by a legal chain of inuse chunks */
2380 for (q = next_chunk (p);
2381 (q != av->top && inuse (q) &&
2382 (unsigned long) (chunksize (q)) >= MINSIZE);
2383 q = next_chunk (q))
2384 do_check_inuse_chunk (av, q);
2385 }
fa8d436c 2386 }
f65fd747 2387
fa8d436c 2388 /* top chunk is OK */
6c8dbf00 2389 check_chunk (av, av->top);
fa8d436c
UD
2390}
2391#endif
2392
2393
2394/* ----------------- Support for debugging hooks -------------------- */
2395#include "hooks.c"
2396
2397
2398/* ----------- Routines dealing with system allocation -------------- */
2399
2400/*
6c8dbf00
OB
2401 sysmalloc handles malloc cases requiring more memory from the system.
2402 On entry, it is assumed that av->top does not have enough
2403 space to service request for nb bytes, thus requiring that av->top
2404 be extended or replaced.
2405 */
fa8d436c 2406
6c8dbf00
OB
2407static void *
2408sysmalloc (INTERNAL_SIZE_T nb, mstate av)
f65fd747 2409{
6c8dbf00 2410 mchunkptr old_top; /* incoming value of av->top */
fa8d436c 2411 INTERNAL_SIZE_T old_size; /* its size */
6c8dbf00 2412 char *old_end; /* its end address */
f65fd747 2413
6c8dbf00
OB
2414 long size; /* arg to first MORECORE or mmap call */
2415 char *brk; /* return value from MORECORE */
f65fd747 2416
6c8dbf00
OB
2417 long correction; /* arg to 2nd MORECORE call */
2418 char *snd_brk; /* 2nd return val */
f65fd747 2419
fa8d436c
UD
2420 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2421 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
6c8dbf00 2422 char *aligned_brk; /* aligned offset into brk */
f65fd747 2423
6c8dbf00
OB
2424 mchunkptr p; /* the allocated/returned chunk */
2425 mchunkptr remainder; /* remainder from allocation */
2426 unsigned long remainder_size; /* its size */
fa8d436c 2427
fa8d436c 2428
8a35c3fe 2429 size_t pagesize = GLRO (dl_pagesize);
6c8dbf00 2430 bool tried_mmap = false;
fa8d436c
UD
2431
2432
fa8d436c 2433 /*
6c8dbf00
OB
2434 If have mmap, and the request size meets the mmap threshold, and
2435 the system supports mmap, and there are few enough currently
2436 allocated mmapped regions, try to directly map this request
2437 rather than expanding top.
2438 */
2439
fff94fa2
SP
2440 if (av == NULL
2441 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2442 && (mp_.n_mmaps < mp_.n_mmaps_max)))
6c8dbf00
OB
2443 {
2444 char *mm; /* return value from mmap call*/
a9177ff5 2445
6c8dbf00
OB
2446 try_mmap:
2447 /*
2448 Round up size to nearest page. For mmapped chunks, the overhead
2449 is one SIZE_SZ unit larger than for normal chunks, because there
2450 is no following chunk whose prev_size field could be used.
2451
2452 See the front_misalign handling below, for glibc there is no
2453 need for further alignments unless we have have high alignment.
2454 */
3784dfc0 2455 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
8a35c3fe 2456 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
6c8dbf00 2457 else
8a35c3fe 2458 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
6c8dbf00
OB
2459 tried_mmap = true;
2460
2461 /* Don't try if size wraps around 0 */
2462 if ((unsigned long) (size) > (unsigned long) (nb))
2463 {
3784dfc0 2464 mm = (char *) (MMAP (0, size,
0c719cf4 2465 mtag_mmap_flags | PROT_READ | PROT_WRITE, 0));
6c8dbf00
OB
2466
2467 if (mm != MAP_FAILED)
2468 {
2469 /*
2470 The offset to the start of the mmapped region is stored
2471 in the prev_size field of the chunk. This allows us to adjust
2472 returned start address to meet alignment requirements here
2473 and in memalign(), and still be able to compute proper
2474 address argument for later munmap in free() and realloc().
2475 */
2476
3784dfc0 2477 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
6c8dbf00 2478 {
3784dfc0
RE
2479 /* For glibc, chunk2rawmem increases the address by
2480 CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
2481 CHUNK_HDR_SZ-1. Each mmap'ed area is page
2482 aligned and therefore definitely
2483 MALLOC_ALIGN_MASK-aligned. */
2484 assert (((INTERNAL_SIZE_T) chunk2rawmem (mm) & MALLOC_ALIGN_MASK) == 0);
6c8dbf00
OB
2485 front_misalign = 0;
2486 }
2487 else
3784dfc0 2488 front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (mm) & MALLOC_ALIGN_MASK;
6c8dbf00
OB
2489 if (front_misalign > 0)
2490 {
2491 correction = MALLOC_ALIGNMENT - front_misalign;
2492 p = (mchunkptr) (mm + correction);
e9c4fe93 2493 set_prev_size (p, correction);
6c8dbf00
OB
2494 set_head (p, (size - correction) | IS_MMAPPED);
2495 }
2496 else
2497 {
2498 p = (mchunkptr) mm;
681421f3 2499 set_prev_size (p, 0);
6c8dbf00
OB
2500 set_head (p, size | IS_MMAPPED);
2501 }
2502
2503 /* update statistics */
2504
2505 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2506 atomic_max (&mp_.max_n_mmaps, new);
2507
2508 unsigned long sum;
2509 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2510 atomic_max (&mp_.max_mmapped_mem, sum);
2511
2512 check_chunk (av, p);
2513
2514 return chunk2mem (p);
2515 }
2516 }
fa8d436c 2517 }
fa8d436c 2518
fff94fa2
SP
2519 /* There are no usable arenas and mmap also failed. */
2520 if (av == NULL)
2521 return 0;
2522
fa8d436c
UD
2523 /* Record incoming configuration of top */
2524
6c8dbf00
OB
2525 old_top = av->top;
2526 old_size = chunksize (old_top);
2527 old_end = (char *) (chunk_at_offset (old_top, old_size));
fa8d436c 2528
6c8dbf00 2529 brk = snd_brk = (char *) (MORECORE_FAILURE);
fa8d436c 2530
a9177ff5 2531 /*
fa8d436c
UD
2532 If not the first time through, we require old_size to be
2533 at least MINSIZE and to have prev_inuse set.
6c8dbf00 2534 */
fa8d436c 2535
6c8dbf00
OB
2536 assert ((old_top == initial_top (av) && old_size == 0) ||
2537 ((unsigned long) (old_size) >= MINSIZE &&
2538 prev_inuse (old_top) &&
8a35c3fe 2539 ((unsigned long) old_end & (pagesize - 1)) == 0));
fa8d436c
UD
2540
2541 /* Precondition: not enough current space to satisfy nb request */
6c8dbf00 2542 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
a9177ff5 2543
72f90263 2544
6c8dbf00
OB
2545 if (av != &main_arena)
2546 {
2547 heap_info *old_heap, *heap;
2548 size_t old_heap_size;
2549
2550 /* First try to extend the current heap. */
2551 old_heap = heap_for_ptr (old_top);
2552 old_heap_size = old_heap->size;
2553 if ((long) (MINSIZE + nb - old_size) > 0
2554 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2555 {
2556 av->system_mem += old_heap->size - old_heap_size;
6c8dbf00
OB
2557 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2558 | PREV_INUSE);
2559 }
2560 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2561 {
2562 /* Use a newly allocated heap. */
2563 heap->ar_ptr = av;
2564 heap->prev = old_heap;
2565 av->system_mem += heap->size;
6c8dbf00
OB
2566 /* Set up the new top. */
2567 top (av) = chunk_at_offset (heap, sizeof (*heap));
2568 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2569
2570 /* Setup fencepost and free the old top chunk with a multiple of
2571 MALLOC_ALIGNMENT in size. */
2572 /* The fencepost takes at least MINSIZE bytes, because it might
2573 become the top chunk again later. Note that a footer is set
2574 up, too, although the chunk is marked in use. */
2575 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
3784dfc0
RE
2576 set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
2577 0 | PREV_INUSE);
6c8dbf00
OB
2578 if (old_size >= MINSIZE)
2579 {
3784dfc0
RE
2580 set_head (chunk_at_offset (old_top, old_size),
2581 CHUNK_HDR_SZ | PREV_INUSE);
2582 set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
6c8dbf00
OB
2583 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2584 _int_free (av, old_top, 1);
2585 }
2586 else
2587 {
3784dfc0
RE
2588 set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
2589 set_foot (old_top, (old_size + CHUNK_HDR_SZ));
6c8dbf00
OB
2590 }
2591 }
2592 else if (!tried_mmap)
2593 /* We can at least try to use to mmap memory. */
2594 goto try_mmap;
fa8d436c 2595 }
6c8dbf00 2596 else /* av == main_arena */
fa8d436c 2597
fa8d436c 2598
6c8dbf00
OB
2599 { /* Request enough space for nb + pad + overhead */
2600 size = nb + mp_.top_pad + MINSIZE;
a9177ff5 2601
6c8dbf00
OB
2602 /*
2603 If contiguous, we can subtract out existing space that we hope to
2604 combine with new space. We add it back later only if
2605 we don't actually get contiguous space.
2606 */
a9177ff5 2607
6c8dbf00
OB
2608 if (contiguous (av))
2609 size -= old_size;
fa8d436c 2610
6c8dbf00
OB
2611 /*
2612 Round to a multiple of page size.
2613 If MORECORE is not contiguous, this ensures that we only call it
2614 with whole-page arguments. And if MORECORE is contiguous and
2615 this is not first time through, this preserves page-alignment of
2616 previous calls. Otherwise, we correct to page-align below.
2617 */
fa8d436c 2618
8a35c3fe 2619 size = ALIGN_UP (size, pagesize);
fa8d436c 2620
6c8dbf00
OB
2621 /*
2622 Don't try to call MORECORE if argument is so big as to appear
2623 negative. Note that since mmap takes size_t arg, it may succeed
2624 below even if we cannot call MORECORE.
2625 */
2626
2627 if (size > 0)
2628 {
2629 brk = (char *) (MORECORE (size));
2630 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2631 }
2632
2633 if (brk != (char *) (MORECORE_FAILURE))
2634 {
2635 /* Call the `morecore' hook if necessary. */
2636 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2637 if (__builtin_expect (hook != NULL, 0))
2638 (*hook)();
2639 }
2640 else
2641 {
2642 /*
2643 If have mmap, try using it as a backup when MORECORE fails or
2644 cannot be used. This is worth doing on systems that have "holes" in
2645 address space, so sbrk cannot extend to give contiguous space, but
2646 space is available elsewhere. Note that we ignore mmap max count
2647 and threshold limits, since the space will not be used as a
2648 segregated mmap region.
2649 */
2650
2651 /* Cannot merge with old top, so add its size back in */
2652 if (contiguous (av))
8a35c3fe 2653 size = ALIGN_UP (size + old_size, pagesize);
6c8dbf00
OB
2654
2655 /* If we are relying on mmap as backup, then use larger units */
2656 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2657 size = MMAP_AS_MORECORE_SIZE;
2658
2659 /* Don't try if size wraps around 0 */
2660 if ((unsigned long) (size) > (unsigned long) (nb))
2661 {
3784dfc0 2662 char *mbrk = (char *) (MMAP (0, size,
0c719cf4 2663 mtag_mmap_flags | PROT_READ | PROT_WRITE,
3784dfc0 2664 0));
6c8dbf00
OB
2665
2666 if (mbrk != MAP_FAILED)
2667 {
2668 /* We do not need, and cannot use, another sbrk call to find end */
2669 brk = mbrk;
2670 snd_brk = brk + size;
2671
2672 /*
2673 Record that we no longer have a contiguous sbrk region.
2674 After the first time mmap is used as backup, we do not
2675 ever rely on contiguous space since this could incorrectly
2676 bridge regions.
2677 */
2678 set_noncontiguous (av);
2679 }
2680 }
2681 }
2682
2683 if (brk != (char *) (MORECORE_FAILURE))
2684 {
2685 if (mp_.sbrk_base == 0)
2686 mp_.sbrk_base = brk;
2687 av->system_mem += size;
2688
2689 /*
2690 If MORECORE extends previous space, we can likewise extend top size.
2691 */
2692
2693 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2694 set_head (old_top, (size + old_size) | PREV_INUSE);
2695
2696 else if (contiguous (av) && old_size && brk < old_end)
ac3ed168
FW
2697 /* Oops! Someone else killed our space.. Can't touch anything. */
2698 malloc_printerr ("break adjusted to free malloc space");
6c8dbf00
OB
2699
2700 /*
2701 Otherwise, make adjustments:
2702
2703 * If the first time through or noncontiguous, we need to call sbrk
2704 just to find out where the end of memory lies.
2705
2706 * We need to ensure that all returned chunks from malloc will meet
2707 MALLOC_ALIGNMENT
2708
2709 * If there was an intervening foreign sbrk, we need to adjust sbrk
2710 request size to account for fact that we will not be able to
2711 combine new space with existing space in old_top.
2712
2713 * Almost all systems internally allocate whole pages at a time, in
2714 which case we might as well use the whole last page of request.
2715 So we allocate enough more memory to hit a page boundary now,
2716 which in turn causes future contiguous calls to page-align.
2717 */
2718
2719 else
2720 {
2721 front_misalign = 0;
2722 end_misalign = 0;
2723 correction = 0;
2724 aligned_brk = brk;
2725
2726 /* handle contiguous cases */
2727 if (contiguous (av))
2728 {
2729 /* Count foreign sbrk as system_mem. */
2730 if (old_size)
2731 av->system_mem += brk - old_end;
2732
2733 /* Guarantee alignment of first new chunk made from this space */
2734
3784dfc0 2735 front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (brk) & MALLOC_ALIGN_MASK;
6c8dbf00
OB
2736 if (front_misalign > 0)
2737 {
2738 /*
2739 Skip over some bytes to arrive at an aligned position.
2740 We don't need to specially mark these wasted front bytes.
2741 They will never be accessed anyway because
2742 prev_inuse of av->top (and any chunk created from its start)
2743 is always true after initialization.
2744 */
2745
2746 correction = MALLOC_ALIGNMENT - front_misalign;
2747 aligned_brk += correction;
2748 }
2749
2750 /*
2751 If this isn't adjacent to existing space, then we will not
2752 be able to merge with old_top space, so must add to 2nd request.
2753 */
2754
2755 correction += old_size;
2756
2757 /* Extend the end address to hit a page boundary */
2758 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
8a35c3fe 2759 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
6c8dbf00
OB
2760
2761 assert (correction >= 0);
2762 snd_brk = (char *) (MORECORE (correction));
2763
2764 /*
2765 If can't allocate correction, try to at least find out current
2766 brk. It might be enough to proceed without failing.
2767
2768 Note that if second sbrk did NOT fail, we assume that space
2769 is contiguous with first sbrk. This is a safe assumption unless
2770 program is multithreaded but doesn't use locks and a foreign sbrk
2771 occurred between our first and second calls.
2772 */
2773
2774 if (snd_brk == (char *) (MORECORE_FAILURE))
2775 {
2776 correction = 0;
2777 snd_brk = (char *) (MORECORE (0));
2778 }
2779 else
2780 {
2781 /* Call the `morecore' hook if necessary. */
2782 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2783 if (__builtin_expect (hook != NULL, 0))
2784 (*hook)();
2785 }
2786 }
2787
2788 /* handle non-contiguous cases */
2789 else
2790 {
3784dfc0 2791 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
6c8dbf00 2792 /* MORECORE/mmap must correctly align */
3784dfc0 2793 assert (((unsigned long) chunk2rawmem (brk) & MALLOC_ALIGN_MASK) == 0);
6c8dbf00
OB
2794 else
2795 {
3784dfc0 2796 front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (brk) & MALLOC_ALIGN_MASK;
6c8dbf00
OB
2797 if (front_misalign > 0)
2798 {
2799 /*
2800 Skip over some bytes to arrive at an aligned position.
2801 We don't need to specially mark these wasted front bytes.
2802 They will never be accessed anyway because
2803 prev_inuse of av->top (and any chunk created from its start)
2804 is always true after initialization.
2805 */
2806
2807 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2808 }
2809 }
2810
2811 /* Find out current end of memory */
2812 if (snd_brk == (char *) (MORECORE_FAILURE))
2813 {
2814 snd_brk = (char *) (MORECORE (0));
2815 }
2816 }
2817
2818 /* Adjust top based on results of second sbrk */
2819 if (snd_brk != (char *) (MORECORE_FAILURE))
2820 {
2821 av->top = (mchunkptr) aligned_brk;
2822 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2823 av->system_mem += correction;
2824
2825 /*
2826 If not the first time through, we either have a
2827 gap due to foreign sbrk or a non-contiguous region. Insert a
2828 double fencepost at old_top to prevent consolidation with space
2829 we don't own. These fenceposts are artificial chunks that are
2830 marked as inuse and are in any case too small to use. We need
2831 two to make sizes and alignments work out.
2832 */
2833
2834 if (old_size != 0)
2835 {
2836 /*
2837 Shrink old_top to insert fenceposts, keeping size a
2838 multiple of MALLOC_ALIGNMENT. We know there is at least
2839 enough space in old_top to do this.
2840 */
3784dfc0 2841 old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
6c8dbf00
OB
2842 set_head (old_top, old_size | PREV_INUSE);
2843
2844 /*
2845 Note that the following assignments completely overwrite
2846 old_top when old_size was previously MINSIZE. This is
2847 intentional. We need the fencepost, even if old_top otherwise gets
2848 lost.
2849 */
e9c4fe93 2850 set_head (chunk_at_offset (old_top, old_size),
3784dfc0
RE
2851 CHUNK_HDR_SZ | PREV_INUSE);
2852 set_head (chunk_at_offset (old_top,
2853 old_size + CHUNK_HDR_SZ),
2854 CHUNK_HDR_SZ | PREV_INUSE);
6c8dbf00
OB
2855
2856 /* If possible, release the rest. */
2857 if (old_size >= MINSIZE)
2858 {
2859 _int_free (av, old_top, 1);
2860 }
2861 }
2862 }
2863 }
2864 }
2865 } /* if (av != &main_arena) */
2866
2867 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
fa8d436c 2868 av->max_system_mem = av->system_mem;
6c8dbf00 2869 check_malloc_state (av);
a9177ff5 2870
fa8d436c
UD
2871 /* finally, do the allocation */
2872 p = av->top;
6c8dbf00 2873 size = chunksize (p);
fa8d436c
UD
2874
2875 /* check that one of the above allocation paths succeeded */
6c8dbf00
OB
2876 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2877 {
2878 remainder_size = size - nb;
2879 remainder = chunk_at_offset (p, nb);
2880 av->top = remainder;
2881 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2882 set_head (remainder, remainder_size | PREV_INUSE);
2883 check_malloced_chunk (av, p, nb);
2884 return chunk2mem (p);
2885 }
fa8d436c
UD
2886
2887 /* catch all failure paths */
8e58439c 2888 __set_errno (ENOMEM);
fa8d436c
UD
2889 return 0;
2890}
2891
2892
2893/*
6c8dbf00
OB
2894 systrim is an inverse of sorts to sysmalloc. It gives memory back
2895 to the system (via negative arguments to sbrk) if there is unused
2896 memory at the `high' end of the malloc pool. It is called
2897 automatically by free() when top space exceeds the trim
2898 threshold. It is also called by the public malloc_trim routine. It
2899 returns 1 if it actually released any memory, else 0.
2900 */
fa8d436c 2901
6c8dbf00
OB
2902static int
2903systrim (size_t pad, mstate av)
fa8d436c 2904{
6c8dbf00
OB
2905 long top_size; /* Amount of top-most memory */
2906 long extra; /* Amount to release */
2907 long released; /* Amount actually released */
2908 char *current_brk; /* address returned by pre-check sbrk call */
2909 char *new_brk; /* address returned by post-check sbrk call */
8a35c3fe 2910 size_t pagesize;
6c8dbf00 2911 long top_area;
fa8d436c 2912
8a35c3fe 2913 pagesize = GLRO (dl_pagesize);
6c8dbf00 2914 top_size = chunksize (av->top);
a9177ff5 2915
4b5b548c
FS
2916 top_area = top_size - MINSIZE - 1;
2917 if (top_area <= pad)
2918 return 0;
2919
ca6be165
CD
2920 /* Release in pagesize units and round down to the nearest page. */
2921 extra = ALIGN_DOWN(top_area - pad, pagesize);
a9177ff5 2922
51a7380b
WN
2923 if (extra == 0)
2924 return 0;
2925
4b5b548c 2926 /*
6c8dbf00
OB
2927 Only proceed if end of memory is where we last set it.
2928 This avoids problems if there were foreign sbrk calls.
2929 */
2930 current_brk = (char *) (MORECORE (0));
2931 if (current_brk == (char *) (av->top) + top_size)
2932 {
2933 /*
2934 Attempt to release memory. We ignore MORECORE return value,
2935 and instead call again to find out where new end of memory is.
2936 This avoids problems if first call releases less than we asked,
2937 of if failure somehow altered brk value. (We could still
2938 encounter problems if it altered brk in some very bad way,
2939 but the only thing we can do is adjust anyway, which will cause
2940 some downstream failure.)
2941 */
2942
2943 MORECORE (-extra);
2944 /* Call the `morecore' hook if necessary. */
2945 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2946 if (__builtin_expect (hook != NULL, 0))
2947 (*hook)();
2948 new_brk = (char *) (MORECORE (0));
2949
2950 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2951
2952 if (new_brk != (char *) MORECORE_FAILURE)
2953 {
2954 released = (long) (current_brk - new_brk);
2955
2956 if (released != 0)
2957 {
2958 /* Success. Adjust top. */
2959 av->system_mem -= released;
2960 set_head (av->top, (top_size - released) | PREV_INUSE);
2961 check_malloc_state (av);
2962 return 1;
2963 }
2964 }
fa8d436c 2965 }
fa8d436c 2966 return 0;
f65fd747
UD
2967}
2968
431c33c0 2969static void
6c8dbf00 2970munmap_chunk (mchunkptr p)
f65fd747 2971{
c0e82f11 2972 size_t pagesize = GLRO (dl_pagesize);
6c8dbf00 2973 INTERNAL_SIZE_T size = chunksize (p);
f65fd747 2974
6c8dbf00 2975 assert (chunk_is_mmapped (p));
8e635611 2976
4cf6c72f
FW
2977 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
2978 main arena. We never free this memory. */
2979 if (DUMPED_MAIN_ARENA_CHUNK (p))
2980 return;
2981
3784dfc0 2982 uintptr_t mem = (uintptr_t) chunk2rawmem (p);
e9c4fe93
FW
2983 uintptr_t block = (uintptr_t) p - prev_size (p);
2984 size_t total_size = prev_size (p) + size;
8e635611
UD
2985 /* Unfortunately we have to do the compilers job by hand here. Normally
2986 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2987 page size. But gcc does not recognize the optimization possibility
2988 (in the moment at least) so we combine the two values into one before
2989 the bit test. */
c0e82f11
IK
2990 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2991 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
ac3ed168 2992 malloc_printerr ("munmap_chunk(): invalid pointer");
f65fd747 2993
c6e4925d
OB
2994 atomic_decrement (&mp_.n_mmaps);
2995 atomic_add (&mp_.mmapped_mem, -total_size);
f65fd747 2996
6ef76f3b
UD
2997 /* If munmap failed the process virtual memory address space is in a
2998 bad shape. Just leave the block hanging around, the process will
2999 terminate shortly anyway since not much can be done. */
6c8dbf00 3000 __munmap ((char *) block, total_size);
f65fd747
UD
3001}
3002
3003#if HAVE_MREMAP
3004
431c33c0 3005static mchunkptr
6c8dbf00 3006mremap_chunk (mchunkptr p, size_t new_size)
f65fd747 3007{
8a35c3fe 3008 size_t pagesize = GLRO (dl_pagesize);
e9c4fe93 3009 INTERNAL_SIZE_T offset = prev_size (p);
6c8dbf00 3010 INTERNAL_SIZE_T size = chunksize (p);
f65fd747
UD
3011 char *cp;
3012
6c8dbf00 3013 assert (chunk_is_mmapped (p));
ebe544bf
IK
3014
3015 uintptr_t block = (uintptr_t) p - offset;
3016 uintptr_t mem = (uintptr_t) chunk2mem(p);
3017 size_t total_size = offset + size;
3018 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3019 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3020 malloc_printerr("mremap_chunk(): invalid pointer");
f65fd747
UD
3021
3022 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
8a35c3fe 3023 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
f65fd747 3024
68f3802d 3025 /* No need to remap if the number of pages does not change. */
ebe544bf 3026 if (total_size == new_size)
68f3802d
UD
3027 return p;
3028
ebe544bf 3029 cp = (char *) __mremap ((char *) block, total_size, new_size,
6c8dbf00 3030 MREMAP_MAYMOVE);
f65fd747 3031
6c8dbf00
OB
3032 if (cp == MAP_FAILED)
3033 return 0;
f65fd747 3034
6c8dbf00 3035 p = (mchunkptr) (cp + offset);
f65fd747 3036
3784dfc0 3037 assert (aligned_OK (chunk2rawmem (p)));
f65fd747 3038
e9c4fe93 3039 assert (prev_size (p) == offset);
6c8dbf00 3040 set_head (p, (new_size - offset) | IS_MMAPPED);
f65fd747 3041
c6e4925d
OB
3042 INTERNAL_SIZE_T new;
3043 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
6c8dbf00 3044 + new_size - size - offset;
c6e4925d 3045 atomic_max (&mp_.max_mmapped_mem, new);
f65fd747
UD
3046 return p;
3047}
f65fd747
UD
3048#endif /* HAVE_MREMAP */
3049
fa8d436c 3050/*------------------------ Public wrappers. --------------------------------*/
f65fd747 3051
d5c3fafc
DD
3052#if USE_TCACHE
3053
3054/* We overlay this structure on the user-data portion of a chunk when
3055 the chunk is stored in the per-thread cache. */
3056typedef struct tcache_entry
3057{
3058 struct tcache_entry *next;
bcdaad21
DD
3059 /* This field exists to detect double frees. */
3060 struct tcache_perthread_struct *key;
d5c3fafc
DD
3061} tcache_entry;
3062
3063/* There is one of these for each thread, which contains the
3064 per-thread cache (hence "tcache_perthread_struct"). Keeping
3065 overall size low is mildly important. Note that COUNTS and ENTRIES
3066 are redundant (we could have just counted the linked list each
3067 time), this is for performance reasons. */
3068typedef struct tcache_perthread_struct
3069{
1f50f2ad 3070 uint16_t counts[TCACHE_MAX_BINS];
d5c3fafc
DD
3071 tcache_entry *entries[TCACHE_MAX_BINS];
3072} tcache_perthread_struct;
3073
1e26d351 3074static __thread bool tcache_shutting_down = false;
d5c3fafc
DD
3075static __thread tcache_perthread_struct *tcache = NULL;
3076
3077/* Caller must ensure that we know tc_idx is valid and there's room
3078 for more chunks. */
e4dd4ace 3079static __always_inline void
d5c3fafc
DD
3080tcache_put (mchunkptr chunk, size_t tc_idx)
3081{
3082 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
bcdaad21
DD
3083
3084 /* Mark this chunk as "in the tcache" so the test in _int_free will
3085 detect a double free. */
3086 e->key = tcache;
3087
a1a486d7 3088 e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
d5c3fafc
DD
3089 tcache->entries[tc_idx] = e;
3090 ++(tcache->counts[tc_idx]);
3091}
3092
3093/* Caller must ensure that we know tc_idx is valid and there's
3094 available chunks to remove. */
e4dd4ace 3095static __always_inline void *
d5c3fafc
DD
3096tcache_get (size_t tc_idx)
3097{
3098 tcache_entry *e = tcache->entries[tc_idx];
49c3c376
EI
3099 if (__glibc_unlikely (!aligned_OK (e)))
3100 malloc_printerr ("malloc(): unaligned tcache chunk detected");
a1a486d7 3101 tcache->entries[tc_idx] = REVEAL_PTR (e->next);
d5c3fafc 3102 --(tcache->counts[tc_idx]);
bcdaad21 3103 e->key = NULL;
d5c3fafc
DD
3104 return (void *) e;
3105}
3106
0a947e06
FW
3107static void
3108tcache_thread_shutdown (void)
d5c3fafc
DD
3109{
3110 int i;
3111 tcache_perthread_struct *tcache_tmp = tcache;
3112
3113 if (!tcache)
3114 return;
3115
1e26d351 3116 /* Disable the tcache and prevent it from being reinitialized. */
d5c3fafc 3117 tcache = NULL;
1e26d351 3118 tcache_shutting_down = true;
d5c3fafc 3119
1e26d351
CD
3120 /* Free all of the entries and the tcache itself back to the arena
3121 heap for coalescing. */
d5c3fafc
DD
3122 for (i = 0; i < TCACHE_MAX_BINS; ++i)
3123 {
3124 while (tcache_tmp->entries[i])
3125 {
3126 tcache_entry *e = tcache_tmp->entries[i];
768358b6
EI
3127 if (__glibc_unlikely (!aligned_OK (e)))
3128 malloc_printerr ("tcache_thread_shutdown(): "
3129 "unaligned tcache chunk detected");
a1a486d7 3130 tcache_tmp->entries[i] = REVEAL_PTR (e->next);
d5c3fafc
DD
3131 __libc_free (e);
3132 }
3133 }
3134
3135 __libc_free (tcache_tmp);
d5c3fafc 3136}
d5c3fafc
DD
3137
3138static void
3139tcache_init(void)
3140{
3141 mstate ar_ptr;
3142 void *victim = 0;
3143 const size_t bytes = sizeof (tcache_perthread_struct);
3144
3145 if (tcache_shutting_down)
3146 return;
3147
3148 arena_get (ar_ptr, bytes);
3149 victim = _int_malloc (ar_ptr, bytes);
3150 if (!victim && ar_ptr != NULL)
3151 {
3152 ar_ptr = arena_get_retry (ar_ptr, bytes);
3153 victim = _int_malloc (ar_ptr, bytes);
3154 }
3155
3156
3157 if (ar_ptr != NULL)
3158 __libc_lock_unlock (ar_ptr->mutex);
3159
3160 /* In a low memory situation, we may not be able to allocate memory
3161 - in which case, we just keep trying later. However, we
3162 typically do this very early, so either there is sufficient
3163 memory, or there isn't enough memory to do non-trivial
3164 allocations anyway. */
3165 if (victim)
3166 {
3167 tcache = (tcache_perthread_struct *) victim;
3168 memset (tcache, 0, sizeof (tcache_perthread_struct));
3169 }
3170
3171}
3172
0a947e06 3173# define MAYBE_INIT_TCACHE() \
d5c3fafc
DD
3174 if (__glibc_unlikely (tcache == NULL)) \
3175 tcache_init();
3176
0a947e06
FW
3177#else /* !USE_TCACHE */
3178# define MAYBE_INIT_TCACHE()
3179
3180static void
3181tcache_thread_shutdown (void)
3182{
3183 /* Nothing to do if there is no thread cache. */
3184}
3185
3186#endif /* !USE_TCACHE */
d5c3fafc 3187
6c8dbf00
OB
3188void *
3189__libc_malloc (size_t bytes)
fa8d436c
UD
3190{
3191 mstate ar_ptr;
22a89187 3192 void *victim;
f65fd747 3193
9bf8e29c
AZ
3194 _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
3195 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3196
a222d91a 3197 void *(*hook) (size_t, const void *)
f3eeb3fc 3198 = atomic_forced_read (__malloc_hook);
bfacf1af 3199 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3200 return (*hook)(bytes, RETURN_ADDRESS (0));
d5c3fafc
DD
3201#if USE_TCACHE
3202 /* int_free also calls request2size, be careful to not pad twice. */
34697694 3203 size_t tbytes;
9bf8e29c
AZ
3204 if (!checked_request2size (bytes, &tbytes))
3205 {
3206 __set_errno (ENOMEM);
3207 return NULL;
3208 }
d5c3fafc
DD
3209 size_t tc_idx = csize2tidx (tbytes);
3210
3211 MAYBE_INIT_TCACHE ();
3212
3213 DIAG_PUSH_NEEDS_COMMENT;
3214 if (tc_idx < mp_.tcache_bins
d5c3fafc 3215 && tcache
1f50f2ad 3216 && tcache->counts[tc_idx] > 0)
d5c3fafc 3217 {
3784dfc0 3218 victim = tcache_get (tc_idx);
0c719cf4 3219 return tag_new_usable (victim);
d5c3fafc
DD
3220 }
3221 DIAG_POP_NEEDS_COMMENT;
3222#endif
f65fd747 3223
3f6bb8a3
WD
3224 if (SINGLE_THREAD_P)
3225 {
0c719cf4 3226 victim = tag_new_usable (_int_malloc (&main_arena, bytes));
3f6bb8a3
WD
3227 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3228 &main_arena == arena_for_chunk (mem2chunk (victim)));
3229 return victim;
3230 }
3231
94c5a52a 3232 arena_get (ar_ptr, bytes);
425ce2ed 3233
6c8dbf00 3234 victim = _int_malloc (ar_ptr, bytes);
fff94fa2
SP
3235 /* Retry with another arena only if we were able to find a usable arena
3236 before. */
3237 if (!victim && ar_ptr != NULL)
6c8dbf00
OB
3238 {
3239 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3240 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3241 victim = _int_malloc (ar_ptr, bytes);
60f0e64b 3242 }
fff94fa2
SP
3243
3244 if (ar_ptr != NULL)
4bf5f222 3245 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3246
0c719cf4 3247 victim = tag_new_usable (victim);
3784dfc0 3248
6c8dbf00
OB
3249 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3250 ar_ptr == arena_for_chunk (mem2chunk (victim)));
fa8d436c 3251 return victim;
f65fd747 3252}
6c8dbf00 3253libc_hidden_def (__libc_malloc)
f65fd747 3254
fa8d436c 3255void
6c8dbf00 3256__libc_free (void *mem)
f65fd747 3257{
fa8d436c
UD
3258 mstate ar_ptr;
3259 mchunkptr p; /* chunk corresponding to mem */
3260
a222d91a 3261 void (*hook) (void *, const void *)
f3eeb3fc 3262 = atomic_forced_read (__free_hook);
6c8dbf00
OB
3263 if (__builtin_expect (hook != NULL, 0))
3264 {
3265 (*hook)(mem, RETURN_ADDRESS (0));
3266 return;
3267 }
f65fd747 3268
fa8d436c
UD
3269 if (mem == 0) /* free(0) has no effect */
3270 return;
f65fd747 3271
3784dfc0
RE
3272#ifdef USE_MTAG
3273 /* Quickly check that the freed pointer matches the tag for the memory.
3274 This gives a useful double-free detection. */
3275 *(volatile char *)mem;
3276#endif
3277
69fda43b
PE
3278 int err = errno;
3279
6c8dbf00 3280 p = mem2chunk (mem);
f65fd747 3281
6c8dbf00
OB
3282 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3283 {
4cf6c72f
FW
3284 /* See if the dynamic brk/mmap threshold needs adjusting.
3285 Dumped fake mmapped chunks do not affect the threshold. */
6c8dbf00 3286 if (!mp_.no_dyn_threshold
e9c4fe93
FW
3287 && chunksize_nomask (p) > mp_.mmap_threshold
3288 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
4cf6c72f 3289 && !DUMPED_MAIN_ARENA_CHUNK (p))
6c8dbf00
OB
3290 {
3291 mp_.mmap_threshold = chunksize (p);
3292 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3293 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3294 mp_.mmap_threshold, mp_.trim_threshold);
3295 }
3296 munmap_chunk (p);
6c8dbf00 3297 }
69fda43b
PE
3298 else
3299 {
3300 MAYBE_INIT_TCACHE ();
f65fd747 3301
b9b85be6 3302 /* Mark the chunk as belonging to the library again. */
0c719cf4 3303 (void)tag_region (chunk2rawmem (p),
b9b85be6
SN
3304 CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
3305
69fda43b
PE
3306 ar_ptr = arena_for_chunk (p);
3307 _int_free (ar_ptr, p, 0);
3308 }
d5c3fafc 3309
69fda43b 3310 __set_errno (err);
f65fd747 3311}
3b49edc0 3312libc_hidden_def (__libc_free)
f65fd747 3313
6c8dbf00
OB
3314void *
3315__libc_realloc (void *oldmem, size_t bytes)
f65fd747 3316{
fa8d436c 3317 mstate ar_ptr;
6c8dbf00 3318 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 3319
6c8dbf00 3320 void *newp; /* chunk to return */
f65fd747 3321
a222d91a 3322 void *(*hook) (void *, size_t, const void *) =
f3eeb3fc 3323 atomic_forced_read (__realloc_hook);
bfacf1af 3324 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3325 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
f65fd747 3326
fa8d436c 3327#if REALLOC_ZERO_BYTES_FREES
6c8dbf00
OB
3328 if (bytes == 0 && oldmem != NULL)
3329 {
3330 __libc_free (oldmem); return 0;
3331 }
f65fd747 3332#endif
f65fd747 3333
fa8d436c 3334 /* realloc of null is supposed to be same as malloc */
6c8dbf00
OB
3335 if (oldmem == 0)
3336 return __libc_malloc (bytes);
f65fd747 3337
3784dfc0
RE
3338#ifdef USE_MTAG
3339 /* Perform a quick check to ensure that the pointer's tag matches the
3340 memory's tag. */
3341 *(volatile char*) oldmem;
3342#endif
3343
78ac92ad 3344 /* chunk corresponding to oldmem */
6c8dbf00 3345 const mchunkptr oldp = mem2chunk (oldmem);
78ac92ad 3346 /* its size */
6c8dbf00 3347 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
f65fd747 3348
fff94fa2
SP
3349 if (chunk_is_mmapped (oldp))
3350 ar_ptr = NULL;
3351 else
d5c3fafc
DD
3352 {
3353 MAYBE_INIT_TCACHE ();
3354 ar_ptr = arena_for_chunk (oldp);
3355 }
fff94fa2 3356
4cf6c72f
FW
3357 /* Little security check which won't hurt performance: the allocator
3358 never wrapps around at the end of the address space. Therefore
3359 we can exclude some size values which might appear here by
3360 accident or by "design" from some intruder. We need to bypass
3361 this check for dumped fake mmap chunks from the old main arena
3362 because the new malloc may provide additional alignment. */
3363 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3364 || __builtin_expect (misaligned_chunk (oldp), 0))
3365 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
ac3ed168 3366 malloc_printerr ("realloc(): invalid pointer");
dc165f7b 3367
9bf8e29c
AZ
3368 if (!checked_request2size (bytes, &nb))
3369 {
3370 __set_errno (ENOMEM);
3371 return NULL;
3372 }
f65fd747 3373
6c8dbf00
OB
3374 if (chunk_is_mmapped (oldp))
3375 {
4cf6c72f
FW
3376 /* If this is a faked mmapped chunk from the dumped main arena,
3377 always make a copy (and do not free the old chunk). */
3378 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3379 {
3380 /* Must alloc, copy, free. */
3381 void *newmem = __libc_malloc (bytes);
3382 if (newmem == 0)
3383 return NULL;
3384 /* Copy as many bytes as are available from the old chunk
1e8a8875 3385 and fit into the new size. NB: The overhead for faked
3784dfc0 3386 mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
1e8a8875
FW
3387 regular mmapped chunks. */
3388 if (bytes > oldsize - SIZE_SZ)
3389 bytes = oldsize - SIZE_SZ;
4cf6c72f
FW
3390 memcpy (newmem, oldmem, bytes);
3391 return newmem;
3392 }
3393
6c8dbf00 3394 void *newmem;
f65fd747 3395
fa8d436c 3396#if HAVE_MREMAP
6c8dbf00
OB
3397 newp = mremap_chunk (oldp, nb);
3398 if (newp)
3784dfc0
RE
3399 {
3400 void *newmem = chunk2rawmem (newp);
3401 /* Give the new block a different tag. This helps to ensure
3402 that stale handles to the previous mapping are not
3403 reused. There's a performance hit for both us and the
3404 caller for doing this, so we might want to
3405 reconsider. */
0c719cf4 3406 return tag_new_usable (newmem);
3784dfc0 3407 }
f65fd747 3408#endif
6c8dbf00
OB
3409 /* Note the extra SIZE_SZ overhead. */
3410 if (oldsize - SIZE_SZ >= nb)
3411 return oldmem; /* do nothing */
3412
3413 /* Must alloc, copy, free. */
3414 newmem = __libc_malloc (bytes);
3415 if (newmem == 0)
3416 return 0; /* propagate failure */
fa8d436c 3417
3784dfc0 3418 memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
6c8dbf00
OB
3419 munmap_chunk (oldp);
3420 return newmem;
3421 }
3422
3f6bb8a3
WD
3423 if (SINGLE_THREAD_P)
3424 {
3425 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3426 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3427 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3428
3429 return newp;
3430 }
3431
4bf5f222 3432 __libc_lock_lock (ar_ptr->mutex);
f65fd747 3433
6c8dbf00 3434 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
f65fd747 3435
4bf5f222 3436 __libc_lock_unlock (ar_ptr->mutex);
6c8dbf00
OB
3437 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3438 ar_ptr == arena_for_chunk (mem2chunk (newp)));
07014fca
UD
3439
3440 if (newp == NULL)
3441 {
3442 /* Try harder to allocate memory in other arenas. */
35fed6f1 3443 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
6c8dbf00 3444 newp = __libc_malloc (bytes);
07014fca 3445 if (newp != NULL)
6c8dbf00 3446 {
42cc9606
SN
3447 size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
3448 memcpy (newp, oldmem, sz);
0c719cf4 3449 (void) tag_region (chunk2rawmem (oldp), sz);
6c8dbf00
OB
3450 _int_free (ar_ptr, oldp, 0);
3451 }
07014fca
UD
3452 }
3453
fa8d436c
UD
3454 return newp;
3455}
3b49edc0 3456libc_hidden_def (__libc_realloc)
f65fd747 3457
6c8dbf00
OB
3458void *
3459__libc_memalign (size_t alignment, size_t bytes)
10ad46bc
OB
3460{
3461 void *address = RETURN_ADDRESS (0);
3462 return _mid_memalign (alignment, bytes, address);
3463}
3464
3465static void *
3466_mid_memalign (size_t alignment, size_t bytes, void *address)
fa8d436c
UD
3467{
3468 mstate ar_ptr;
22a89187 3469 void *p;
f65fd747 3470
a222d91a 3471 void *(*hook) (size_t, size_t, const void *) =
f3eeb3fc 3472 atomic_forced_read (__memalign_hook);
bfacf1af 3473 if (__builtin_expect (hook != NULL, 0))
10ad46bc 3474 return (*hook)(alignment, bytes, address);
f65fd747 3475
10ad46bc 3476 /* If we need less alignment than we give anyway, just relay to malloc. */
6c8dbf00
OB
3477 if (alignment <= MALLOC_ALIGNMENT)
3478 return __libc_malloc (bytes);
1228ed5c 3479
fa8d436c 3480 /* Otherwise, ensure that it is at least a minimum chunk size */
6c8dbf00
OB
3481 if (alignment < MINSIZE)
3482 alignment = MINSIZE;
f65fd747 3483
a56ee40b
WN
3484 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3485 power of 2 and will cause overflow in the check below. */
3486 if (alignment > SIZE_MAX / 2 + 1)
3487 {
3488 __set_errno (EINVAL);
3489 return 0;
3490 }
3491
10ad46bc
OB
3492
3493 /* Make sure alignment is power of 2. */
6c8dbf00
OB
3494 if (!powerof2 (alignment))
3495 {
3496 size_t a = MALLOC_ALIGNMENT * 2;
3497 while (a < alignment)
3498 a <<= 1;
3499 alignment = a;
3500 }
10ad46bc 3501
3f6bb8a3
WD
3502 if (SINGLE_THREAD_P)
3503 {
3504 p = _int_memalign (&main_arena, alignment, bytes);
3505 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3506 &main_arena == arena_for_chunk (mem2chunk (p)));
0c719cf4 3507 return tag_new_usable (p);
3f6bb8a3
WD
3508 }
3509
6c8dbf00 3510 arena_get (ar_ptr, bytes + alignment + MINSIZE);
6c8dbf00
OB
3511
3512 p = _int_memalign (ar_ptr, alignment, bytes);
fff94fa2 3513 if (!p && ar_ptr != NULL)
6c8dbf00
OB
3514 {
3515 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3516 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3517 p = _int_memalign (ar_ptr, alignment, bytes);
f65fd747 3518 }
fff94fa2
SP
3519
3520 if (ar_ptr != NULL)
4bf5f222 3521 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3522
6c8dbf00
OB
3523 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3524 ar_ptr == arena_for_chunk (mem2chunk (p)));
0c719cf4 3525 return tag_new_usable (p);
f65fd747 3526}
380d7e87 3527/* For ISO C11. */
3b49edc0
UD
3528weak_alias (__libc_memalign, aligned_alloc)
3529libc_hidden_def (__libc_memalign)
f65fd747 3530
6c8dbf00
OB
3531void *
3532__libc_valloc (size_t bytes)
fa8d436c 3533{
3784dfc0
RE
3534 void *p;
3535
6c8dbf00 3536 if (__malloc_initialized < 0)
fa8d436c 3537 ptmalloc_init ();
8088488d 3538
10ad46bc 3539 void *address = RETURN_ADDRESS (0);
8a35c3fe 3540 size_t pagesize = GLRO (dl_pagesize);
3784dfc0 3541 p = _mid_memalign (pagesize, bytes, address);
0c719cf4 3542 return tag_new_usable (p);
fa8d436c 3543}
f65fd747 3544
6c8dbf00
OB
3545void *
3546__libc_pvalloc (size_t bytes)
fa8d436c 3547{
3784dfc0
RE
3548 void *p;
3549
6c8dbf00 3550 if (__malloc_initialized < 0)
fa8d436c 3551 ptmalloc_init ();
8088488d 3552
10ad46bc 3553 void *address = RETURN_ADDRESS (0);
8a35c3fe 3554 size_t pagesize = GLRO (dl_pagesize);
9bf8e29c
AZ
3555 size_t rounded_bytes;
3556 /* ALIGN_UP with overflow check. */
3557 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3558 pagesize - 1,
3559 &rounded_bytes)))
1159a193
WN
3560 {
3561 __set_errno (ENOMEM);
3562 return 0;
3563 }
9bf8e29c 3564 rounded_bytes = rounded_bytes & -(pagesize - 1);
1159a193 3565
3784dfc0 3566 p = _mid_memalign (pagesize, rounded_bytes, address);
0c719cf4 3567 return tag_new_usable (p);
fa8d436c 3568}
f65fd747 3569
6c8dbf00
OB
3570void *
3571__libc_calloc (size_t n, size_t elem_size)
f65fd747 3572{
d6285c9f 3573 mstate av;
3784dfc0
RE
3574 mchunkptr oldtop;
3575 INTERNAL_SIZE_T sz, oldtopsize;
6c8dbf00 3576 void *mem;
3784dfc0 3577#ifndef USE_MTAG
d6285c9f
CD
3578 unsigned long clearsize;
3579 unsigned long nclears;
3580 INTERNAL_SIZE_T *d;
3784dfc0 3581#endif
9bf8e29c 3582 ptrdiff_t bytes;
0950889b 3583
9bf8e29c 3584 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
6c8dbf00 3585 {
9bf8e29c
AZ
3586 __set_errno (ENOMEM);
3587 return NULL;
d9af917d 3588 }
3784dfc0 3589
9bf8e29c 3590 sz = bytes;
0950889b 3591
a222d91a 3592 void *(*hook) (size_t, const void *) =
f3eeb3fc 3593 atomic_forced_read (__malloc_hook);
6c8dbf00
OB
3594 if (__builtin_expect (hook != NULL, 0))
3595 {
d6285c9f
CD
3596 mem = (*hook)(sz, RETURN_ADDRESS (0));
3597 if (mem == 0)
3598 return 0;
3599
3600 return memset (mem, 0, sz);
7799b7b3 3601 }
f65fd747 3602
d5c3fafc
DD
3603 MAYBE_INIT_TCACHE ();
3604
3f6bb8a3
WD
3605 if (SINGLE_THREAD_P)
3606 av = &main_arena;
3607 else
3608 arena_get (av, sz);
3609
fff94fa2
SP
3610 if (av)
3611 {
3612 /* Check if we hand out the top chunk, in which case there may be no
3613 need to clear. */
d6285c9f 3614#if MORECORE_CLEARS
fff94fa2
SP
3615 oldtop = top (av);
3616 oldtopsize = chunksize (top (av));
d6285c9f 3617# if MORECORE_CLEARS < 2
fff94fa2
SP
3618 /* Only newly allocated memory is guaranteed to be cleared. */
3619 if (av == &main_arena &&
3620 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3621 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
d6285c9f 3622# endif
fff94fa2
SP
3623 if (av != &main_arena)
3624 {
3625 heap_info *heap = heap_for_ptr (oldtop);
3626 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3627 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3628 }
3629#endif
3630 }
3631 else
d6285c9f 3632 {
fff94fa2
SP
3633 /* No usable arenas. */
3634 oldtop = 0;
3635 oldtopsize = 0;
d6285c9f 3636 }
d6285c9f
CD
3637 mem = _int_malloc (av, sz);
3638
d6285c9f
CD
3639 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3640 av == arena_for_chunk (mem2chunk (mem)));
3641
3f6bb8a3 3642 if (!SINGLE_THREAD_P)
d6285c9f 3643 {
3f6bb8a3
WD
3644 if (mem == 0 && av != NULL)
3645 {
3646 LIBC_PROBE (memory_calloc_retry, 1, sz);
3647 av = arena_get_retry (av, sz);
3648 mem = _int_malloc (av, sz);
3649 }
fff94fa2 3650
3f6bb8a3
WD
3651 if (av != NULL)
3652 __libc_lock_unlock (av->mutex);
3653 }
fff94fa2
SP
3654
3655 /* Allocation failed even after a retry. */
3656 if (mem == 0)
3657 return 0;
3658
3784dfc0
RE
3659 mchunkptr p = mem2chunk (mem);
3660 /* If we are using memory tagging, then we need to set the tags
3661 regardless of MORECORE_CLEARS, so we zero the whole block while
3662 doing so. */
3663#ifdef USE_MTAG
0c719cf4 3664 return tag_new_memset (mem, 0, CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
3784dfc0
RE
3665#else
3666 INTERNAL_SIZE_T csz = chunksize (p);
d6285c9f
CD
3667
3668 /* Two optional cases in which clearing not necessary */
3669 if (chunk_is_mmapped (p))
3670 {
3671 if (__builtin_expect (perturb_byte, 0))
3672 return memset (mem, 0, sz);
3673
3674 return mem;
3675 }
3676
d6285c9f
CD
3677#if MORECORE_CLEARS
3678 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3679 {
3680 /* clear only the bytes from non-freshly-sbrked memory */
3681 csz = oldtopsize;
3682 }
3683#endif
3684
3685 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3686 contents have an odd number of INTERNAL_SIZE_T-sized words;
3687 minimally 3. */
3688 d = (INTERNAL_SIZE_T *) mem;
3689 clearsize = csz - SIZE_SZ;
3690 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3691 assert (nclears >= 3);
3692
3693 if (nclears > 9)
3694 return memset (d, 0, clearsize);
3695
3696 else
3697 {
3698 *(d + 0) = 0;
3699 *(d + 1) = 0;
3700 *(d + 2) = 0;
3701 if (nclears > 4)
3702 {
3703 *(d + 3) = 0;
3704 *(d + 4) = 0;
3705 if (nclears > 6)
3706 {
3707 *(d + 5) = 0;
3708 *(d + 6) = 0;
3709 if (nclears > 8)
3710 {
3711 *(d + 7) = 0;
3712 *(d + 8) = 0;
3713 }
3714 }
3715 }
3716 }
3717
3718 return mem;
3784dfc0 3719#endif
fa8d436c 3720}
f65fd747 3721
f65fd747 3722/*
6c8dbf00
OB
3723 ------------------------------ malloc ------------------------------
3724 */
f65fd747 3725
6c8dbf00
OB
3726static void *
3727_int_malloc (mstate av, size_t bytes)
f65fd747 3728{
fa8d436c 3729 INTERNAL_SIZE_T nb; /* normalized request size */
6c8dbf00
OB
3730 unsigned int idx; /* associated bin index */
3731 mbinptr bin; /* associated bin */
f65fd747 3732
6c8dbf00 3733 mchunkptr victim; /* inspected/selected chunk */
fa8d436c 3734 INTERNAL_SIZE_T size; /* its size */
6c8dbf00 3735 int victim_index; /* its bin index */
f65fd747 3736
6c8dbf00
OB
3737 mchunkptr remainder; /* remainder from a split */
3738 unsigned long remainder_size; /* its size */
8a4b65b4 3739
6c8dbf00
OB
3740 unsigned int block; /* bit map traverser */
3741 unsigned int bit; /* bit map traverser */
3742 unsigned int map; /* current word of binmap */
8a4b65b4 3743
6c8dbf00
OB
3744 mchunkptr fwd; /* misc temp for linking */
3745 mchunkptr bck; /* misc temp for linking */
8a4b65b4 3746
d5c3fafc
DD
3747#if USE_TCACHE
3748 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3749#endif
3750
fa8d436c 3751 /*
6c8dbf00
OB
3752 Convert request size to internal form by adding SIZE_SZ bytes
3753 overhead plus possibly more to obtain necessary alignment and/or
3754 to obtain a size of at least MINSIZE, the smallest allocatable
9bf8e29c 3755 size. Also, checked_request2size returns false for request sizes
6c8dbf00
OB
3756 that are so large that they wrap around zero when padded and
3757 aligned.
3758 */
f65fd747 3759
9bf8e29c
AZ
3760 if (!checked_request2size (bytes, &nb))
3761 {
3762 __set_errno (ENOMEM);
3763 return NULL;
3764 }
f65fd747 3765
fff94fa2
SP
3766 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3767 mmap. */
3768 if (__glibc_unlikely (av == NULL))
3769 {
3770 void *p = sysmalloc (nb, av);
3771 if (p != NULL)
3772 alloc_perturb (p, bytes);
3773 return p;
3774 }
3775
fa8d436c 3776 /*
6c8dbf00
OB
3777 If the size qualifies as a fastbin, first check corresponding bin.
3778 This code is safe to execute even if av is not yet initialized, so we
3779 can try it without checking, which saves some time on this fast path.
3780 */
f65fd747 3781
71effcea
FW
3782#define REMOVE_FB(fb, victim, pp) \
3783 do \
3784 { \
3785 victim = pp; \
3786 if (victim == NULL) \
3787 break; \
a1a486d7 3788 pp = REVEAL_PTR (victim->fd); \
49c3c376 3789 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
a1a486d7 3790 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
71effcea 3791 } \
a1a486d7 3792 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
71effcea
FW
3793 != victim); \
3794
6c8dbf00
OB
3795 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3796 {
3797 idx = fastbin_index (nb);
3798 mfastbinptr *fb = &fastbin (av, idx);
71effcea
FW
3799 mchunkptr pp;
3800 victim = *fb;
3801
905a7725
WD
3802 if (victim != NULL)
3803 {
49c3c376
EI
3804 if (__glibc_unlikely (misaligned_chunk (victim)))
3805 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
a1a486d7 3806
71effcea 3807 if (SINGLE_THREAD_P)
a1a486d7 3808 *fb = REVEAL_PTR (victim->fd);
71effcea
FW
3809 else
3810 REMOVE_FB (fb, pp, victim);
3811 if (__glibc_likely (victim != NULL))
6923f6db 3812 {
71effcea
FW
3813 size_t victim_idx = fastbin_index (chunksize (victim));
3814 if (__builtin_expect (victim_idx != idx, 0))
3815 malloc_printerr ("malloc(): memory corruption (fast)");
3816 check_remalloced_chunk (av, victim, nb);
3817#if USE_TCACHE
3818 /* While we're here, if we see other chunks of the same size,
3819 stash them in the tcache. */
3820 size_t tc_idx = csize2tidx (nb);
3821 if (tcache && tc_idx < mp_.tcache_bins)
d5c3fafc 3822 {
71effcea
FW
3823 mchunkptr tc_victim;
3824
3825 /* While bin not empty and tcache not full, copy chunks. */
3826 while (tcache->counts[tc_idx] < mp_.tcache_count
3827 && (tc_victim = *fb) != NULL)
3828 {
49c3c376
EI
3829 if (__glibc_unlikely (misaligned_chunk (tc_victim)))
3830 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
71effcea 3831 if (SINGLE_THREAD_P)
a1a486d7 3832 *fb = REVEAL_PTR (tc_victim->fd);
71effcea
FW
3833 else
3834 {
3835 REMOVE_FB (fb, pp, tc_victim);
3836 if (__glibc_unlikely (tc_victim == NULL))
3837 break;
3838 }
3839 tcache_put (tc_victim, tc_idx);
3840 }
d5c3fafc 3841 }
6923f6db 3842#endif
71effcea
FW
3843 void *p = chunk2mem (victim);
3844 alloc_perturb (p, bytes);
3845 return p;
3846 }
905a7725 3847 }
fa8d436c 3848 }
f65fd747 3849
fa8d436c 3850 /*
6c8dbf00
OB
3851 If a small request, check regular bin. Since these "smallbins"
3852 hold one size each, no searching within bins is necessary.
3853 (For a large request, we need to wait until unsorted chunks are
3854 processed to find best fit. But for small ones, fits are exact
3855 anyway, so we can check now, which is faster.)
3856 */
3857
3858 if (in_smallbin_range (nb))
3859 {
3860 idx = smallbin_index (nb);
3861 bin = bin_at (av, idx);
3862
3863 if ((victim = last (bin)) != bin)
3864 {
3381be5c
WD
3865 bck = victim->bk;
3866 if (__glibc_unlikely (bck->fd != victim))
3867 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3868 set_inuse_bit_at_offset (victim, nb);
3869 bin->bk = bck;
3870 bck->fd = bin;
3871
3872 if (av != &main_arena)
3873 set_non_main_arena (victim);
3874 check_malloced_chunk (av, victim, nb);
d5c3fafc
DD
3875#if USE_TCACHE
3876 /* While we're here, if we see other chunks of the same size,
3877 stash them in the tcache. */
3878 size_t tc_idx = csize2tidx (nb);
3879 if (tcache && tc_idx < mp_.tcache_bins)
3880 {
3881 mchunkptr tc_victim;
3882
3883 /* While bin not empty and tcache not full, copy chunks over. */
3884 while (tcache->counts[tc_idx] < mp_.tcache_count
3885 && (tc_victim = last (bin)) != bin)
3886 {
3887 if (tc_victim != 0)
3888 {
3889 bck = tc_victim->bk;
3890 set_inuse_bit_at_offset (tc_victim, nb);
3891 if (av != &main_arena)
3892 set_non_main_arena (tc_victim);
3893 bin->bk = bck;
3894 bck->fd = bin;
3895
3896 tcache_put (tc_victim, tc_idx);
3897 }
3898 }
3899 }
3900#endif
3381be5c
WD
3901 void *p = chunk2mem (victim);
3902 alloc_perturb (p, bytes);
3903 return p;
6c8dbf00 3904 }
fa8d436c 3905 }
f65fd747 3906
a9177ff5 3907 /*
fa8d436c
UD
3908 If this is a large request, consolidate fastbins before continuing.
3909 While it might look excessive to kill all fastbins before
3910 even seeing if there is space available, this avoids
3911 fragmentation problems normally associated with fastbins.
3912 Also, in practice, programs tend to have runs of either small or
a9177ff5 3913 large requests, but less often mixtures, so consolidation is not
fa8d436c
UD
3914 invoked all that often in most programs. And the programs that
3915 it is called frequently in otherwise tend to fragment.
6c8dbf00 3916 */
7799b7b3 3917
6c8dbf00
OB
3918 else
3919 {
3920 idx = largebin_index (nb);
e956075a 3921 if (atomic_load_relaxed (&av->have_fastchunks))
6c8dbf00
OB
3922 malloc_consolidate (av);
3923 }
f65fd747 3924
fa8d436c 3925 /*
6c8dbf00
OB
3926 Process recently freed or remaindered chunks, taking one only if
3927 it is exact fit, or, if this a small request, the chunk is remainder from
3928 the most recent non-exact fit. Place other traversed chunks in
3929 bins. Note that this step is the only place in any routine where
3930 chunks are placed in bins.
3931
3932 The outer loop here is needed because we might not realize until
3933 near the end of malloc that we should have consolidated, so must
3934 do so and retry. This happens at most once, and only when we would
3935 otherwise need to expand memory to service a "small" request.
3936 */
3937
d5c3fafc
DD
3938#if USE_TCACHE
3939 INTERNAL_SIZE_T tcache_nb = 0;
3940 size_t tc_idx = csize2tidx (nb);
3941 if (tcache && tc_idx < mp_.tcache_bins)
3942 tcache_nb = nb;
3943 int return_cached = 0;
3944
3945 tcache_unsorted_count = 0;
3946#endif
3947
6c8dbf00
OB
3948 for (;; )
3949 {
3950 int iters = 0;
3951 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3952 {
3953 bck = victim->bk;
6c8dbf00 3954 size = chunksize (victim);
b90ddd08
IK
3955 mchunkptr next = chunk_at_offset (victim, size);
3956
3784dfc0 3957 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
b90ddd08
IK
3958 || __glibc_unlikely (size > av->system_mem))
3959 malloc_printerr ("malloc(): invalid size (unsorted)");
3784dfc0 3960 if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
b90ddd08
IK
3961 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
3962 malloc_printerr ("malloc(): invalid next size (unsorted)");
3963 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
3964 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
3965 if (__glibc_unlikely (bck->fd != victim)
3966 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
3967 malloc_printerr ("malloc(): unsorted double linked list corrupted");
35cfefd9 3968 if (__glibc_unlikely (prev_inuse (next)))
b90ddd08 3969 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
6c8dbf00
OB
3970
3971 /*
3972 If a small request, try to use last remainder if it is the
3973 only chunk in unsorted bin. This helps promote locality for
3974 runs of consecutive small requests. This is the only
3975 exception to best-fit, and applies only when there is
3976 no exact fit for a small chunk.
3977 */
3978
3979 if (in_smallbin_range (nb) &&
3980 bck == unsorted_chunks (av) &&
3981 victim == av->last_remainder &&
3982 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3983 {
3984 /* split and reattach remainder */
3985 remainder_size = size - nb;
3986 remainder = chunk_at_offset (victim, nb);
3987 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3988 av->last_remainder = remainder;
3989 remainder->bk = remainder->fd = unsorted_chunks (av);
3990 if (!in_smallbin_range (remainder_size))
3991 {
3992 remainder->fd_nextsize = NULL;
3993 remainder->bk_nextsize = NULL;
3994 }
3995
3996 set_head (victim, nb | PREV_INUSE |
3997 (av != &main_arena ? NON_MAIN_ARENA : 0));
3998 set_head (remainder, remainder_size | PREV_INUSE);
3999 set_foot (remainder, remainder_size);
4000
4001 check_malloced_chunk (av, victim, nb);
4002 void *p = chunk2mem (victim);
4003 alloc_perturb (p, bytes);
4004 return p;
4005 }
4006
4007 /* remove from unsorted list */
bdc3009b
FG
4008 if (__glibc_unlikely (bck->fd != victim))
4009 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
6c8dbf00
OB
4010 unsorted_chunks (av)->bk = bck;
4011 bck->fd = unsorted_chunks (av);
4012
4013 /* Take now instead of binning if exact fit */
4014
4015 if (size == nb)
4016 {
4017 set_inuse_bit_at_offset (victim, size);
4018 if (av != &main_arena)
e9c4fe93 4019 set_non_main_arena (victim);
d5c3fafc
DD
4020#if USE_TCACHE
4021 /* Fill cache first, return to user only if cache fills.
4022 We may return one of these chunks later. */
4023 if (tcache_nb
4024 && tcache->counts[tc_idx] < mp_.tcache_count)
4025 {
4026 tcache_put (victim, tc_idx);
4027 return_cached = 1;
4028 continue;
4029 }
4030 else
4031 {
4032#endif
6c8dbf00
OB
4033 check_malloced_chunk (av, victim, nb);
4034 void *p = chunk2mem (victim);
4035 alloc_perturb (p, bytes);
4036 return p;
d5c3fafc
DD
4037#if USE_TCACHE
4038 }
4039#endif
6c8dbf00
OB
4040 }
4041
4042 /* place chunk in bin */
4043
4044 if (in_smallbin_range (size))
4045 {
4046 victim_index = smallbin_index (size);
4047 bck = bin_at (av, victim_index);
4048 fwd = bck->fd;
4049 }
4050 else
4051 {
4052 victim_index = largebin_index (size);
4053 bck = bin_at (av, victim_index);
4054 fwd = bck->fd;
4055
4056 /* maintain large bins in sorted order */
4057 if (fwd != bck)
4058 {
4059 /* Or with inuse bit to speed comparisons */
4060 size |= PREV_INUSE;
4061 /* if smaller than smallest, bypass loop below */
e9c4fe93
FW
4062 assert (chunk_main_arena (bck->bk));
4063 if ((unsigned long) (size)
4064 < (unsigned long) chunksize_nomask (bck->bk))
6c8dbf00
OB
4065 {
4066 fwd = bck;
4067 bck = bck->bk;
4068
4069 victim->fd_nextsize = fwd->fd;
4070 victim->bk_nextsize = fwd->fd->bk_nextsize;
4071 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4072 }
4073 else
4074 {
e9c4fe93
FW
4075 assert (chunk_main_arena (fwd));
4076 while ((unsigned long) size < chunksize_nomask (fwd))
6c8dbf00
OB
4077 {
4078 fwd = fwd->fd_nextsize;
e9c4fe93 4079 assert (chunk_main_arena (fwd));
6c8dbf00
OB
4080 }
4081
e9c4fe93
FW
4082 if ((unsigned long) size
4083 == (unsigned long) chunksize_nomask (fwd))
6c8dbf00
OB
4084 /* Always insert in the second position. */
4085 fwd = fwd->fd;
4086 else
4087 {
4088 victim->fd_nextsize = fwd;
4089 victim->bk_nextsize = fwd->bk_nextsize;
5b06f538
AM
4090 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
4091 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
6c8dbf00
OB
4092 fwd->bk_nextsize = victim;
4093 victim->bk_nextsize->fd_nextsize = victim;
4094 }
4095 bck = fwd->bk;
5b06f538
AM
4096 if (bck->fd != fwd)
4097 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
6c8dbf00
OB
4098 }
4099 }
4100 else
4101 victim->fd_nextsize = victim->bk_nextsize = victim;
4102 }
4103
4104 mark_bin (av, victim_index);
4105 victim->bk = bck;
4106 victim->fd = fwd;
4107 fwd->bk = victim;
4108 bck->fd = victim;
4109
d5c3fafc
DD
4110#if USE_TCACHE
4111 /* If we've processed as many chunks as we're allowed while
4112 filling the cache, return one of the cached ones. */
4113 ++tcache_unsorted_count;
4114 if (return_cached
4115 && mp_.tcache_unsorted_limit > 0
4116 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
4117 {
4118 return tcache_get (tc_idx);
4119 }
4120#endif
4121
6c8dbf00
OB
4122#define MAX_ITERS 10000
4123 if (++iters >= MAX_ITERS)
4124 break;
4125 }
fa8d436c 4126
d5c3fafc
DD
4127#if USE_TCACHE
4128 /* If all the small chunks we found ended up cached, return one now. */
4129 if (return_cached)
4130 {
4131 return tcache_get (tc_idx);
4132 }
4133#endif
4134
a9177ff5 4135 /*
6c8dbf00
OB
4136 If a large request, scan through the chunks of current bin in
4137 sorted order to find smallest that fits. Use the skip list for this.
4138 */
4139
4140 if (!in_smallbin_range (nb))
4141 {
4142 bin = bin_at (av, idx);
4143
4144 /* skip scan if empty or largest chunk is too small */
e9c4fe93
FW
4145 if ((victim = first (bin)) != bin
4146 && (unsigned long) chunksize_nomask (victim)
4147 >= (unsigned long) (nb))
6c8dbf00
OB
4148 {
4149 victim = victim->bk_nextsize;
4150 while (((unsigned long) (size = chunksize (victim)) <
4151 (unsigned long) (nb)))
4152 victim = victim->bk_nextsize;
4153
4154 /* Avoid removing the first entry for a size so that the skip
4155 list does not have to be rerouted. */
e9c4fe93
FW
4156 if (victim != last (bin)
4157 && chunksize_nomask (victim)
4158 == chunksize_nomask (victim->fd))
6c8dbf00
OB
4159 victim = victim->fd;
4160
4161 remainder_size = size - nb;
1ecba1fa 4162 unlink_chunk (av, victim);
6c8dbf00
OB
4163
4164 /* Exhaust */
4165 if (remainder_size < MINSIZE)
4166 {
4167 set_inuse_bit_at_offset (victim, size);
4168 if (av != &main_arena)
e9c4fe93 4169 set_non_main_arena (victim);
6c8dbf00
OB
4170 }
4171 /* Split */
4172 else
4173 {
4174 remainder = chunk_at_offset (victim, nb);
4175 /* We cannot assume the unsorted list is empty and therefore
4176 have to perform a complete insert here. */
4177 bck = unsorted_chunks (av);
4178 fwd = bck->fd;
ac3ed168
FW
4179 if (__glibc_unlikely (fwd->bk != bck))
4180 malloc_printerr ("malloc(): corrupted unsorted chunks");
6c8dbf00
OB
4181 remainder->bk = bck;
4182 remainder->fd = fwd;
4183 bck->fd = remainder;
4184 fwd->bk = remainder;
4185 if (!in_smallbin_range (remainder_size))
4186 {
4187 remainder->fd_nextsize = NULL;
4188 remainder->bk_nextsize = NULL;
4189 }
4190 set_head (victim, nb | PREV_INUSE |
4191 (av != &main_arena ? NON_MAIN_ARENA : 0));
4192 set_head (remainder, remainder_size | PREV_INUSE);
4193 set_foot (remainder, remainder_size);
4194 }
4195 check_malloced_chunk (av, victim, nb);
4196 void *p = chunk2mem (victim);
4197 alloc_perturb (p, bytes);
4198 return p;
4199 }
4200 }
f65fd747 4201
6c8dbf00
OB
4202 /*
4203 Search for a chunk by scanning bins, starting with next largest
4204 bin. This search is strictly by best-fit; i.e., the smallest
4205 (with ties going to approximately the least recently used) chunk
4206 that fits is selected.
4207
4208 The bitmap avoids needing to check that most blocks are nonempty.
4209 The particular case of skipping all bins during warm-up phases
4210 when no chunks have been returned yet is faster than it might look.
4211 */
4212
4213 ++idx;
4214 bin = bin_at (av, idx);
4215 block = idx2block (idx);
4216 map = av->binmap[block];
4217 bit = idx2bit (idx);
4218
4219 for (;; )
4220 {
4221 /* Skip rest of block if there are no more set bits in this block. */
4222 if (bit > map || bit == 0)
4223 {
4224 do
4225 {
4226 if (++block >= BINMAPSIZE) /* out of bins */
4227 goto use_top;
4228 }
4229 while ((map = av->binmap[block]) == 0);
4230
4231 bin = bin_at (av, (block << BINMAPSHIFT));
4232 bit = 1;
4233 }
4234
4235 /* Advance to bin with set bit. There must be one. */
4236 while ((bit & map) == 0)
4237 {
4238 bin = next_bin (bin);
4239 bit <<= 1;
4240 assert (bit != 0);
4241 }
4242
4243 /* Inspect the bin. It is likely to be non-empty */
4244 victim = last (bin);
4245
4246 /* If a false alarm (empty bin), clear the bit. */
4247 if (victim == bin)
4248 {
4249 av->binmap[block] = map &= ~bit; /* Write through */
4250 bin = next_bin (bin);
4251 bit <<= 1;
4252 }
4253
4254 else
4255 {
4256 size = chunksize (victim);
4257
4258 /* We know the first chunk in this bin is big enough to use. */
4259 assert ((unsigned long) (size) >= (unsigned long) (nb));
4260
4261 remainder_size = size - nb;
4262
4263 /* unlink */
1ecba1fa 4264 unlink_chunk (av, victim);
6c8dbf00
OB
4265
4266 /* Exhaust */
4267 if (remainder_size < MINSIZE)
4268 {
4269 set_inuse_bit_at_offset (victim, size);
4270 if (av != &main_arena)
e9c4fe93 4271 set_non_main_arena (victim);
6c8dbf00
OB
4272 }
4273
4274 /* Split */
4275 else
4276 {
4277 remainder = chunk_at_offset (victim, nb);
4278
4279 /* We cannot assume the unsorted list is empty and therefore
4280 have to perform a complete insert here. */
4281 bck = unsorted_chunks (av);
4282 fwd = bck->fd;
ac3ed168
FW
4283 if (__glibc_unlikely (fwd->bk != bck))
4284 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
6c8dbf00
OB
4285 remainder->bk = bck;
4286 remainder->fd = fwd;
4287 bck->fd = remainder;
4288 fwd->bk = remainder;
4289
4290 /* advertise as last remainder */
4291 if (in_smallbin_range (nb))
4292 av->last_remainder = remainder;
4293 if (!in_smallbin_range (remainder_size))
4294 {
4295 remainder->fd_nextsize = NULL;
4296 remainder->bk_nextsize = NULL;
4297 }
4298 set_head (victim, nb | PREV_INUSE |
4299 (av != &main_arena ? NON_MAIN_ARENA : 0));
4300 set_head (remainder, remainder_size | PREV_INUSE);
4301 set_foot (remainder, remainder_size);
4302 }
4303 check_malloced_chunk (av, victim, nb);
4304 void *p = chunk2mem (victim);
4305 alloc_perturb (p, bytes);
4306 return p;
4307 }
4308 }
4309
4310 use_top:
4311 /*
4312 If large enough, split off the chunk bordering the end of memory
4313 (held in av->top). Note that this is in accord with the best-fit
4314 search rule. In effect, av->top is treated as larger (and thus
4315 less well fitting) than any other available chunk since it can
4316 be extended to be as large as necessary (up to system
4317 limitations).
4318
4319 We require that av->top always exists (i.e., has size >=
4320 MINSIZE) after initialization, so if it would otherwise be
4321 exhausted by current request, it is replenished. (The main
4322 reason for ensuring it exists is that we may need MINSIZE space
4323 to put in fenceposts in sysmalloc.)
4324 */
4325
4326 victim = av->top;
4327 size = chunksize (victim);
4328
30a17d8c
PC
4329 if (__glibc_unlikely (size > av->system_mem))
4330 malloc_printerr ("malloc(): corrupted top size");
4331
6c8dbf00
OB
4332 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4333 {
4334 remainder_size = size - nb;
4335 remainder = chunk_at_offset (victim, nb);
4336 av->top = remainder;
4337 set_head (victim, nb | PREV_INUSE |
4338 (av != &main_arena ? NON_MAIN_ARENA : 0));
4339 set_head (remainder, remainder_size | PREV_INUSE);
4340
4341 check_malloced_chunk (av, victim, nb);
4342 void *p = chunk2mem (victim);
4343 alloc_perturb (p, bytes);
4344 return p;
4345 }
4346
4347 /* When we are using atomic ops to free fast chunks we can get
4348 here for all block sizes. */
e956075a 4349 else if (atomic_load_relaxed (&av->have_fastchunks))
6c8dbf00
OB
4350 {
4351 malloc_consolidate (av);
4352 /* restore original bin index */
4353 if (in_smallbin_range (nb))
4354 idx = smallbin_index (nb);
4355 else
4356 idx = largebin_index (nb);
4357 }
f65fd747 4358
6c8dbf00
OB
4359 /*
4360 Otherwise, relay to handle system-dependent cases
4361 */
425ce2ed 4362 else
6c8dbf00
OB
4363 {
4364 void *p = sysmalloc (nb, av);
4365 if (p != NULL)
4366 alloc_perturb (p, bytes);
4367 return p;
4368 }
425ce2ed 4369 }
fa8d436c 4370}
f65fd747 4371
fa8d436c 4372/*
6c8dbf00
OB
4373 ------------------------------ free ------------------------------
4374 */
f65fd747 4375
78ac92ad 4376static void
6c8dbf00 4377_int_free (mstate av, mchunkptr p, int have_lock)
f65fd747 4378{
fa8d436c 4379 INTERNAL_SIZE_T size; /* its size */
6c8dbf00
OB
4380 mfastbinptr *fb; /* associated fastbin */
4381 mchunkptr nextchunk; /* next contiguous chunk */
fa8d436c 4382 INTERNAL_SIZE_T nextsize; /* its size */
6c8dbf00 4383 int nextinuse; /* true if nextchunk is used */
fa8d436c 4384 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
6c8dbf00
OB
4385 mchunkptr bck; /* misc temp for linking */
4386 mchunkptr fwd; /* misc temp for linking */
fa8d436c 4387
6c8dbf00 4388 size = chunksize (p);
f65fd747 4389
37fa1953
UD
4390 /* Little security check which won't hurt performance: the
4391 allocator never wrapps around at the end of the address space.
4392 Therefore we can exclude some size values which might appear
4393 here by accident or by "design" from some intruder. */
dc165f7b 4394 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
073f560e 4395 || __builtin_expect (misaligned_chunk (p), 0))
ac3ed168 4396 malloc_printerr ("free(): invalid pointer");
347c92e9
L
4397 /* We know that each chunk is at least MINSIZE bytes in size or a
4398 multiple of MALLOC_ALIGNMENT. */
a1ffb40e 4399 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
ac3ed168 4400 malloc_printerr ("free(): invalid size");
f65fd747 4401
37fa1953 4402 check_inuse_chunk(av, p);
f65fd747 4403
d5c3fafc
DD
4404#if USE_TCACHE
4405 {
4406 size_t tc_idx = csize2tidx (size);
affec03b 4407 if (tcache != NULL && tc_idx < mp_.tcache_bins)
d5c3fafc 4408 {
affec03b
FW
4409 /* Check to see if it's already in the tcache. */
4410 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4411
4412 /* This test succeeds on double free. However, we don't 100%
4413 trust it (it also matches random payload data at a 1 in
4414 2^<size_t> chance), so verify it's not an unlikely
4415 coincidence before aborting. */
4416 if (__glibc_unlikely (e->key == tcache))
4417 {
4418 tcache_entry *tmp;
0e00b357 4419 size_t cnt = 0;
affec03b
FW
4420 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4421 for (tmp = tcache->entries[tc_idx];
4422 tmp;
0e00b357 4423 tmp = REVEAL_PTR (tmp->next), ++cnt)
768358b6 4424 {
0e00b357
H
4425 if (cnt >= mp_.tcache_count)
4426 malloc_printerr ("free(): too many chunks detected in tcache");
768358b6
EI
4427 if (__glibc_unlikely (!aligned_OK (tmp)))
4428 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4429 if (tmp == e)
4430 malloc_printerr ("free(): double free detected in tcache 2");
4431 /* If we get here, it was a coincidence. We've wasted a
4432 few cycles, but don't abort. */
4433 }
affec03b
FW
4434 }
4435
4436 if (tcache->counts[tc_idx] < mp_.tcache_count)
4437 {
4438 tcache_put (p, tc_idx);
4439 return;
4440 }
d5c3fafc
DD
4441 }
4442 }
4443#endif
4444
37fa1953
UD
4445 /*
4446 If eligible, place chunk on a fastbin so it can be found
4447 and used quickly in malloc.
4448 */
6bf4302e 4449
9bf248c6 4450 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
6bf4302e 4451
37fa1953
UD
4452#if TRIM_FASTBINS
4453 /*
4454 If TRIM_FASTBINS set, don't place chunks
4455 bordering top into fastbins
4456 */
4457 && (chunk_at_offset(p, size) != av->top)
4458#endif
4459 ) {
fa8d436c 4460
e9c4fe93 4461 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
3784dfc0 4462 <= CHUNK_HDR_SZ, 0)
893e6098
UD
4463 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4464 >= av->system_mem, 0))
4465 {
d74e6f6c 4466 bool fail = true;
bec466d9 4467 /* We might not have a lock at this point and concurrent modifications
d74e6f6c
WD
4468 of system_mem might result in a false positive. Redo the test after
4469 getting the lock. */
4470 if (!have_lock)
4471 {
4472 __libc_lock_lock (av->mutex);
3784dfc0 4473 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
d74e6f6c
WD
4474 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4475 __libc_lock_unlock (av->mutex);
4476 }
4477
4478 if (fail)
ac3ed168 4479 malloc_printerr ("free(): invalid next size (fast)");
893e6098
UD
4480 }
4481
3784dfc0 4482 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
425ce2ed 4483
e956075a 4484 atomic_store_relaxed (&av->have_fastchunks, true);
90a3055e
UD
4485 unsigned int idx = fastbin_index(size);
4486 fb = &fastbin (av, idx);
425ce2ed 4487
362b47fe 4488 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
71effcea
FW
4489 mchunkptr old = *fb, old2;
4490
4491 if (SINGLE_THREAD_P)
4492 {
4493 /* Check that the top of the bin is not the record we are going to
4494 add (i.e., double free). */
4495 if (__builtin_expect (old == p, 0))
4496 malloc_printerr ("double free or corruption (fasttop)");
a1a486d7 4497 p->fd = PROTECT_PTR (&p->fd, old);
71effcea
FW
4498 *fb = p;
4499 }
4500 else
4501 do
4502 {
4503 /* Check that the top of the bin is not the record we are going to
4504 add (i.e., double free). */
4505 if (__builtin_expect (old == p, 0))
4506 malloc_printerr ("double free or corruption (fasttop)");
a1a486d7
EI
4507 old2 = old;
4508 p->fd = PROTECT_PTR (&p->fd, old);
71effcea
FW
4509 }
4510 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4511 != old2);
a15d53e2
WD
4512
4513 /* Check that size of fastbin chunk at the top is the same as
4514 size of the chunk that we are adding. We can dereference OLD
4515 only if we have the lock, otherwise it might have already been
4516 allocated again. */
4517 if (have_lock && old != NULL
4518 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
ac3ed168 4519 malloc_printerr ("invalid fastbin entry (free)");
37fa1953 4520 }
f65fd747 4521
37fa1953
UD
4522 /*
4523 Consolidate other non-mmapped chunks as they arrive.
4524 */
fa8d436c 4525
37fa1953 4526 else if (!chunk_is_mmapped(p)) {
a15d53e2
WD
4527
4528 /* If we're single-threaded, don't lock the arena. */
4529 if (SINGLE_THREAD_P)
4530 have_lock = true;
4531
24cffce7 4532 if (!have_lock)
4bf5f222 4533 __libc_lock_lock (av->mutex);
425ce2ed 4534
37fa1953 4535 nextchunk = chunk_at_offset(p, size);
fa8d436c 4536
37fa1953
UD
4537 /* Lightweight tests: check whether the block is already the
4538 top block. */
a1ffb40e 4539 if (__glibc_unlikely (p == av->top))
ac3ed168 4540 malloc_printerr ("double free or corruption (top)");
37fa1953
UD
4541 /* Or whether the next chunk is beyond the boundaries of the arena. */
4542 if (__builtin_expect (contiguous (av)
4543 && (char *) nextchunk
4544 >= ((char *) av->top + chunksize(av->top)), 0))
ac3ed168 4545 malloc_printerr ("double free or corruption (out)");
37fa1953 4546 /* Or whether the block is actually not marked used. */
a1ffb40e 4547 if (__glibc_unlikely (!prev_inuse(nextchunk)))
ac3ed168 4548 malloc_printerr ("double free or corruption (!prev)");
fa8d436c 4549
37fa1953 4550 nextsize = chunksize(nextchunk);
3784dfc0 4551 if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
893e6098 4552 || __builtin_expect (nextsize >= av->system_mem, 0))
ac3ed168 4553 malloc_printerr ("free(): invalid next size (normal)");
fa8d436c 4554
3784dfc0 4555 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
854278df 4556
37fa1953
UD
4557 /* consolidate backward */
4558 if (!prev_inuse(p)) {
e9c4fe93 4559 prevsize = prev_size (p);
37fa1953
UD
4560 size += prevsize;
4561 p = chunk_at_offset(p, -((long) prevsize));
d6db68e6
ME
4562 if (__glibc_unlikely (chunksize(p) != prevsize))
4563 malloc_printerr ("corrupted size vs. prev_size while consolidating");
1ecba1fa 4564 unlink_chunk (av, p);
37fa1953 4565 }
a9177ff5 4566
37fa1953
UD
4567 if (nextchunk != av->top) {
4568 /* get and clear inuse bit */
4569 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4570
4571 /* consolidate forward */
4572 if (!nextinuse) {
1ecba1fa 4573 unlink_chunk (av, nextchunk);
37fa1953
UD
4574 size += nextsize;
4575 } else
4576 clear_inuse_bit_at_offset(nextchunk, 0);
10dc2a90 4577
fa8d436c 4578 /*
37fa1953
UD
4579 Place the chunk in unsorted chunk list. Chunks are
4580 not placed into regular bins until after they have
4581 been given one chance to be used in malloc.
fa8d436c 4582 */
f65fd747 4583
37fa1953
UD
4584 bck = unsorted_chunks(av);
4585 fwd = bck->fd;
a1ffb40e 4586 if (__glibc_unlikely (fwd->bk != bck))
ac3ed168 4587 malloc_printerr ("free(): corrupted unsorted chunks");
37fa1953 4588 p->fd = fwd;
7ecfbd38
UD
4589 p->bk = bck;
4590 if (!in_smallbin_range(size))
4591 {
4592 p->fd_nextsize = NULL;
4593 p->bk_nextsize = NULL;
4594 }
37fa1953
UD
4595 bck->fd = p;
4596 fwd->bk = p;
8a4b65b4 4597
37fa1953
UD
4598 set_head(p, size | PREV_INUSE);
4599 set_foot(p, size);
4600
4601 check_free_chunk(av, p);
4602 }
4603
4604 /*
4605 If the chunk borders the current high end of memory,
4606 consolidate into top
4607 */
4608
4609 else {
4610 size += nextsize;
4611 set_head(p, size | PREV_INUSE);
4612 av->top = p;
4613 check_chunk(av, p);
4614 }
4615
4616 /*
4617 If freeing a large space, consolidate possibly-surrounding
4618 chunks. Then, if the total unused topmost memory exceeds trim
4619 threshold, ask malloc_trim to reduce top.
4620
4621 Unless max_fast is 0, we don't know if there are fastbins
4622 bordering top, so we cannot tell for sure whether threshold
4623 has been reached unless fastbins are consolidated. But we
4624 don't want to consolidate on each free. As a compromise,
4625 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4626 is reached.
4627 */
fa8d436c 4628
37fa1953 4629 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
e956075a 4630 if (atomic_load_relaxed (&av->have_fastchunks))
37fa1953 4631 malloc_consolidate(av);
fa8d436c 4632
37fa1953 4633 if (av == &main_arena) {
a9177ff5 4634#ifndef MORECORE_CANNOT_TRIM
37fa1953
UD
4635 if ((unsigned long)(chunksize(av->top)) >=
4636 (unsigned long)(mp_.trim_threshold))
3b49edc0 4637 systrim(mp_.top_pad, av);
fa8d436c 4638#endif
37fa1953
UD
4639 } else {
4640 /* Always try heap_trim(), even if the top chunk is not
4641 large, because the corresponding heap might go away. */
4642 heap_info *heap = heap_for_ptr(top(av));
fa8d436c 4643
37fa1953
UD
4644 assert(heap->ar_ptr == av);
4645 heap_trim(heap, mp_.top_pad);
fa8d436c 4646 }
fa8d436c 4647 }
10dc2a90 4648
24cffce7 4649 if (!have_lock)
4bf5f222 4650 __libc_lock_unlock (av->mutex);
37fa1953
UD
4651 }
4652 /*
22a89187 4653 If the chunk was allocated via mmap, release via munmap().
37fa1953
UD
4654 */
4655
4656 else {
c120d94d 4657 munmap_chunk (p);
fa8d436c 4658 }
10dc2a90
UD
4659}
4660
fa8d436c
UD
4661/*
4662 ------------------------- malloc_consolidate -------------------------
4663
4664 malloc_consolidate is a specialized version of free() that tears
4665 down chunks held in fastbins. Free itself cannot be used for this
4666 purpose since, among other things, it might place chunks back onto
4667 fastbins. So, instead, we need to use a minor variant of the same
4668 code.
fa8d436c
UD
4669*/
4670
fa8d436c 4671static void malloc_consolidate(mstate av)
10dc2a90 4672{
fa8d436c
UD
4673 mfastbinptr* fb; /* current fastbin being consolidated */
4674 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4675 mchunkptr p; /* current chunk being consolidated */
4676 mchunkptr nextp; /* next chunk to consolidate */
4677 mchunkptr unsorted_bin; /* bin header */
4678 mchunkptr first_unsorted; /* chunk to link to */
4679
4680 /* These have same use as in free() */
4681 mchunkptr nextchunk;
4682 INTERNAL_SIZE_T size;
4683 INTERNAL_SIZE_T nextsize;
4684 INTERNAL_SIZE_T prevsize;
4685 int nextinuse;
10dc2a90 4686
3381be5c 4687 atomic_store_relaxed (&av->have_fastchunks, false);
10dc2a90 4688
3381be5c 4689 unsorted_bin = unsorted_chunks(av);
a9177ff5 4690
3381be5c
WD
4691 /*
4692 Remove each chunk from fast bin and consolidate it, placing it
4693 then in unsorted bin. Among other reasons for doing this,
4694 placing in unsorted bin avoids needing to calculate actual bins
4695 until malloc is sure that chunks aren't immediately going to be
4696 reused anyway.
4697 */
72f90263 4698
3381be5c
WD
4699 maxfb = &fastbin (av, NFASTBINS - 1);
4700 fb = &fastbin (av, 0);
4701 do {
71effcea 4702 p = atomic_exchange_acq (fb, NULL);
3381be5c
WD
4703 if (p != 0) {
4704 do {
249a5895 4705 {
49c3c376 4706 if (__glibc_unlikely (misaligned_chunk (p)))
768358b6 4707 malloc_printerr ("malloc_consolidate(): "
a1a486d7
EI
4708 "unaligned fastbin chunk detected");
4709
249a5895
IK
4710 unsigned int idx = fastbin_index (chunksize (p));
4711 if ((&fastbin (av, idx)) != fb)
4712 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4713 }
4714
3381be5c 4715 check_inuse_chunk(av, p);
a1a486d7 4716 nextp = REVEAL_PTR (p->fd);
3381be5c
WD
4717
4718 /* Slightly streamlined version of consolidation code in free() */
4719 size = chunksize (p);
4720 nextchunk = chunk_at_offset(p, size);
4721 nextsize = chunksize(nextchunk);
4722
4723 if (!prev_inuse(p)) {
4724 prevsize = prev_size (p);
4725 size += prevsize;
4726 p = chunk_at_offset(p, -((long) prevsize));
d6db68e6
ME
4727 if (__glibc_unlikely (chunksize(p) != prevsize))
4728 malloc_printerr ("corrupted size vs. prev_size in fastbins");
1ecba1fa 4729 unlink_chunk (av, p);
3381be5c 4730 }
72f90263 4731
3381be5c
WD
4732 if (nextchunk != av->top) {
4733 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
a9177ff5 4734
3381be5c
WD
4735 if (!nextinuse) {
4736 size += nextsize;
1ecba1fa 4737 unlink_chunk (av, nextchunk);
3381be5c
WD
4738 } else
4739 clear_inuse_bit_at_offset(nextchunk, 0);
a9177ff5 4740
3381be5c
WD
4741 first_unsorted = unsorted_bin->fd;
4742 unsorted_bin->fd = p;
4743 first_unsorted->bk = p;
7ecfbd38 4744
3381be5c
WD
4745 if (!in_smallbin_range (size)) {
4746 p->fd_nextsize = NULL;
4747 p->bk_nextsize = NULL;
72f90263 4748 }
a9177ff5 4749
3381be5c
WD
4750 set_head(p, size | PREV_INUSE);
4751 p->bk = unsorted_bin;
4752 p->fd = first_unsorted;
4753 set_foot(p, size);
4754 }
a9177ff5 4755
3381be5c
WD
4756 else {
4757 size += nextsize;
4758 set_head(p, size | PREV_INUSE);
4759 av->top = p;
4760 }
a9177ff5 4761
3381be5c
WD
4762 } while ( (p = nextp) != 0);
4763
4764 }
4765 } while (fb++ != maxfb);
fa8d436c 4766}
10dc2a90 4767
fa8d436c
UD
4768/*
4769 ------------------------------ realloc ------------------------------
4770*/
f65fd747 4771
22a89187 4772void*
4c8b8cc3
UD
4773_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4774 INTERNAL_SIZE_T nb)
fa8d436c 4775{
fa8d436c
UD
4776 mchunkptr newp; /* chunk to return */
4777 INTERNAL_SIZE_T newsize; /* its size */
22a89187 4778 void* newmem; /* corresponding user mem */
f65fd747 4779
fa8d436c 4780 mchunkptr next; /* next contiguous chunk after oldp */
f65fd747 4781
fa8d436c
UD
4782 mchunkptr remainder; /* extra space at end of newp */
4783 unsigned long remainder_size; /* its size */
f65fd747 4784
6dd6a580 4785 /* oldmem size */
3784dfc0 4786 if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
76761b63 4787 || __builtin_expect (oldsize >= av->system_mem, 0))
ac3ed168 4788 malloc_printerr ("realloc(): invalid old size");
76761b63 4789
6c8dbf00 4790 check_inuse_chunk (av, oldp);
f65fd747 4791
4c8b8cc3 4792 /* All callers already filter out mmap'ed chunks. */
6c8dbf00 4793 assert (!chunk_is_mmapped (oldp));
f65fd747 4794
6c8dbf00
OB
4795 next = chunk_at_offset (oldp, oldsize);
4796 INTERNAL_SIZE_T nextsize = chunksize (next);
3784dfc0 4797 if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
22a89187 4798 || __builtin_expect (nextsize >= av->system_mem, 0))
ac3ed168 4799 malloc_printerr ("realloc(): invalid next size");
22a89187 4800
6c8dbf00
OB
4801 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4802 {
4803 /* already big enough; split below */
fa8d436c 4804 newp = oldp;
6c8dbf00 4805 newsize = oldsize;
7799b7b3 4806 }
f65fd747 4807
6c8dbf00
OB
4808 else
4809 {
4810 /* Try to expand forward into top */
4811 if (next == av->top &&
4812 (unsigned long) (newsize = oldsize + nextsize) >=
4813 (unsigned long) (nb + MINSIZE))
4814 {
4815 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4816 av->top = chunk_at_offset (oldp, nb);
4817 set_head (av->top, (newsize - nb) | PREV_INUSE);
4818 check_inuse_chunk (av, oldp);
0c719cf4 4819 return tag_new_usable (chunk2rawmem (oldp));
6c8dbf00
OB
4820 }
4821
4822 /* Try to expand forward into next chunk; split off remainder below */
4823 else if (next != av->top &&
4824 !inuse (next) &&
4825 (unsigned long) (newsize = oldsize + nextsize) >=
4826 (unsigned long) (nb))
4827 {
4828 newp = oldp;
1ecba1fa 4829 unlink_chunk (av, next);
6c8dbf00
OB
4830 }
4831
4832 /* allocate, copy, free */
4833 else
4834 {
4835 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4836 if (newmem == 0)
4837 return 0; /* propagate failure */
4838
4839 newp = mem2chunk (newmem);
4840 newsize = chunksize (newp);
4841
4842 /*
4843 Avoid copy if newp is next chunk after oldp.
4844 */
4845 if (newp == next)
4846 {
4847 newsize += oldsize;
4848 newp = oldp;
4849 }
4850 else
4851 {
8ae909a5
SN
4852 void *oldmem = chunk2rawmem (oldp);
4853 size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
0c719cf4
SN
4854 (void) tag_region (oldmem, sz);
4855 newmem = tag_new_usable (newmem);
8ae909a5
SN
4856 memcpy (newmem, oldmem, sz);
4857 _int_free (av, oldp, 1);
4858 check_inuse_chunk (av, newp);
4859 return newmem;
6c8dbf00
OB
4860 }
4861 }
fa8d436c 4862 }
f65fd747 4863
22a89187 4864 /* If possible, free extra space in old or extended chunk */
f65fd747 4865
6c8dbf00 4866 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
f65fd747 4867
22a89187 4868 remainder_size = newsize - nb;
10dc2a90 4869
6c8dbf00
OB
4870 if (remainder_size < MINSIZE) /* not enough extra to split off */
4871 {
4872 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4873 set_inuse_bit_at_offset (newp, newsize);
4874 }
4875 else /* split remainder */
4876 {
4877 remainder = chunk_at_offset (newp, nb);
3784dfc0 4878 /* Clear any user-space tags before writing the header. */
0c719cf4 4879 remainder = tag_region (remainder, remainder_size);
6c8dbf00
OB
4880 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4881 set_head (remainder, remainder_size | PREV_INUSE |
4882 (av != &main_arena ? NON_MAIN_ARENA : 0));
4883 /* Mark remainder as inuse so free() won't complain */
4884 set_inuse_bit_at_offset (remainder, remainder_size);
4885 _int_free (av, remainder, 1);
4886 }
22a89187 4887
6c8dbf00 4888 check_inuse_chunk (av, newp);
0c719cf4 4889 return tag_new_usable (chunk2rawmem (newp));
fa8d436c
UD
4890}
4891
4892/*
6c8dbf00
OB
4893 ------------------------------ memalign ------------------------------
4894 */
fa8d436c 4895
6c8dbf00
OB
4896static void *
4897_int_memalign (mstate av, size_t alignment, size_t bytes)
fa8d436c
UD
4898{
4899 INTERNAL_SIZE_T nb; /* padded request size */
6c8dbf00
OB
4900 char *m; /* memory returned by malloc call */
4901 mchunkptr p; /* corresponding chunk */
4902 char *brk; /* alignment point within p */
4903 mchunkptr newp; /* chunk to return */
fa8d436c
UD
4904 INTERNAL_SIZE_T newsize; /* its size */
4905 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
6c8dbf00
OB
4906 mchunkptr remainder; /* spare room at end to split off */
4907 unsigned long remainder_size; /* its size */
fa8d436c 4908 INTERNAL_SIZE_T size;
f65fd747 4909
f65fd747 4910
f65fd747 4911
9bf8e29c
AZ
4912 if (!checked_request2size (bytes, &nb))
4913 {
4914 __set_errno (ENOMEM);
4915 return NULL;
4916 }
fa8d436c
UD
4917
4918 /*
6c8dbf00
OB
4919 Strategy: find a spot within that chunk that meets the alignment
4920 request, and then possibly free the leading and trailing space.
4921 */
fa8d436c 4922
fa8d436c
UD
4923 /* Call malloc with worst case padding to hit alignment. */
4924
6c8dbf00
OB
4925 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4926
4927 if (m == 0)
4928 return 0; /* propagate failure */
4929
4930 p = mem2chunk (m);
4931
4932 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4933
4934 { /*
4935 Find an aligned spot inside chunk. Since we need to give back
4936 leading space in a chunk of at least MINSIZE, if the first
4937 calculation places us at a spot with less than MINSIZE leader,
4938 we can move to the next aligned spot -- we've allocated enough
4939 total room so that this is always possible.
4940 */
4941 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4942 - ((signed long) alignment));
4943 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4944 brk += alignment;
4945
4946 newp = (mchunkptr) brk;
4947 leadsize = brk - (char *) (p);
4948 newsize = chunksize (p) - leadsize;
4949
4950 /* For mmapped chunks, just adjust offset */
4951 if (chunk_is_mmapped (p))
4952 {
e9c4fe93 4953 set_prev_size (newp, prev_size (p) + leadsize);
6c8dbf00
OB
4954 set_head (newp, newsize | IS_MMAPPED);
4955 return chunk2mem (newp);
4956 }
4957
4958 /* Otherwise, give back leader, use the rest */
4959 set_head (newp, newsize | PREV_INUSE |
4960 (av != &main_arena ? NON_MAIN_ARENA : 0));
4961 set_inuse_bit_at_offset (newp, newsize);
4962 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4963 _int_free (av, p, 1);
4964 p = newp;
4965
4966 assert (newsize >= nb &&
3784dfc0 4967 (((unsigned long) (chunk2rawmem (p))) % alignment) == 0);
f65fd747 4968 }
f65fd747 4969
f65fd747 4970 /* Also give back spare room at the end */
6c8dbf00
OB
4971 if (!chunk_is_mmapped (p))
4972 {
4973 size = chunksize (p);
4974 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4975 {
4976 remainder_size = size - nb;
4977 remainder = chunk_at_offset (p, nb);
4978 set_head (remainder, remainder_size | PREV_INUSE |
4979 (av != &main_arena ? NON_MAIN_ARENA : 0));
4980 set_head_size (p, nb);
4981 _int_free (av, remainder, 1);
4982 }
fa8d436c 4983 }
f65fd747 4984
6c8dbf00
OB
4985 check_inuse_chunk (av, p);
4986 return chunk2mem (p);
f65fd747
UD
4987}
4988
f65fd747 4989
fa8d436c 4990/*
6c8dbf00
OB
4991 ------------------------------ malloc_trim ------------------------------
4992 */
8a4b65b4 4993
6c8dbf00
OB
4994static int
4995mtrim (mstate av, size_t pad)
f65fd747 4996{
3381be5c 4997 /* Ensure all blocks are consolidated. */
68631c8e
UD
4998 malloc_consolidate (av);
4999
6c8dbf00 5000 const size_t ps = GLRO (dl_pagesize);
68631c8e
UD
5001 int psindex = bin_index (ps);
5002 const size_t psm1 = ps - 1;
5003
5004 int result = 0;
5005 for (int i = 1; i < NBINS; ++i)
5006 if (i == 1 || i >= psindex)
5007 {
6c8dbf00 5008 mbinptr bin = bin_at (av, i);
68631c8e 5009
6c8dbf00
OB
5010 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5011 {
5012 INTERNAL_SIZE_T size = chunksize (p);
68631c8e 5013
6c8dbf00
OB
5014 if (size > psm1 + sizeof (struct malloc_chunk))
5015 {
5016 /* See whether the chunk contains at least one unused page. */
5017 char *paligned_mem = (char *) (((uintptr_t) p
5018 + sizeof (struct malloc_chunk)
5019 + psm1) & ~psm1);
68631c8e 5020
3784dfc0
RE
5021 assert ((char *) chunk2rawmem (p) + 2 * CHUNK_HDR_SZ
5022 <= paligned_mem);
6c8dbf00 5023 assert ((char *) p + size > paligned_mem);
68631c8e 5024
6c8dbf00
OB
5025 /* This is the size we could potentially free. */
5026 size -= paligned_mem - (char *) p;
68631c8e 5027
6c8dbf00
OB
5028 if (size > psm1)
5029 {
439bda32 5030#if MALLOC_DEBUG
6c8dbf00
OB
5031 /* When debugging we simulate destroying the memory
5032 content. */
5033 memset (paligned_mem, 0x89, size & ~psm1);
68631c8e 5034#endif
6c8dbf00 5035 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
68631c8e 5036
6c8dbf00
OB
5037 result = 1;
5038 }
5039 }
5040 }
68631c8e 5041 }
8a4b65b4 5042
a9177ff5 5043#ifndef MORECORE_CANNOT_TRIM
3b49edc0 5044 return result | (av == &main_arena ? systrim (pad, av) : 0);
6c8dbf00 5045
8a4b65b4 5046#else
68631c8e 5047 return result;
f65fd747 5048#endif
f65fd747
UD
5049}
5050
f65fd747 5051
3b49edc0 5052int
6c8dbf00 5053__malloc_trim (size_t s)
3b49edc0
UD
5054{
5055 int result = 0;
5056
6c8dbf00 5057 if (__malloc_initialized < 0)
3b49edc0
UD
5058 ptmalloc_init ();
5059
5060 mstate ar_ptr = &main_arena;
5061 do
5062 {
4bf5f222 5063 __libc_lock_lock (ar_ptr->mutex);
3b49edc0 5064 result |= mtrim (ar_ptr, s);
4bf5f222 5065 __libc_lock_unlock (ar_ptr->mutex);
3b49edc0
UD
5066
5067 ar_ptr = ar_ptr->next;
5068 }
5069 while (ar_ptr != &main_arena);
5070
5071 return result;
5072}
5073
5074
f65fd747 5075/*
6c8dbf00
OB
5076 ------------------------- malloc_usable_size -------------------------
5077 */
f65fd747 5078
3b49edc0 5079static size_t
6c8dbf00 5080musable (void *mem)
f65fd747
UD
5081{
5082 mchunkptr p;
6c8dbf00
OB
5083 if (mem != 0)
5084 {
3784dfc0
RE
5085 size_t result = 0;
5086
6c8dbf00
OB
5087 p = mem2chunk (mem);
5088
5089 if (__builtin_expect (using_malloc_checking == 1, 0))
3784dfc0 5090 return malloc_check_get_size (p);
6c8dbf00
OB
5091
5092 if (chunk_is_mmapped (p))
073f8214
FW
5093 {
5094 if (DUMPED_MAIN_ARENA_CHUNK (p))
3784dfc0 5095 result = chunksize (p) - SIZE_SZ;
073f8214 5096 else
3784dfc0 5097 result = chunksize (p) - CHUNK_HDR_SZ;
073f8214 5098 }
6c8dbf00 5099 else if (inuse (p))
3784dfc0
RE
5100 result = chunksize (p) - SIZE_SZ;
5101
5102#ifdef USE_MTAG
5103 /* The usable space may be reduced if memory tagging is needed,
5104 since we cannot share the user-space data with malloc's internal
5105 data structure. */
0c719cf4 5106 result &= mtag_granule_mask;
3784dfc0
RE
5107#endif
5108 return result;
6c8dbf00 5109 }
fa8d436c 5110 return 0;
f65fd747
UD
5111}
5112
3b49edc0
UD
5113
5114size_t
6c8dbf00 5115__malloc_usable_size (void *m)
3b49edc0
UD
5116{
5117 size_t result;
5118
6c8dbf00 5119 result = musable (m);
3b49edc0
UD
5120 return result;
5121}
5122
fa8d436c 5123/*
6c8dbf00
OB
5124 ------------------------------ mallinfo ------------------------------
5125 Accumulate malloc statistics for arena AV into M.
5126 */
f65fd747 5127
bedee953 5128static void
e3960d1c 5129int_mallinfo (mstate av, struct mallinfo2 *m)
f65fd747 5130{
6dd67bd5 5131 size_t i;
f65fd747
UD
5132 mbinptr b;
5133 mchunkptr p;
f65fd747 5134 INTERNAL_SIZE_T avail;
fa8d436c
UD
5135 INTERNAL_SIZE_T fastavail;
5136 int nblocks;
5137 int nfastblocks;
f65fd747 5138
6c8dbf00 5139 check_malloc_state (av);
8a4b65b4 5140
fa8d436c 5141 /* Account for top */
6c8dbf00 5142 avail = chunksize (av->top);
fa8d436c 5143 nblocks = 1; /* top always exists */
f65fd747 5144
fa8d436c
UD
5145 /* traverse fastbins */
5146 nfastblocks = 0;
5147 fastavail = 0;
5148
6c8dbf00
OB
5149 for (i = 0; i < NFASTBINS; ++i)
5150 {
a1a486d7
EI
5151 for (p = fastbin (av, i);
5152 p != 0;
5153 p = REVEAL_PTR (p->fd))
6c8dbf00 5154 {
49c3c376 5155 if (__glibc_unlikely (misaligned_chunk (p)))
768358b6 5156 malloc_printerr ("int_mallinfo(): "
a1a486d7 5157 "unaligned fastbin chunk detected");
6c8dbf00
OB
5158 ++nfastblocks;
5159 fastavail += chunksize (p);
5160 }
fa8d436c 5161 }
fa8d436c
UD
5162
5163 avail += fastavail;
f65fd747 5164
fa8d436c 5165 /* traverse regular bins */
6c8dbf00
OB
5166 for (i = 1; i < NBINS; ++i)
5167 {
5168 b = bin_at (av, i);
5169 for (p = last (b); p != b; p = p->bk)
5170 {
5171 ++nblocks;
5172 avail += chunksize (p);
5173 }
fa8d436c 5174 }
f65fd747 5175
bedee953
PP
5176 m->smblks += nfastblocks;
5177 m->ordblks += nblocks;
5178 m->fordblks += avail;
5179 m->uordblks += av->system_mem - avail;
5180 m->arena += av->system_mem;
5181 m->fsmblks += fastavail;
5182 if (av == &main_arena)
5183 {
5184 m->hblks = mp_.n_mmaps;
5185 m->hblkhd = mp_.mmapped_mem;
ca135f82 5186 m->usmblks = 0;
6c8dbf00 5187 m->keepcost = chunksize (av->top);
bedee953 5188 }
fa8d436c 5189}
f65fd747 5190
3b49edc0 5191
e3960d1c
ML
5192struct mallinfo2
5193__libc_mallinfo2 (void)
3b49edc0 5194{
e3960d1c 5195 struct mallinfo2 m;
bedee953 5196 mstate ar_ptr;
3b49edc0 5197
6c8dbf00 5198 if (__malloc_initialized < 0)
3b49edc0 5199 ptmalloc_init ();
bedee953 5200
6c8dbf00 5201 memset (&m, 0, sizeof (m));
bedee953 5202 ar_ptr = &main_arena;
6c8dbf00
OB
5203 do
5204 {
4bf5f222 5205 __libc_lock_lock (ar_ptr->mutex);
6c8dbf00 5206 int_mallinfo (ar_ptr, &m);
4bf5f222 5207 __libc_lock_unlock (ar_ptr->mutex);
bedee953 5208
6c8dbf00
OB
5209 ar_ptr = ar_ptr->next;
5210 }
5211 while (ar_ptr != &main_arena);
bedee953 5212
3b49edc0
UD
5213 return m;
5214}
cdf64542 5215libc_hidden_def (__libc_mallinfo2)
3b49edc0 5216
e3960d1c
ML
5217struct mallinfo
5218__libc_mallinfo (void)
5219{
5220 struct mallinfo m;
5221 struct mallinfo2 m2 = __libc_mallinfo2 ();
5222
5223 m.arena = m2.arena;
5224 m.ordblks = m2.ordblks;
5225 m.smblks = m2.smblks;
5226 m.hblks = m2.hblks;
5227 m.hblkhd = m2.hblkhd;
5228 m.usmblks = m2.usmblks;
5229 m.fsmblks = m2.fsmblks;
5230 m.uordblks = m2.uordblks;
5231 m.fordblks = m2.fordblks;
5232 m.keepcost = m2.keepcost;
5233
5234 return m;
5235}
5236
5237
fa8d436c 5238/*
6c8dbf00
OB
5239 ------------------------------ malloc_stats ------------------------------
5240 */
f65fd747 5241
3b49edc0 5242void
60d2f8f3 5243__malloc_stats (void)
f65fd747 5244{
8a4b65b4 5245 int i;
fa8d436c 5246 mstate ar_ptr;
fa8d436c 5247 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
8a4b65b4 5248
6c8dbf00 5249 if (__malloc_initialized < 0)
a234e27d 5250 ptmalloc_init ();
8dab36a1 5251 _IO_flockfile (stderr);
9964a145
ZW
5252 int old_flags2 = stderr->_flags2;
5253 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
6c8dbf00
OB
5254 for (i = 0, ar_ptr = &main_arena;; i++)
5255 {
e3960d1c 5256 struct mallinfo2 mi;
6c8dbf00
OB
5257
5258 memset (&mi, 0, sizeof (mi));
4bf5f222 5259 __libc_lock_lock (ar_ptr->mutex);
6c8dbf00
OB
5260 int_mallinfo (ar_ptr, &mi);
5261 fprintf (stderr, "Arena %d:\n", i);
5262 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5263 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
fa8d436c 5264#if MALLOC_DEBUG > 1
6c8dbf00
OB
5265 if (i > 0)
5266 dump_heap (heap_for_ptr (top (ar_ptr)));
fa8d436c 5267#endif
6c8dbf00
OB
5268 system_b += mi.arena;
5269 in_use_b += mi.uordblks;
4bf5f222 5270 __libc_lock_unlock (ar_ptr->mutex);
6c8dbf00
OB
5271 ar_ptr = ar_ptr->next;
5272 if (ar_ptr == &main_arena)
5273 break;
5274 }
5275 fprintf (stderr, "Total (incl. mmap):\n");
5276 fprintf (stderr, "system bytes = %10u\n", system_b);
5277 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5278 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5279 fprintf (stderr, "max mmap bytes = %10lu\n",
5280 (unsigned long) mp_.max_mmapped_mem);
9964a145 5281 stderr->_flags2 = old_flags2;
8dab36a1 5282 _IO_funlockfile (stderr);
f65fd747
UD
5283}
5284
f65fd747
UD
5285
5286/*
6c8dbf00
OB
5287 ------------------------------ mallopt ------------------------------
5288 */
c2d8f0b7 5289static __always_inline int
be7991c0
SP
5290do_set_trim_threshold (size_t value)
5291{
5292 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5293 mp_.no_dyn_threshold);
5294 mp_.trim_threshold = value;
5295 mp_.no_dyn_threshold = 1;
5296 return 1;
5297}
5298
c2d8f0b7 5299static __always_inline int
be7991c0
SP
5300do_set_top_pad (size_t value)
5301{
5302 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5303 mp_.no_dyn_threshold);
5304 mp_.top_pad = value;
5305 mp_.no_dyn_threshold = 1;
5306 return 1;
5307}
5308
c2d8f0b7 5309static __always_inline int
be7991c0
SP
5310do_set_mmap_threshold (size_t value)
5311{
5312 /* Forbid setting the threshold too high. */
5313 if (value <= HEAP_MAX_SIZE / 2)
5314 {
5315 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5316 mp_.no_dyn_threshold);
5317 mp_.mmap_threshold = value;
5318 mp_.no_dyn_threshold = 1;
5319 return 1;
5320 }
5321 return 0;
5322}
5323
c2d8f0b7 5324static __always_inline int
be7991c0
SP
5325do_set_mmaps_max (int32_t value)
5326{
5327 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5328 mp_.no_dyn_threshold);
5329 mp_.n_mmaps_max = value;
5330 mp_.no_dyn_threshold = 1;
5331 return 1;
5332}
5333
c2d8f0b7 5334static __always_inline int
be7991c0
SP
5335do_set_mallopt_check (int32_t value)
5336{
be7991c0
SP
5337 return 1;
5338}
5339
c2d8f0b7 5340static __always_inline int
be7991c0
SP
5341do_set_perturb_byte (int32_t value)
5342{
5343 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5344 perturb_byte = value;
5345 return 1;
5346}
5347
c2d8f0b7 5348static __always_inline int
be7991c0
SP
5349do_set_arena_test (size_t value)
5350{
5351 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5352 mp_.arena_test = value;
5353 return 1;
5354}
5355
c2d8f0b7 5356static __always_inline int
be7991c0
SP
5357do_set_arena_max (size_t value)
5358{
5359 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5360 mp_.arena_max = value;
5361 return 1;
5362}
5363
d5c3fafc 5364#if USE_TCACHE
c2d8f0b7 5365static __always_inline int
d5c3fafc
DD
5366do_set_tcache_max (size_t value)
5367{
16554464 5368 if (value <= MAX_TCACHE_SIZE)
d5c3fafc
DD
5369 {
5370 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5371 mp_.tcache_max_bytes = value;
5372 mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
16554464 5373 return 1;
d5c3fafc 5374 }
16554464 5375 return 0;
d5c3fafc
DD
5376}
5377
c2d8f0b7 5378static __always_inline int
d5c3fafc
DD
5379do_set_tcache_count (size_t value)
5380{
5ad533e8
WD
5381 if (value <= MAX_TCACHE_COUNT)
5382 {
5383 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5384 mp_.tcache_count = value;
16554464 5385 return 1;
5ad533e8 5386 }
16554464 5387 return 0;
d5c3fafc
DD
5388}
5389
c2d8f0b7 5390static __always_inline int
d5c3fafc
DD
5391do_set_tcache_unsorted_limit (size_t value)
5392{
5393 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5394 mp_.tcache_unsorted_limit = value;
5395 return 1;
5396}
5397#endif
f65fd747 5398
c48d92b4
DD
5399static inline int
5400__always_inline
5401do_set_mxfast (size_t value)
5402{
16554464 5403 if (value <= MAX_FAST_SIZE)
c48d92b4
DD
5404 {
5405 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5406 set_max_fast (value);
5407 return 1;
5408 }
5409 return 0;
5410}
5411
6c8dbf00
OB
5412int
5413__libc_mallopt (int param_number, int value)
f65fd747 5414{
fa8d436c
UD
5415 mstate av = &main_arena;
5416 int res = 1;
f65fd747 5417
6c8dbf00 5418 if (__malloc_initialized < 0)
0cb71e02 5419 ptmalloc_init ();
4bf5f222 5420 __libc_lock_lock (av->mutex);
2f6d1f1b 5421
3ea5be54
AO
5422 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5423
3381be5c
WD
5424 /* We must consolidate main arena before changing max_fast
5425 (see definition of set_max_fast). */
5426 malloc_consolidate (av);
5427
16554464
DD
5428 /* Many of these helper functions take a size_t. We do not worry
5429 about overflow here, because negative int values will wrap to
5430 very large size_t values and the helpers have sufficient range
5431 checking for such conversions. Many of these helpers are also
5432 used by the tunables macros in arena.c. */
5433
6c8dbf00
OB
5434 switch (param_number)
5435 {
5436 case M_MXFAST:
16554464 5437 res = do_set_mxfast (value);
6c8dbf00
OB
5438 break;
5439
5440 case M_TRIM_THRESHOLD:
16554464 5441 res = do_set_trim_threshold (value);
6c8dbf00
OB
5442 break;
5443
5444 case M_TOP_PAD:
16554464 5445 res = do_set_top_pad (value);
6c8dbf00
OB
5446 break;
5447
5448 case M_MMAP_THRESHOLD:
be7991c0 5449 res = do_set_mmap_threshold (value);
6c8dbf00
OB
5450 break;
5451
5452 case M_MMAP_MAX:
16554464 5453 res = do_set_mmaps_max (value);
6c8dbf00
OB
5454 break;
5455
5456 case M_CHECK_ACTION:
16554464 5457 res = do_set_mallopt_check (value);
6c8dbf00
OB
5458 break;
5459
5460 case M_PERTURB:
16554464 5461 res = do_set_perturb_byte (value);
6c8dbf00
OB
5462 break;
5463
5464 case M_ARENA_TEST:
5465 if (value > 0)
16554464 5466 res = do_set_arena_test (value);
6c8dbf00
OB
5467 break;
5468
5469 case M_ARENA_MAX:
5470 if (value > 0)
16554464 5471 res = do_set_arena_max (value);
6c8dbf00
OB
5472 break;
5473 }
4bf5f222 5474 __libc_lock_unlock (av->mutex);
fa8d436c 5475 return res;
b22fc5f5 5476}
3b49edc0 5477libc_hidden_def (__libc_mallopt)
b22fc5f5 5478
10dc2a90 5479
a9177ff5 5480/*
6c8dbf00
OB
5481 -------------------- Alternative MORECORE functions --------------------
5482 */
10dc2a90 5483
b22fc5f5 5484
fa8d436c 5485/*
6c8dbf00 5486 General Requirements for MORECORE.
b22fc5f5 5487
6c8dbf00 5488 The MORECORE function must have the following properties:
b22fc5f5 5489
6c8dbf00 5490 If MORECORE_CONTIGUOUS is false:
10dc2a90 5491
6c8dbf00 5492 * MORECORE must allocate in multiples of pagesize. It will
fa8d436c 5493 only be called with arguments that are multiples of pagesize.
10dc2a90 5494
6c8dbf00 5495 * MORECORE(0) must return an address that is at least
fa8d436c 5496 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
10dc2a90 5497
6c8dbf00 5498 else (i.e. If MORECORE_CONTIGUOUS is true):
10dc2a90 5499
6c8dbf00 5500 * Consecutive calls to MORECORE with positive arguments
fa8d436c
UD
5501 return increasing addresses, indicating that space has been
5502 contiguously extended.
10dc2a90 5503
6c8dbf00 5504 * MORECORE need not allocate in multiples of pagesize.
fa8d436c 5505 Calls to MORECORE need not have args of multiples of pagesize.
10dc2a90 5506
6c8dbf00 5507 * MORECORE need not page-align.
10dc2a90 5508
6c8dbf00 5509 In either case:
10dc2a90 5510
6c8dbf00 5511 * MORECORE may allocate more memory than requested. (Or even less,
fa8d436c 5512 but this will generally result in a malloc failure.)
10dc2a90 5513
6c8dbf00 5514 * MORECORE must not allocate memory when given argument zero, but
fa8d436c
UD
5515 instead return one past the end address of memory from previous
5516 nonzero call. This malloc does NOT call MORECORE(0)
5517 until at least one call with positive arguments is made, so
5518 the initial value returned is not important.
10dc2a90 5519
6c8dbf00 5520 * Even though consecutive calls to MORECORE need not return contiguous
fa8d436c
UD
5521 addresses, it must be OK for malloc'ed chunks to span multiple
5522 regions in those cases where they do happen to be contiguous.
10dc2a90 5523
6c8dbf00 5524 * MORECORE need not handle negative arguments -- it may instead
fa8d436c
UD
5525 just return MORECORE_FAILURE when given negative arguments.
5526 Negative arguments are always multiples of pagesize. MORECORE
5527 must not misinterpret negative args as large positive unsigned
5528 args. You can suppress all such calls from even occurring by defining
5529 MORECORE_CANNOT_TRIM,
10dc2a90 5530
6c8dbf00
OB
5531 There is some variation across systems about the type of the
5532 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5533 actually be size_t, because sbrk supports negative args, so it is
5534 normally the signed type of the same width as size_t (sometimes
5535 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5536 matter though. Internally, we use "long" as arguments, which should
5537 work across all reasonable possibilities.
5538
5539 Additionally, if MORECORE ever returns failure for a positive
5540 request, then mmap is used as a noncontiguous system allocator. This
5541 is a useful backup strategy for systems with holes in address spaces
5542 -- in this case sbrk cannot contiguously expand the heap, but mmap
5543 may be able to map noncontiguous space.
5544
5545 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5546 a function that always returns MORECORE_FAILURE.
5547
5548 If you are using this malloc with something other than sbrk (or its
5549 emulation) to supply memory regions, you probably want to set
5550 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5551 allocator kindly contributed for pre-OSX macOS. It uses virtually
5552 but not necessarily physically contiguous non-paged memory (locked
5553 in, present and won't get swapped out). You can use it by
5554 uncommenting this section, adding some #includes, and setting up the
5555 appropriate defines above:
5556
5557 *#define MORECORE osMoreCore
5558 *#define MORECORE_CONTIGUOUS 0
5559
5560 There is also a shutdown routine that should somehow be called for
5561 cleanup upon program exit.
5562
5563 *#define MAX_POOL_ENTRIES 100
5564 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5565 static int next_os_pool;
5566 void *our_os_pools[MAX_POOL_ENTRIES];
5567
5568 void *osMoreCore(int size)
5569 {
fa8d436c
UD
5570 void *ptr = 0;
5571 static void *sbrk_top = 0;
ca34d7a7 5572
fa8d436c
UD
5573 if (size > 0)
5574 {
5575 if (size < MINIMUM_MORECORE_SIZE)
6c8dbf00 5576 size = MINIMUM_MORECORE_SIZE;
fa8d436c 5577 if (CurrentExecutionLevel() == kTaskLevel)
6c8dbf00 5578 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
fa8d436c
UD
5579 if (ptr == 0)
5580 {
6c8dbf00 5581 return (void *) MORECORE_FAILURE;
fa8d436c
UD
5582 }
5583 // save ptrs so they can be freed during cleanup
5584 our_os_pools[next_os_pool] = ptr;
5585 next_os_pool++;
5586 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5587 sbrk_top = (char *) ptr + size;
5588 return ptr;
5589 }
5590 else if (size < 0)
5591 {
5592 // we don't currently support shrink behavior
5593 return (void *) MORECORE_FAILURE;
5594 }
5595 else
5596 {
5597 return sbrk_top;
431c33c0 5598 }
6c8dbf00 5599 }
ca34d7a7 5600
6c8dbf00
OB
5601 // cleanup any allocated memory pools
5602 // called as last thing before shutting down driver
ca34d7a7 5603
6c8dbf00
OB
5604 void osCleanupMem(void)
5605 {
fa8d436c 5606 void **ptr;
ca34d7a7 5607
fa8d436c
UD
5608 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5609 if (*ptr)
5610 {
6c8dbf00
OB
5611 PoolDeallocate(*ptr);
5612 * ptr = 0;
fa8d436c 5613 }
6c8dbf00 5614 }
ee74a442 5615
6c8dbf00 5616 */
f65fd747 5617
7e3be507 5618
3e030bd5
UD
5619/* Helper code. */
5620
ae7f5313
UD
5621extern char **__libc_argv attribute_hidden;
5622
3e030bd5 5623static void
ac3ed168 5624malloc_printerr (const char *str)
3e030bd5 5625{
ec2c1fce
FW
5626 __libc_message (do_abort, "%s\n", str);
5627 __builtin_unreachable ();
3e030bd5
UD
5628}
5629
a204dbb2
UD
5630/* We need a wrapper function for one of the additions of POSIX. */
5631int
5632__posix_memalign (void **memptr, size_t alignment, size_t size)
5633{
5634 void *mem;
5635
5636 /* Test whether the SIZE argument is valid. It must be a power of
5637 two multiple of sizeof (void *). */
de02bd05 5638 if (alignment % sizeof (void *) != 0
fc56e970 5639 || !powerof2 (alignment / sizeof (void *))
de02bd05 5640 || alignment == 0)
a204dbb2
UD
5641 return EINVAL;
5642
10ad46bc
OB
5643
5644 void *address = RETURN_ADDRESS (0);
5645 mem = _mid_memalign (alignment, size, address);
a204dbb2 5646
6c8dbf00
OB
5647 if (mem != NULL)
5648 {
5649 *memptr = mem;
5650 return 0;
5651 }
a204dbb2
UD
5652
5653 return ENOMEM;
5654}
5655weak_alias (__posix_memalign, posix_memalign)
5656
20c13899
OB
5657
5658int
c52ff39e 5659__malloc_info (int options, FILE *fp)
bb066545 5660{
20c13899
OB
5661 /* For now, at least. */
5662 if (options != 0)
5663 return EINVAL;
bb066545 5664
20c13899
OB
5665 int n = 0;
5666 size_t total_nblocks = 0;
5667 size_t total_nfastblocks = 0;
5668 size_t total_avail = 0;
5669 size_t total_fastavail = 0;
5670 size_t total_system = 0;
5671 size_t total_max_system = 0;
5672 size_t total_aspace = 0;
5673 size_t total_aspace_mprotect = 0;
bb066545 5674
6c8dbf00 5675
6c8dbf00 5676
987c0269
OB
5677 if (__malloc_initialized < 0)
5678 ptmalloc_init ();
bb066545 5679
987c0269 5680 fputs ("<malloc version=\"1\">\n", fp);
bb066545 5681
987c0269
OB
5682 /* Iterate over all arenas currently in use. */
5683 mstate ar_ptr = &main_arena;
5684 do
5685 {
5686 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
8b35e35d 5687
987c0269
OB
5688 size_t nblocks = 0;
5689 size_t nfastblocks = 0;
5690 size_t avail = 0;
5691 size_t fastavail = 0;
5692 struct
5693 {
5694 size_t from;
5695 size_t to;
5696 size_t total;
5697 size_t count;
5698 } sizes[NFASTBINS + NBINS - 1];
5699#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
6c8dbf00 5700
4bf5f222 5701 __libc_lock_lock (ar_ptr->mutex);
bb066545 5702
b6d2c447
NH
5703 /* Account for top chunk. The top-most available chunk is
5704 treated specially and is never in any bin. See "initial_top"
5705 comments. */
5706 avail = chunksize (ar_ptr->top);
5707 nblocks = 1; /* Top always exists. */
5708
987c0269
OB
5709 for (size_t i = 0; i < NFASTBINS; ++i)
5710 {
5711 mchunkptr p = fastbin (ar_ptr, i);
5712 if (p != NULL)
5713 {
5714 size_t nthissize = 0;
5715 size_t thissize = chunksize (p);
5716
5717 while (p != NULL)
5718 {
49c3c376 5719 if (__glibc_unlikely (misaligned_chunk (p)))
768358b6 5720 malloc_printerr ("__malloc_info(): "
a1a486d7 5721 "unaligned fastbin chunk detected");
987c0269 5722 ++nthissize;
a1a486d7 5723 p = REVEAL_PTR (p->fd);
987c0269
OB
5724 }
5725
5726 fastavail += nthissize * thissize;
5727 nfastblocks += nthissize;
5728 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5729 sizes[i].to = thissize;
5730 sizes[i].count = nthissize;
5731 }
5732 else
5733 sizes[i].from = sizes[i].to = sizes[i].count = 0;
bb066545 5734
987c0269
OB
5735 sizes[i].total = sizes[i].count * sizes[i].to;
5736 }
bb066545 5737
bb066545 5738
987c0269
OB
5739 mbinptr bin;
5740 struct malloc_chunk *r;
bb066545 5741
987c0269
OB
5742 for (size_t i = 1; i < NBINS; ++i)
5743 {
5744 bin = bin_at (ar_ptr, i);
5745 r = bin->fd;
5746 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5747 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5748 = sizes[NFASTBINS - 1 + i].count = 0;
5749
5750 if (r != NULL)
5751 while (r != bin)
5752 {
e9c4fe93 5753 size_t r_size = chunksize_nomask (r);
987c0269 5754 ++sizes[NFASTBINS - 1 + i].count;
e9c4fe93 5755 sizes[NFASTBINS - 1 + i].total += r_size;
987c0269 5756 sizes[NFASTBINS - 1 + i].from
e9c4fe93 5757 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
987c0269 5758 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
e9c4fe93 5759 r_size);
987c0269
OB
5760
5761 r = r->fd;
5762 }
5763
5764 if (sizes[NFASTBINS - 1 + i].count == 0)
5765 sizes[NFASTBINS - 1 + i].from = 0;
5766 nblocks += sizes[NFASTBINS - 1 + i].count;
5767 avail += sizes[NFASTBINS - 1 + i].total;
5768 }
bb066545 5769
7a9368a1
FW
5770 size_t heap_size = 0;
5771 size_t heap_mprotect_size = 0;
34eb4157 5772 size_t heap_count = 0;
7a9368a1
FW
5773 if (ar_ptr != &main_arena)
5774 {
34eb4157 5775 /* Iterate over the arena heaps from back to front. */
7a9368a1 5776 heap_info *heap = heap_for_ptr (top (ar_ptr));
34eb4157
FW
5777 do
5778 {
5779 heap_size += heap->size;
5780 heap_mprotect_size += heap->mprotect_size;
5781 heap = heap->prev;
5782 ++heap_count;
5783 }
5784 while (heap != NULL);
7a9368a1
FW
5785 }
5786
4bf5f222 5787 __libc_lock_unlock (ar_ptr->mutex);
da2d2fb6 5788
987c0269
OB
5789 total_nfastblocks += nfastblocks;
5790 total_fastavail += fastavail;
0588a9cb 5791
987c0269
OB
5792 total_nblocks += nblocks;
5793 total_avail += avail;
0588a9cb 5794
987c0269
OB
5795 for (size_t i = 0; i < nsizes; ++i)
5796 if (sizes[i].count != 0 && i != NFASTBINS)
b0f6679b 5797 fprintf (fp, "\
987c0269
OB
5798 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5799 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
fdfd175d 5800
987c0269
OB
5801 if (sizes[NFASTBINS].count != 0)
5802 fprintf (fp, "\
5803 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5804 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5805 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
fdfd175d 5806
987c0269
OB
5807 total_system += ar_ptr->system_mem;
5808 total_max_system += ar_ptr->max_system_mem;
bb066545 5809
987c0269
OB
5810 fprintf (fp,
5811 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5812 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5813 "<system type=\"current\" size=\"%zu\"/>\n"
5814 "<system type=\"max\" size=\"%zu\"/>\n",
5815 nfastblocks, fastavail, nblocks, avail,
5816 ar_ptr->system_mem, ar_ptr->max_system_mem);
346bc35c 5817
987c0269
OB
5818 if (ar_ptr != &main_arena)
5819 {
987c0269
OB
5820 fprintf (fp,
5821 "<aspace type=\"total\" size=\"%zu\"/>\n"
34eb4157
FW
5822 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5823 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5824 heap_size, heap_mprotect_size, heap_count);
7a9368a1
FW
5825 total_aspace += heap_size;
5826 total_aspace_mprotect += heap_mprotect_size;
987c0269
OB
5827 }
5828 else
5829 {
5830 fprintf (fp,
5831 "<aspace type=\"total\" size=\"%zu\"/>\n"
5832 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5833 ar_ptr->system_mem, ar_ptr->system_mem);
5834 total_aspace += ar_ptr->system_mem;
5835 total_aspace_mprotect += ar_ptr->system_mem;
5836 }
bb066545 5837
987c0269 5838 fputs ("</heap>\n", fp);
bb066545
UD
5839 ar_ptr = ar_ptr->next;
5840 }
5841 while (ar_ptr != &main_arena);
5842
5843 fprintf (fp,
62a58816
SP
5844 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5845 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
9fa76613 5846 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
62a58816
SP
5847 "<system type=\"current\" size=\"%zu\"/>\n"
5848 "<system type=\"max\" size=\"%zu\"/>\n"
5849 "<aspace type=\"total\" size=\"%zu\"/>\n"
5850 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5851 "</malloc>\n",
5852 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
4d653a59 5853 mp_.n_mmaps, mp_.mmapped_mem,
62a58816
SP
5854 total_system, total_max_system,
5855 total_aspace, total_aspace_mprotect);
bb066545
UD
5856
5857 return 0;
5858}
c52ff39e 5859weak_alias (__malloc_info, malloc_info)
bb066545
UD
5860
5861
eba19d2b 5862strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
eba19d2b
UD
5863strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5864strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5865strong_alias (__libc_memalign, __memalign)
5866weak_alias (__libc_memalign, memalign)
5867strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5868strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5869strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5870strong_alias (__libc_mallinfo, __mallinfo)
5871weak_alias (__libc_mallinfo, mallinfo)
e3960d1c
ML
5872strong_alias (__libc_mallinfo2, __mallinfo2)
5873weak_alias (__libc_mallinfo2, mallinfo2)
eba19d2b 5874strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
7e3be507
UD
5875
5876weak_alias (__malloc_stats, malloc_stats)
5877weak_alias (__malloc_usable_size, malloc_usable_size)
5878weak_alias (__malloc_trim, malloc_trim)
7e3be507 5879
025b33ae
FW
5880#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
5881compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
5882#endif
f65fd747 5883
fa8d436c 5884/* ------------------------------------------------------------
6c8dbf00 5885 History:
f65fd747 5886
6c8dbf00 5887 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
f65fd747 5888
6c8dbf00 5889 */
fa8d436c
UD
5890/*
5891 * Local variables:
5892 * c-basic-offset: 2
5893 * End:
5894 */