1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2023 Free Software Foundation, Inc.
3 Copyright The GNU Toolchain Authors.
4 This file is part of the GNU C Library.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If
18 not, see <https://www.gnu.org/licenses/>. */
19
20 /*
21 This is a version (aka ptmalloc2) of malloc/free/realloc written by
22 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
23
24 There have been substantial changes made after the integration into
25 glibc in all parts of the code. Do not look for much commonality
26 with the ptmalloc2 version.
27
28 * Version ptmalloc2-20011215
29 based on:
30 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
31
32 * Quickstart
33
34 In order to compile this implementation, a Makefile is provided with
35 the ptmalloc2 distribution, which has pre-defined targets for some
36 popular systems (e.g. "make posix" for Posix threads). All that is
37 typically required with regard to compiler flags is the selection of
38 the thread package via defining one out of USE_PTHREADS, USE_THR or
39 USE_SPROC. Check the thread-m.h file for what effects this has.
40 Many/most systems will additionally require USE_TSD_DATA_HACK to be
41 defined, so this is the default for "make posix".
42
43 * Why use this malloc?
44
45 This is not the fastest, most space-conserving, most portable, or
46 most tunable malloc ever written. However it is among the fastest
47 while also being among the most space-conserving, portable and tunable.
48 Consistent balance across these factors results in a good general-purpose
49 allocator for malloc-intensive programs.
50
51 The main properties of the algorithms are:
52 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
53 with ties normally decided via FIFO (i.e. least recently used).
54 * For small (<= 64 bytes by default) requests, it is a caching
55 allocator, that maintains pools of quickly recycled chunks.
56 * In between, and for combinations of large and small requests, it does
57 the best it can trying to meet both goals at once.
58 * For very large requests (>= 128KB by default), it relies on system
59 memory mapping facilities, if supported.
60
61 For a longer but slightly out of date high-level description, see
62 http://gee.cs.oswego.edu/dl/html/malloc.html
63
64 You may already by default be using a C library containing a malloc
65 that is based on some version of this malloc (for example in
66 linux). You might still want to use the one in this file in order to
67 customize settings or to avoid overheads associated with library
68 versions.
69
70 * Contents, described in more detail in "description of public routines" below.
71
72 Standard (ANSI/SVID/...) functions:
73 malloc(size_t n);
74 calloc(size_t n_elements, size_t element_size);
75 free(void* p);
76 realloc(void* p, size_t n);
77 memalign(size_t alignment, size_t n);
78 valloc(size_t n);
79 mallinfo()
80 mallopt(int parameter_number, int parameter_value)
81
82 Additional functions:
83 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
84 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
85 pvalloc(size_t n);
86 malloc_trim(size_t pad);
87 malloc_usable_size(void* p);
88 malloc_stats();
89
90 * Vital statistics:
91
92 Supported pointer representation: 4 or 8 bytes
93 Supported size_t representation: 4 or 8 bytes
94 Note that size_t is allowed to be 4 bytes even if pointers are 8.
95 You can adjust this by defining INTERNAL_SIZE_T
96
97 Alignment: 2 * sizeof(size_t) (default)
98 (i.e., 8 byte alignment with 4byte size_t). This suffices for
99 nearly all current machines and C compilers. However, you can
100 define MALLOC_ALIGNMENT to be wider than this if necessary.
101
102 Minimum overhead per allocated chunk: 4 or 8 bytes
103 Each malloced chunk has a hidden word of overhead holding size
104 and status information.
105
106 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
107 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
108
109 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
110 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
111 needed; 4 (8) for a trailing size field and 8 (16) bytes for
112 free list pointers. Thus, the minimum allocatable size is
113 16/24/32 bytes.
114
115 Even a request for zero bytes (i.e., malloc(0)) returns a
116 pointer to something of the minimum allocatable size.
117
118 The maximum overhead wastage (i.e., number of extra bytes
119 allocated than were requested in malloc) is less than or equal
120 to the minimum size, except for requests >= mmap_threshold that
121 are serviced via mmap(), where the worst case wastage is 2 *
122 sizeof(size_t) bytes plus the remainder from a system page (the
123 minimal mmap unit); typically 4096 or 8192 bytes.
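For example (an editor's worked illustration, assuming a typical 64-bit
configuration with 8-byte size_t and 16-byte alignment): malloc(13) is
padded to a 32-byte chunk of which 24 bytes are usable, so 19 more bytes
are tied up than were requested -- within the minimum-size bound above.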
124
125 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
126 8-byte size_t: 2^64 minus about two pages
127
128 It is assumed that (possibly signed) size_t values suffice to
129 represent chunk sizes. `Possibly signed' is due to the fact
130 that `size_t' may be defined on a system as either a signed or
131 an unsigned type. The ISO C standard says that it must be
132 unsigned, but a few systems are known not to adhere to this.
133 Additionally, even when size_t is unsigned, sbrk (which is by
134 default used to obtain memory from system) accepts signed
135 arguments, and may not be able to handle size_t-wide arguments
136 with negative sign bit. Generally, values that would
137 appear as negative after accounting for overhead and alignment
138 are supported only via mmap(), which does not have this
139 limitation.
140
141 Requests for sizes outside the allowed range will perform an optional
142 failure action and then return null. (Requests may also
143 fail because a system is out of memory.)
144
145 Thread-safety: thread-safe
146
147 Compliance: I believe it is compliant with the 1997 Single Unix Specification
148 Also SVID/XPG, ANSI C, and probably others as well.
149
150 * Synopsis of compile-time options:
151
152 People have reported using previous versions of this malloc on all
153 versions of Unix, sometimes by tweaking some of the defines
154 below. It has been tested most extensively on Solaris and Linux.
155 People also report using it in stand-alone embedded systems.
156
157 The implementation is in straight, hand-tuned ANSI C. It is not
158 at all modular. (Sorry!) It uses a lot of macros. To be at all
159 usable, this code should be compiled using an optimizing compiler
160 (for example gcc -O3) that can simplify expressions and control
161 paths. (FAQ: some macros import variables as arguments rather than
162 declare locals because people reported that some debuggers
163 otherwise get confused.)
164
165 OPTION DEFAULT VALUE
166
167 Compilation Environment options:
168
169 HAVE_MREMAP 0
170
171 Changing default word sizes:
172
173 INTERNAL_SIZE_T size_t
174
175 Configuration and functionality options:
176
177 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
178 USE_MALLOC_LOCK NOT defined
179 MALLOC_DEBUG NOT defined
180 REALLOC_ZERO_BYTES_FREES 1
181 TRIM_FASTBINS 0
182
183 Options for customizing MORECORE:
184
185 MORECORE sbrk
186 MORECORE_FAILURE -1
187 MORECORE_CONTIGUOUS 1
188 MORECORE_CANNOT_TRIM NOT defined
189 MORECORE_CLEARS 1
190 MMAP_AS_MORECORE_SIZE (1024 * 1024)
191
192 Tuning options that are also dynamically changeable via mallopt:
193
194 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
195 DEFAULT_TRIM_THRESHOLD 128 * 1024
196 DEFAULT_TOP_PAD 0
197 DEFAULT_MMAP_THRESHOLD 128 * 1024
198 DEFAULT_MMAP_MAX 65536
199
200 There are several other #defined constants and macros that you
201 probably don't want to touch unless you are extending or adapting malloc. */
202
203 /*
204 void* is the pointer type that malloc should say it returns
205 */
206
207 #ifndef void
208 #define void void
209 #endif /*void*/
210
211 #include <stddef.h> /* for size_t */
212 #include <stdlib.h> /* for getenv(), abort() */
213 #include <unistd.h> /* for __libc_enable_secure */
214
215 #include <atomic.h>
216 #include <_itoa.h>
217 #include <bits/wordsize.h>
218 #include <sys/sysinfo.h>
219
220 #include <ldsodefs.h>
221
222 #include <unistd.h>
223 #include <stdio.h> /* needed for malloc_stats */
224 #include <errno.h>
225 #include <assert.h>
226
227 #include <shlib-compat.h>
228
229 /* For uintptr_t. */
230 #include <stdint.h>
231
232 /* For va_arg, va_start, va_end. */
233 #include <stdarg.h>
234
235 /* For MIN, MAX, powerof2. */
236 #include <sys/param.h>
237
238 /* For ALIGN_UP et. al. */
239 #include <libc-pointer-arith.h>
240
241 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
242 #include <libc-diag.h>
243
244 /* For memory tagging. */
245 #include <libc-mtag.h>
246
247 #include <malloc/malloc-internal.h>
248
249 /* For SINGLE_THREAD_P. */
250 #include <sysdep-cancel.h>
251
252 #include <libc-internal.h>
253
254 /* For tcache double-free check. */
255 #include <random-bits.h>
256 #include <sys/random.h>
257 #include <not-cancel.h>
258
259 /*
260 Debugging:
261
262 Because freed chunks may be overwritten with bookkeeping fields, this
263 malloc will often die when freed memory is overwritten by user
264 programs. This can be very effective (albeit in an annoying way)
265 in helping track down dangling pointers.
266
267 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
268 enabled that will catch more memory errors. You probably won't be
269 able to make much sense of the actual assertion errors, but they
270 should help you locate incorrectly overwritten memory. The checking
271 is fairly extensive, and will slow down execution
272 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
273 will attempt to check every non-mmapped allocated and free chunk in
274 the course of computing the summaries. (By nature, mmapped regions
275 cannot be checked very much automatically.)
276
277 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
278 this code. The assertions in the check routines spell out in more
279 detail the assumptions and invariants underlying the algorithms.
280
281 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
282 checking that all accesses to malloced memory stay within their
283 bounds. However, there are several add-ons and adaptations of this
284 or other mallocs available that do this.
285 */
286
287 #ifndef MALLOC_DEBUG
288 #define MALLOC_DEBUG 0
289 #endif
290
291 #if USE_TCACHE
292 /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
293 # define TCACHE_MAX_BINS 64
294 # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
295
296 /* Only used to pre-fill the tunables. */
297 # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
298
299 /* When "x" is from chunksize(). */
300 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
301 /* When "x" is a user-provided size. */
302 # define usize2tidx(x) csize2tidx (request2size (x))
303
304 /* With rounding and alignment, the bins are...
305 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
306 idx 1 bytes 25..40 or 13..20
307 idx 2 bytes 41..56 or 21..28
308 etc. */
309
310 /* This is another arbitrary limit, which tunables can change. Each
311 tcache bin will hold at most this number of chunks. */
312 # define TCACHE_FILL_COUNT 7
313
314 /* Maximum chunks in tcache bins for tunables. This value must fit the range
315 of tcache->counts[] entries, else they may overflow. */
316 # define MAX_TCACHE_COUNT UINT16_MAX
317 #endif
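/* Example (editor's sketch, assuming a typical 64-bit build where
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MINSIZE == 32): a 25-byte
   request is first padded by request2size to a 48-byte chunk, and
   csize2tidx then maps that chunk size to tcache bin 1, matching the
   table above:

     size_t csize = request2size (25);   // (25 + 8 + 15) & ~15 == 48
     size_t tidx  = csize2tidx (csize);  // (48 - 32 + 15) / 16  == 1
     assert (tidx == usize2tidx (25));
     assert (tidx2usize (1) == 40);      // largest usable size in bin 1
*/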
318
319 /* Safe-Linking:
320 Use randomness from ASLR (mmap_base) to protect single-linked lists
321 of Fast-Bins and TCache. That is, mask the "next" pointers of the
322 lists' chunks, and also perform allocation alignment checks on them.
323 This mechanism reduces the risk of pointer hijacking, as was done with
324 Safe-Unlinking in the double-linked lists of Small-Bins.
325 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
326 larger pages provide less entropy, although the pointer mangling
327 still works. */
328 #define PROTECT_PTR(pos, ptr) \
329 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
330 #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
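/* Illustrative sketch (editor's note, not a verbatim excerpt of the
   implementation): when a free chunk E is pushed onto a singly-linked
   fastbin or tcache list, the value stored in its link field is the XOR
   of the real successor with the shifted address of the field itself,
   and REVEAL_PTR undoes it because (pos >> 12) ^ ((pos >> 12) ^ ptr)
   == ptr:

     e->fd = PROTECT_PTR (&e->fd, old_head);   // store mangled pointer
     ...
     mchunkptr head = REVEAL_PTR (e->fd);      // head == old_head again
*/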
331
332 /*
333 The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0)
334 when p is nonnull. If the macro is nonzero, the realloc call returns NULL;
335 otherwise, the call returns what malloc (0) would. In either case,
336 p is freed. Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which
337 implements common historical practice.
338
339 ISO C17 says the realloc call has implementation-defined behavior,
340 and it might not even free p.
341 */
342
343 #ifndef REALLOC_ZERO_BYTES_FREES
344 #define REALLOC_ZERO_BYTES_FREES 1
345 #endif
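/* Example of the resulting behavior (editor's sketch): with the glibc
   default of 1, shrinking an allocation to zero frees it and yields NULL:

     void *p = malloc (16);
     p = realloc (p, 0);   // old block is freed; p is now NULL
*/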
346
347 /*
348 TRIM_FASTBINS controls whether free() of a very small chunk can
349 immediately lead to trimming. Setting to true (1) can reduce memory
350 footprint, but will almost always slow down programs that use a lot
351 of small chunks.
352
353 Define this only if you are willing to give up some speed to more
354 aggressively reduce system-level memory footprint when releasing
355 memory in programs that use many small chunks. You can get
356 essentially the same effect by setting MXFAST to 0, but this can
357 lead to even greater slowdowns in programs using many small chunks.
358 TRIM_FASTBINS is an in-between compile-time option, that disables
359 only those chunks bordering topmost memory from being placed in
360 fastbins.
361 */
362
363 #ifndef TRIM_FASTBINS
364 #define TRIM_FASTBINS 0
365 #endif
366
367 /* Definition for getting more memory from the OS. */
368 #include "morecore.c"
369
370 #define MORECORE (*__glibc_morecore)
371 #define MORECORE_FAILURE 0
372
373 /* Memory tagging. */
374
375 /* Some systems support the concept of tagging (sometimes known as
376 coloring) memory locations on a fine grained basis. Each memory
377 location is given a color (normally allocated randomly) and
378 pointers are also colored. When the pointer is dereferenced, the
379 pointer's color is checked against the memory's color and if they
380 differ the access is faulted (sometimes lazily).
381
382 We use this in glibc by maintaining a single color for the malloc
383 data structures that are interleaved with the user data and then
384 assigning separate colors for each block allocation handed out. In
385 this way simple buffer overruns will be rapidly detected. When
386 memory is freed, the memory is recolored back to the glibc default
387 so that simple use-after-free errors can also be detected.
388
389 If memory is reallocated the buffer is recolored even if the
390 address remains the same. This has a performance impact, but
391 guarantees that the old pointer cannot mistakenly be reused (code
392 that compares old against new will see a mismatch and will then
393 need to behave as though realloc moved the data to a new location).
394
395 Internal API for memory tagging support.
396
397 The aim is to keep the code for memory tagging support as close to
398 the normal APIs in glibc as possible, so that if tagging is not
399 enabled in the library, or is disabled at runtime then standard
400 operations can continue to be used. Support macros are used to do
401 this:
402
403 void *tag_new_zero_region (void *ptr, size_t size)
404
405 Allocates a new tag, colors the memory with that tag, zeros the
406 memory and returns a pointer that is correctly colored for that
407 location. The non-tagging version will simply call memset with 0.
408
409 void *tag_region (void *ptr, size_t size)
410
411 Color the region of memory pointed to by PTR and size SIZE with
412 the color of PTR. Returns the original pointer.
413
414 void *tag_new_usable (void *ptr)
415
416 Allocate a new random color and use it to color the user region of
417 a chunk; this may include data from the subsequent chunk's header
418 if tagging is sufficiently fine grained. Returns PTR suitably
419 recolored for accessing the memory there.
420
421 void *tag_at (void *ptr)
422
423 Read the current color of the memory at the address pointed to by
424 PTR (ignoring its current color) and return PTR recolored to that
425 color. PTR must be a valid address in all other respects. When
426 tagging is not enabled, it simply returns the original pointer.
427 */
428
429 #ifdef USE_MTAG
430 static bool mtag_enabled = false;
431 static int mtag_mmap_flags = 0;
432 #else
433 # define mtag_enabled false
434 # define mtag_mmap_flags 0
435 #endif
436
437 static __always_inline void *
438 tag_region (void *ptr, size_t size)
439 {
440 if (__glibc_unlikely (mtag_enabled))
441 return __libc_mtag_tag_region (ptr, size);
442 return ptr;
443 }
444
445 static __always_inline void *
446 tag_new_zero_region (void *ptr, size_t size)
447 {
448 if (__glibc_unlikely (mtag_enabled))
449 return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
450 return memset (ptr, 0, size);
451 }
452
453 /* Defined later. */
454 static void *
455 tag_new_usable (void *ptr);
456
457 static __always_inline void *
458 tag_at (void *ptr)
459 {
460 if (__glibc_unlikely (mtag_enabled))
461 return __libc_mtag_address_get_tag (ptr);
462 return ptr;
463 }
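/* How these helpers fit together (editor's sketch of the intended flow,
   not a verbatim excerpt): memory handed to the user is given a fresh
   color via tag_new_usable, and is recolored back to the malloc-internal
   color with tag_region when it is freed:

     void *mem = tag_new_usable (chunk2mem (p));        // on allocation
     ...
     (void) tag_region (chunk2mem (p), memsize (p));    // on free
*/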
464
465 #include <string.h>
466
467 /*
468 MORECORE-related declarations. By default, rely on sbrk
469 */
470
471
472 /*
473 MORECORE is the name of the routine to call to obtain more memory
474 from the system. See below for general guidance on writing
475 alternative MORECORE functions, as well as a version for WIN32 and a
476 sample version for pre-OSX macos.
477 */
478
479 #ifndef MORECORE
480 #define MORECORE sbrk
481 #endif
482
483 /*
484 MORECORE_FAILURE is the value returned upon failure of MORECORE
485 as well as mmap. Since it cannot be an otherwise valid memory address,
486 and must reflect values of standard sys calls, you probably ought not
487 try to redefine it.
488 */
489
490 #ifndef MORECORE_FAILURE
491 #define MORECORE_FAILURE (-1)
492 #endif
493
494 /*
495 If MORECORE_CONTIGUOUS is true, take advantage of fact that
496 consecutive calls to MORECORE with positive arguments always return
497 contiguous increasing addresses. This is true of unix sbrk. Even
498 if not defined, when regions happen to be contiguous, malloc will
499 permit allocations spanning regions obtained from different
500 calls. But defining this when applicable enables some stronger
501 consistency checks and space efficiencies.
502 */
503
504 #ifndef MORECORE_CONTIGUOUS
505 #define MORECORE_CONTIGUOUS 1
506 #endif
507
508 /*
509 Define MORECORE_CANNOT_TRIM if your version of MORECORE
510 cannot release space back to the system when given negative
511 arguments. This is generally necessary only if you are using
512 a hand-crafted MORECORE function that cannot handle negative arguments.
513 */
514
515 /* #define MORECORE_CANNOT_TRIM */
516
517 /* MORECORE_CLEARS (default 1)
518 The degree to which the routine mapped to MORECORE zeroes out
519 memory: never (0), only for newly allocated space (1) or always
520 (2). The distinction between (1) and (2) is necessary because on
521 some systems, if the application first decrements and then
522 increments the break value, the contents of the reallocated space
523 are unspecified.
524 */
525
526 #ifndef MORECORE_CLEARS
527 # define MORECORE_CLEARS 1
528 #endif
529
530
531 /*
532 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
533 sbrk fails, and mmap is used as a backup. The value must be a
534 multiple of page size. This backup strategy generally applies only
535 when systems have "holes" in address space, so sbrk cannot perform
536 contiguous expansion, but there is still space available on system.
537 On systems for which this is known to be useful (i.e. most linux
538 kernels), this occurs only when programs allocate huge amounts of
539 memory. Between this, and the fact that mmap regions tend to be
540 limited, the size should be large, to avoid too many mmap calls and
541 thus avoid running out of kernel resources. */
542
543 #ifndef MMAP_AS_MORECORE_SIZE
544 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
545 #endif
546
547 /*
548 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
549 large blocks.
550 */
551
552 #ifndef HAVE_MREMAP
553 #define HAVE_MREMAP 0
554 #endif
555
556 /*
557 This version of malloc supports the standard SVID/XPG mallinfo
558 routine that returns a struct containing usage properties and
559 statistics. It should work on any SVID/XPG compliant system that has
560 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
561 install such a thing yourself, cut out the preliminary declarations
562 as described above and below and save them in a malloc.h file. But
563 there's no compelling reason to bother to do this.)
564
565 The main declaration needed is the mallinfo struct that is returned
566 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
567 bunch of fields that are not even meaningful in this version of
568 malloc. These fields are instead filled by mallinfo() with
569 other numbers that might be of interest.
570 */
571
572
573 /* ---------- description of public routines ------------ */
574
575 #if IS_IN (libc)
576 /*
577 malloc(size_t n)
578 Returns a pointer to a newly allocated chunk of at least n bytes, or null
579 if no space is available. Additionally, on failure, errno is
580 set to ENOMEM on ANSI C systems.
581
582 If n is zero, malloc returns a minimum-sized chunk. (The minimum
583 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
584 systems.) On most systems, size_t is an unsigned type, so calls
585 with negative arguments are interpreted as requests for huge amounts
586 of space, which will often fail. The maximum supported value of n
587 differs across systems, but is in all cases less than the maximum
588 representable value of a size_t.
589 */
590 void* __libc_malloc(size_t);
591 libc_hidden_proto (__libc_malloc)
592
593 /*
594 free(void* p)
595 Releases the chunk of memory pointed to by p, that had been previously
596 allocated using malloc or a related routine such as realloc.
597 It has no effect if p is null. It can have arbitrary (i.e., bad!)
598 effects if p has already been freed.
599
600 Unless disabled (using mallopt), freeing very large spaces will
601 when possible, automatically trigger operations that give
602 back unused memory to the system, thus reducing program footprint.
603 */
604 void __libc_free(void*);
605 libc_hidden_proto (__libc_free)
606
607 /*
608 calloc(size_t n_elements, size_t element_size);
609 Returns a pointer to n_elements * element_size bytes, with all locations
610 set to zero.
611 */
612 void* __libc_calloc(size_t, size_t);
613
614 /*
615 realloc(void* p, size_t n)
616 Returns a pointer to a chunk of size n that contains the same data
617 as does chunk p up to the minimum of (n, p's size) bytes, or null
618 if no space is available.
619
620 The returned pointer may or may not be the same as p. The algorithm
621 prefers extending p when possible, otherwise it employs the
622 equivalent of a malloc-copy-free sequence.
623
624 If p is null, realloc is equivalent to malloc.
625
626 If space is not available, realloc returns null, errno is set (if on
627 ANSI) and p is NOT freed.
628
629 if n is for fewer bytes than already held by p, the newly unused
630 space is lopped off and freed if possible. Unless the #define
631 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
632 zero (re)allocates a minimum-sized chunk.
633
634 Large chunks that were internally obtained via mmap will always be
635 grown using malloc-copy-free sequences unless the system supports
636 MREMAP (currently only linux).
637
638 The old unix realloc convention of allowing the last-free'd chunk
639 to be used as an argument to realloc is not supported.
640 */
641 void* __libc_realloc(void*, size_t);
642 libc_hidden_proto (__libc_realloc)
643
644 /*
645 memalign(size_t alignment, size_t n);
646 Returns a pointer to a newly allocated chunk of n bytes, aligned
647 in accord with the alignment argument.
648
649 The alignment argument should be a power of two. If the argument is
650 not a power of two, the nearest greater power is used.
651 8-byte alignment is guaranteed by normal malloc calls, so don't
652 bother calling memalign with an argument of 8 or less.
653
654 Overreliance on memalign is a sure way to fragment space.
655 */
656 void* __libc_memalign(size_t, size_t);
657 libc_hidden_proto (__libc_memalign)
658
659 /*
660 valloc(size_t n);
661 Equivalent to memalign(pagesize, n), where pagesize is the page
662 size of the system. If the pagesize is unknown, 4096 is used.
663 */
664 void* __libc_valloc(size_t);
665
666
667
668 /*
669 mallinfo()
670 Returns (by copy) a struct containing various summary statistics:
671
672 arena: current total non-mmapped bytes allocated from system
673 ordblks: the number of free chunks
674 smblks: the number of fastbin blocks (i.e., small chunks that
675 have been freed but not yet reused or consolidated)
676 hblks: current number of mmapped regions
677 hblkhd: total bytes held in mmapped regions
678 usmblks: always 0
679 fsmblks: total bytes held in fastbin blocks
680 uordblks: current total allocated space (normal or mmapped)
681 fordblks: total free space
682 keepcost: the maximum number of bytes that could ideally be released
683 back to system via malloc_trim. ("ideally" means that
684 it ignores page restrictions etc.)
685
686 Because these fields are ints, but internal bookkeeping may
687 be kept as longs, the reported values may wrap around zero and
688 thus be inaccurate.
689 */
690 struct mallinfo2 __libc_mallinfo2(void);
691 libc_hidden_proto (__libc_mallinfo2)
692
693 struct mallinfo __libc_mallinfo(void);
694
695
696 /*
697 pvalloc(size_t n);
698 Equivalent to valloc(minimum-page-that-holds(n)), that is,
699 round up n to nearest pagesize.
700 */
701 void* __libc_pvalloc(size_t);
702
703 /*
704 malloc_trim(size_t pad);
705
706 If possible, gives memory back to the system (via negative
707 arguments to sbrk) if there is unused memory at the `high' end of
708 the malloc pool. You can call this after freeing large blocks of
709 memory to potentially reduce the system-level memory requirements
710 of a program. However, it cannot guarantee to reduce memory. Under
711 some allocation patterns, some large free blocks of memory will be
712 locked between two used chunks, so they cannot be given back to
713 the system.
714
715 The `pad' argument to malloc_trim represents the amount of free
716 trailing space to leave untrimmed. If this argument is zero,
717 only the minimum amount of memory to maintain internal data
718 structures will be left (one page or less). Non-zero arguments
719 can be supplied to maintain enough trailing space to service
720 future expected allocations without having to re-obtain memory
721 from the system.
722
723 Malloc_trim returns 1 if it actually released any memory, else 0.
724 On systems that do not support "negative sbrks", it will always
725 return 0.
726 */
727 int __malloc_trim(size_t);
728
729 /*
730 malloc_usable_size(void* p);
731
732 Returns the number of bytes you can actually use in
733 an allocated chunk, which may be more than you requested (although
734 often not) due to alignment and minimum size constraints.
735 You can use this many bytes without worrying about
736 overwriting other allocated objects. This is not a particularly great
737 programming practice. malloc_usable_size can be more useful in
738 debugging and assertions, for example:
739
740 p = malloc(n);
741 assert(malloc_usable_size(p) >= 256);
742
743 */
744 size_t __malloc_usable_size(void*);
745
746 /*
747 malloc_stats();
748 Prints on stderr the amount of space obtained from the system (both
749 via sbrk and mmap), the maximum amount (which may be more than
750 current if malloc_trim and/or munmap got called), and the current
751 number of bytes allocated via malloc (or realloc, etc) but not yet
752 freed. Note that this is the number of bytes allocated, not the
753 number requested. It will be larger than the number requested
754 because of alignment and bookkeeping overhead. Because it includes
755 alignment wastage as being in use, this figure may be greater than
756 zero even when no user-level chunks are allocated.
757
758 The reported current and maximum system memory can be inaccurate if
759 a program makes other calls to system memory allocation functions
760 (normally sbrk) outside of malloc.
761
762 malloc_stats prints only the most commonly interesting statistics.
763 More information can be obtained by calling mallinfo.
764
765 */
766 void __malloc_stats(void);
767
768 /*
769 posix_memalign(void **memptr, size_t alignment, size_t size);
770
771 POSIX wrapper like memalign(), checking for validity of size.
772 */
773 int __posix_memalign(void **, size_t, size_t);
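/* Usage example (editor's sketch): posix_memalign is the portable way for
   applications to request aligned memory; the alignment must be a power of
   two and a multiple of sizeof (void *):

     void *buf;
     int rc = posix_memalign (&buf, 64, 1024);  // 1 KiB, 64-byte aligned
     if (rc != 0)
       ...                                      // rc is ENOMEM or EINVAL
*/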
774 #endif /* IS_IN (libc) */
775
776 /*
777 mallopt(int parameter_number, int parameter_value)
778 Sets tunable parameters. The format is to provide a
779 (parameter-number, parameter-value) pair. mallopt then sets the
780 corresponding parameter to the argument value if it can (i.e., so
781 long as the value is meaningful), and returns 1 if successful else
782 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
783 normally defined in malloc.h. Only one of these (M_MXFAST) is used
784 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
785 so setting them has no effect. But this malloc also supports four
786 other options in mallopt. See below for details. Briefly, supported
787 parameters are as follows (listed defaults are for "typical"
788 configurations).
789
790 Symbol param # default allowed param values
791 M_MXFAST 1 64 0-80 (0 disables fastbins)
792 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
793 M_TOP_PAD -2 0 any
794 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
795 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
796 */
797 int __libc_mallopt(int, int);
798 #if IS_IN (libc)
799 libc_hidden_proto (__libc_mallopt)
800 #endif
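/* Example (editor's sketch): tuning two of the parameters listed above at
   runtime from application code:

     #include <malloc.h>

     mallopt (M_MMAP_MAX, 0);   // never service requests via mmap
     mallopt (M_MXFAST, 80);    // keep requests up to 80 bytes in fastbins
*/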
801
802 /* mallopt tuning options */
803
804 /*
805 M_MXFAST is the maximum request size used for "fastbins", special bins
806 that hold returned chunks without consolidating their spaces. This
807 enables future requests for chunks of the same size to be handled
808 very quickly, but can increase fragmentation, and thus increase the
809 overall memory footprint of a program.
810
811 This malloc manages fastbins very conservatively yet still
812 efficiently, so fragmentation is rarely a problem for values less
813 than or equal to the default. The maximum supported value of MXFAST
814 is 80. You wouldn't want it any higher than this anyway. Fastbins
815 are designed especially for use with many small structs, objects or
816 strings -- the default handles structs/objects/arrays with sizes up
817 to 8 4byte fields, or small strings representing words, tokens,
818 etc. Using fastbins for larger objects normally worsens
819 fragmentation without improving speed.
820
821 M_MXFAST is set in REQUEST size units. It is internally used in
822 chunksize units, which adds padding and alignment. You can reduce
823 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
824 algorithm to be a closer approximation of fifo-best-fit in all cases,
825 not just for larger requests, but will generally cause it to be
826 slower.
827 */
828
829
830 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
831 #ifndef M_MXFAST
832 #define M_MXFAST 1
833 #endif
834
835 #ifndef DEFAULT_MXFAST
836 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
837 #endif
838
839
840 /*
841 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
842 to keep before releasing via malloc_trim in free().
843
844 Automatic trimming is mainly useful in long-lived programs.
845 Because trimming via sbrk can be slow on some systems, and can
846 sometimes be wasteful (in cases where programs immediately
847 afterward allocate more large chunks) the value should be high
848 enough so that your overall system performance would improve by
849 releasing this much memory.
850
851 The trim threshold and the mmap control parameters (see below)
852 can be traded off with one another. Trimming and mmapping are
853 two different ways of releasing unused memory back to the
854 system. Between these two, it is often possible to keep
855 system-level demands of a long-lived program down to a bare
856 minimum. For example, in one test suite of sessions measuring
857 the XF86 X server on Linux, using a trim threshold of 128K and a
858 mmap threshold of 192K led to near-minimal long term resource
859 consumption.
860
861 If you are using this malloc in a long-lived program, it should
862 pay to experiment with these values. As a rough guide, you
863 might set it to a value close to the average size of a process
864 (program) running on your system. Releasing this much memory
865 would allow such a process to run in memory. Generally, it's
866 worth it to tune for trimming rather than memory mapping when a
867 program undergoes phases where several large chunks are
868 allocated and released in ways that can reuse each other's
869 storage, perhaps mixed with phases where there are no such
870 chunks at all. And in well-behaved long-lived programs,
871 controlling release of large blocks via trimming versus mapping
872 is usually faster.
873
874 However, in most programs, these parameters serve mainly as
875 protection against the system-level effects of carrying around
876 massive amounts of unneeded memory. Since frequent calls to
877 sbrk, mmap, and munmap otherwise degrade performance, the default
878 parameters are set to relatively high values that serve only as
879 safeguards.
880
881 The trim value must be greater than the page size to have any useful
882 effect. To disable trimming completely, you can set it to
883 (unsigned long)(-1).
884
885 Trim settings interact with fastbin (MXFAST) settings: Unless
886 TRIM_FASTBINS is defined, automatic trimming never takes place upon
887 freeing a chunk with size less than or equal to MXFAST. Trimming is
888 instead delayed until subsequent freeing of larger chunks. However,
889 you can still force an attempted trim by calling malloc_trim.
890
891 Also, trimming is not generally possible in cases where
892 the main arena is obtained via mmap.
893
894 Note that the trick some people use of mallocing a huge space and
895 then freeing it at program startup, in an attempt to reserve system
896 memory, doesn't have the intended effect under automatic trimming,
897 since that memory will immediately be returned to the system.
898 */
899
900 #define M_TRIM_THRESHOLD -1
901
902 #ifndef DEFAULT_TRIM_THRESHOLD
903 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
904 #endif
905
906 /*
907 M_TOP_PAD is the amount of extra `padding' space to allocate or
908 retain whenever sbrk is called. It is used in two ways internally:
909
910 * When sbrk is called to extend the top of the arena to satisfy
911 a new malloc request, this much padding is added to the sbrk
912 request.
913
914 * When malloc_trim is called automatically from free(),
915 it is used as the `pad' argument.
916
917 In both cases, the actual amount of padding is rounded
918 so that the end of the arena is always a system page boundary.
919
920 The main reason for using padding is to avoid calling sbrk so
921 often. Having even a small pad greatly reduces the likelihood
922 that nearly every malloc request during program start-up (or
923 after trimming) will invoke sbrk, which needlessly wastes
924 time.
925
926 Automatic rounding-up to page-size units is normally sufficient
927 to avoid measurable overhead, so the default is 0. However, in
928 systems where sbrk is relatively slow, it can pay to increase
929 this value, at the expense of carrying around more memory than
930 the program needs.
931 */
932
933 #define M_TOP_PAD -2
934
935 #ifndef DEFAULT_TOP_PAD
936 #define DEFAULT_TOP_PAD (0)
937 #endif
938
939 /*
940 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
941 adjusted MMAP_THRESHOLD.
942 */
943
944 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
945 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
946 #endif
947
948 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
949 /* For 32-bit platforms we cannot increase the maximum mmap
950 threshold much because it is also the minimum value for the
951 maximum heap size and its alignment. Going above 512k (i.e., 1M
952 for new heaps) wastes too much address space. */
953 # if __WORDSIZE == 32
954 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
955 # else
956 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
957 # endif
958 #endif
959
960 /*
961 M_MMAP_THRESHOLD is the request size threshold for using mmap()
962 to service a request. Requests of at least this size that cannot
963 be allocated using already-existing space will be serviced via mmap.
964 (If enough normal freed space already exists it is used instead.)
965
966 Using mmap segregates relatively large chunks of memory so that
967 they can be individually obtained and released from the host
968 system. A request serviced through mmap is never reused by any
969 other request (at least not directly; the system may just so
970 happen to remap successive requests to the same locations).
971
972 Segregating space in this way has the benefits that:
973
974 1. Mmapped space can ALWAYS be individually released back
975 to the system, which helps keep the system level memory
976 demands of a long-lived program low.
977 2. Mapped memory can never become `locked' between
978 other chunks, as can happen with normally allocated chunks, which
979 means that even trimming via malloc_trim would not release them.
980 3. On some systems with "holes" in address spaces, mmap can obtain
981 memory that sbrk cannot.
982
983 However, it has the disadvantages that:
984
985 1. The space cannot be reclaimed, consolidated, and then
986 used to service later requests, as happens with normal chunks.
987 2. It can lead to more wastage because of mmap page alignment
988 requirements
989 3. It causes malloc performance to be more dependent on host
990 system memory management support routines which may vary in
991 implementation quality and may impose arbitrary
992 limitations. Generally, servicing a request via normal
993 malloc steps is faster than going through a system's mmap.
994
995 The advantages of mmap nearly always outweigh disadvantages for
996 "large" chunks, but the value of "large" varies across systems. The
997 default is an empirically derived value that works well in most
998 systems.
999
1000
1001 Update in 2006:
1002 The above was written in 2001. Since then the world has changed a lot.
1003 Memory got bigger. Applications got bigger. The virtual address space
1004 layout in 32 bit linux changed.
1005
1006 In the new situation, brk() and mmap space is shared and there are no
1007 artificial limits on brk size imposed by the kernel. What is more,
1008 applications have started using transient allocations larger than the
1009 128Kb that was imagined in 2001.
1010
1011 The price for mmap is also high now; each time glibc mmaps from the
1012 kernel, the kernel is forced to zero out the memory it gives to the
1013 application. Zeroing memory is expensive and eats a lot of cache and
1014 memory bandwidth. This has nothing to do with the efficiency of the
1015 virtual memory system; when doing mmap the kernel just has no choice but
1016 to zero.
1017
1018 In 2001, the kernel had a maximum size for brk() which was about 800
1019 megabytes on 32 bit x86, at that point brk() would hit the first
1020 mmapped shared libraries and couldn't expand anymore. With current 2.6
1021 kernels, the VA space layout is different and brk() and mmap
1022 both can span the entire heap at will.
1023
1024 Rather than using a static threshold for the brk/mmap tradeoff,
1025 we are now using a simple dynamic one. The goal is still to avoid
1026 fragmentation. The old goals we kept are
1027 1) try to get the long lived large allocations to use mmap()
1028 2) really large allocations should always use mmap()
1029 and we're adding now:
1030 3) transient allocations should use brk() to avoid forcing the kernel
1031 having to zero memory over and over again
1032
1033 The implementation works with a sliding threshold, which is by default
1034 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1035 out at 128Kb as per the 2001 default.
1036
1037 This allows us to satisfy requirement 1) under the assumption that long
1038 lived allocations are made early in the process' lifespan, before it has
1039 started doing dynamic allocations of the same size (which will
1040 increase the threshold).
1041
1042 The upperbound on the threshold satisfies requirement 2)
1043
1044 The threshold goes up in value when the application frees memory that was
1045 allocated with the mmap allocator. The idea is that once the application
1046 starts freeing memory of a certain size, it's highly probable that this is
1047 a size the application uses for transient allocations. This estimator
1048 is there to satisfy the new third requirement.
1049
1050 */
1051
1052 #define M_MMAP_THRESHOLD -3
1053
1054 #ifndef DEFAULT_MMAP_THRESHOLD
1055 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1056 #endif
1057
1058 /*
1059 M_MMAP_MAX is the maximum number of requests to simultaneously
1060 service using mmap. This parameter exists because
1061 some systems have a limited number of internal tables for
1062 use by mmap, and using more than a few of them may degrade
1063 performance.
1064
1065 The default is set to a value that serves only as a safeguard.
1066 Setting to 0 disables use of mmap for servicing large requests.
1067 */
1068
1069 #define M_MMAP_MAX -4
1070
1071 #ifndef DEFAULT_MMAP_MAX
1072 #define DEFAULT_MMAP_MAX (65536)
1073 #endif
1074
1075 #include <malloc.h>
1076
1077 #ifndef RETURN_ADDRESS
1078 #define RETURN_ADDRESS(X_) (NULL)
1079 #endif
1080
1081 /* Forward declarations. */
1082 struct malloc_chunk;
1083 typedef struct malloc_chunk* mchunkptr;
1084
1085 /* Internal routines. */
1086
1087 static void* _int_malloc(mstate, size_t);
1088 static void _int_free(mstate, mchunkptr, int);
1089 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1090 INTERNAL_SIZE_T);
1091 static void* _int_memalign(mstate, size_t, size_t);
1092 #if IS_IN (libc)
1093 static void* _mid_memalign(size_t, size_t, void *);
1094 #endif
1095
1096 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
1097
1098 static void munmap_chunk(mchunkptr p);
1099 #if HAVE_MREMAP
1100 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
1101 #endif
1102
1103 static size_t musable (void *mem);
1104
1105 /* ------------------ MMAP support ------------------ */
1106
1107
1108 #include <fcntl.h>
1109 #include <sys/mman.h>
1110
1111 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1112 # define MAP_ANONYMOUS MAP_ANON
1113 #endif
1114
1115 #define MMAP(addr, size, prot, flags) \
1116 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1117
1118
1119 /*
1120 ----------------------- Chunk representations -----------------------
1121 */
1122
1123
1124 /*
1125 This struct declaration is misleading (but accurate and necessary).
1126 It declares a "view" into memory allowing access to necessary
1127 fields at known offsets from a given base. See explanation below.
1128 */
1129
1130 struct malloc_chunk {
1131
1132 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1133 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1134
1135 struct malloc_chunk* fd; /* double links -- used only if free. */
1136 struct malloc_chunk* bk;
1137
1138 /* Only used for large blocks: pointer to next larger size. */
1139 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1140 struct malloc_chunk* bk_nextsize;
1141 };
1142
1143
1144 /*
1145 malloc_chunk details:
1146
1147 (The following includes lightly edited explanations by Colin Plumb.)
1148
1149 Chunks of memory are maintained using a `boundary tag' method as
1150 described in e.g., Knuth or Standish. (See the paper by Paul
1151 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1152 survey of such techniques.) Sizes of free chunks are stored both
1153 in the front of each chunk and at the end. This makes
1154 consolidating fragmented chunks into bigger chunks very fast. The
1155 size fields also hold bits representing whether chunks are free or
1156 in use.
1157
1158 An allocated chunk looks like this:
1159
1160
1161 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1162 | Size of previous chunk, if unallocated (P clear) |
1163 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1164 | Size of chunk, in bytes |A|M|P|
1165 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1166 | User data starts here... .
1167 . .
1168 . (malloc_usable_size() bytes) .
1169 . |
1170 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1171 | (size of chunk, but used for application data) |
1172 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1173 | Size of next chunk, in bytes |A|0|1|
1174 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1175
1176 Where "chunk" is the front of the chunk for the purpose of most of
1177 the malloc code, but "mem" is the pointer that is returned to the
1178 user. "Nextchunk" is the beginning of the next contiguous chunk.
1179
1180 Chunks always begin on even word boundaries, so the mem portion
1181 (which is returned to the user) is also on an even word boundary, and
1182 thus at least double-word aligned.
1183
1184 Free chunks are stored in circular doubly-linked lists, and look like this:
1185
1186 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1187 | Size of previous chunk, if unallocated (P clear) |
1188 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1189 `head:' | Size of chunk, in bytes |A|0|P|
1190 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1191 | Forward pointer to next chunk in list |
1192 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1193 | Back pointer to previous chunk in list |
1194 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1195 | Unused space (may be 0 bytes long) .
1196 . .
1197 . |
1198 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1199 `foot:' | Size of chunk, in bytes |
1200 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1201 | Size of next chunk, in bytes |A|0|0|
1202 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1203
1204 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1205 chunk size (which is always a multiple of two words), is an in-use
1206 bit for the *previous* chunk. If that bit is *clear*, then the
1207 word before the current chunk size contains the previous chunk
1208 size, and can be used to find the front of the previous chunk.
1209 The very first chunk allocated always has this bit set,
1210 preventing access to non-existent (or non-owned) memory. If
1211 prev_inuse is set for any given chunk, then you CANNOT determine
1212 the size of the previous chunk, and might even get a memory
1213 addressing fault when trying to do so.
1214
1215 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1216 main arena, described by the main_arena variable. When additional
1217 threads are spawned, each thread receives its own arena (up to a
1218 configurable limit, after which arenas are reused for multiple
1219 threads), and the chunks in these arenas have the A bit set. To
1220 find the arena for a chunk on such a non-main arena, heap_for_ptr
1221 performs a bit mask operation and indirection through the ar_ptr
1222 member of the per-heap header heap_info (see arena.c).
1223
1224 Note that the `foot' of the current chunk is actually represented
1225 as the prev_size of the NEXT chunk. This makes it easier to
1226 deal with alignments etc but can be very confusing when trying
1227 to extend or adapt this code.
1228
1229 The three exceptions to all this are:
1230
1231 1. The special chunk `top' doesn't bother using the
1232 trailing size field since there is no next contiguous chunk
1233 that would have to index off it. After initialization, `top'
1234 is forced to always exist. If it would become less than
1235 MINSIZE bytes long, it is replenished.
1236
1237 2. Chunks allocated via mmap, which have the second-lowest-order
1238 bit M (IS_MMAPPED) set in their size fields. Because they are
1239 allocated one-by-one, each must contain its own trailing size
1240 field. If the M bit is set, the other bits are ignored
1241 (because mmapped chunks are neither in an arena, nor adjacent
1242 to a freed chunk). The M bit is also used for chunks which
1243 originally came from a dumped heap via malloc_set_state in
1244 hooks.c.
1245
1246 3. Chunks in fastbins are treated as allocated chunks from the
1247 point of view of the chunk allocator. They are consolidated
1248 with their neighbors only in bulk, in malloc_consolidate.
1249 */
1250
1251 /*
1252 ---------- Size and alignment checks and conversions ----------
1253 */
1254
1255 /* Conversion from malloc headers to user pointers, and back. When
1256 using memory tagging the user data and the malloc data structure
1257 headers have distinct tags. Converting fully from one to the other
1258 involves extracting the tag at the other address and creating a
1259 suitable pointer using it. That can be quite expensive. There are
1260 cases when the pointers are not dereferenced (for example only used
1261 for alignment check) so the tags are not relevant, and there are
1262 cases when user data is not tagged distinctly from malloc headers
1263 (user data is untagged because tagging is done late in malloc and
1264 early in free). User memory tagging across internal interfaces:
1265
1266 sysmalloc: Returns untagged memory.
1267 _int_malloc: Returns untagged memory.
1268 _int_free: Takes untagged memory.
1269 _int_memalign: Returns untagged memory.
1271 _mid_memalign: Returns tagged memory.
1272 _int_realloc: Takes and returns tagged memory.
1273 */
1274
1275 /* The chunk header is two SIZE_SZ elements, but this is used widely, so
1276 we define it here for clarity later. */
1277 #define CHUNK_HDR_SZ (2 * SIZE_SZ)
1278
1279 /* Convert a chunk address to a user mem pointer without correcting
1280 the tag. */
1281 #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
1282
1283 /* Convert a chunk address to a user mem pointer and extract the right tag. */
1284 #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ))
1285
1286 /* Convert a user mem pointer to a chunk address and extract the right tag. */
1287 #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
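/* Relationship between the two views (editor's sketch): a user pointer
   MEM sits CHUNK_HDR_SZ bytes past the start of its chunk, so going back
   and forth is just pointer arithmetic, plus retagging when memory
   tagging is enabled:

     mchunkptr p = mem2chunk (mem);    // header of MEM's chunk
     void *again = chunk2mem_tag (p);  // equals MEM (same tag)
*/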
1288
1289 /* The smallest possible chunk */
1290 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1291
1292 /* The smallest size we can malloc is an aligned minimal chunk */
1293
1294 #define MINSIZE \
1295 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1296
1297 /* Check if m has acceptable alignment */
1298
1299 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1300
1301 #define misaligned_chunk(p) \
1302 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
1303 & MALLOC_ALIGN_MASK)
1304
1305 /* pad request bytes into a usable size -- internal version */
1306 /* Note: This must be a macro that evaluates to a compile time constant
1307 if passed a literal constant. */
1308 #define request2size(req) \
1309 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1310 MINSIZE : \
1311 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1312
1313 /* Check if REQ overflows when padded and aligned and if the resulting
1314 value is not larger than PTRDIFF_MAX. Returns the requested size or
1315 MINSIZE in case the value is less than MINSIZE, or 0 if any of the
1316 previous checks fail. */
1317 static inline size_t
1318 checked_request2size (size_t req) __nonnull (1)
1319 {
1320 if (__glibc_unlikely (req > PTRDIFF_MAX))
1321 return 0;
1322
1323 /* When using tagged memory, we cannot share the end of the user
1324 block with the header for the next chunk, so ensure that we
1325 allocate blocks that are rounded up to the granule size. Take
1326 care not to overflow from close to MAX_SIZE_T to a small
1327 number. Ideally, this would be part of request2size(), but that
1328 must be a macro that produces a compile time constant if passed
1329 a constant literal. */
1330 if (__glibc_unlikely (mtag_enabled))
1331 {
1332 /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */
1333 asm ("");
1334
1335 req = (req + (__MTAG_GRANULE_SIZE - 1)) &
1336 ~(size_t)(__MTAG_GRANULE_SIZE - 1);
1337 }
1338
1339 return request2size (req);
1340 }
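/* Worked example (editor's sketch, 64-bit, no memory tagging): a request
   for 100 bytes becomes a 112-byte chunk, while tiny requests are bumped
   up to MINSIZE and oversized ones are rejected:

     size_t a = checked_request2size (100);  // (100 + 8 + 15) & ~15 == 112
     size_t b = checked_request2size (5);    // MINSIZE == 32
     size_t c = checked_request2size ((size_t) PTRDIFF_MAX + 1);  // 0
*/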
1341
1342 /*
1343 --------------- Physical chunk operations ---------------
1344 */
1345
1346
1347 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1348 #define PREV_INUSE 0x1
1349
1350 /* extract inuse bit of previous chunk */
1351 #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1352
1353
1354 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1355 #define IS_MMAPPED 0x2
1356
1357 /* check for mmap()'ed chunk */
1358 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1359
1360
1361 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1362 from a non-main arena. This is only set immediately before handing
1363 the chunk to the user, if necessary. */
1364 #define NON_MAIN_ARENA 0x4
1365
1366 /* Check for chunk from main arena. */
1367 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1368
1369 /* Mark a chunk as not being on the main arena. */
1370 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1371
1372
1373 /*
1374 Bits to mask off when extracting size
1375
1376 Note: IS_MMAPPED is intentionally not masked off from size field in
1377 macros for which mmapped chunks should never be seen. This should
1378 cause helpful core dumps to occur if it is tried by accident by
1379 people extending or adapting this malloc.
1380 */
1381 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1382
1383 /* Get size, ignoring use bits */
1384 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1385
1386 /* Like chunksize, but do not mask SIZE_BITS. */
1387 #define chunksize_nomask(p) ((p)->mchunk_size)
1388
1389 /* Ptr to next physical malloc_chunk. */
1390 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1391
1392 /* Size of the chunk below P. Only valid if !prev_inuse (P). */
1393 #define prev_size(p) ((p)->mchunk_prev_size)
1394
1395 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1396 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1397
1398 /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1399 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1400
1401 /* Treat space at ptr + offset as a chunk */
1402 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1403
1404 /* extract p's inuse bit */
1405 #define inuse(p) \
1406 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1407
1408 /* set/clear chunk as being inuse without otherwise disturbing */
1409 #define set_inuse(p) \
1410 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1411
1412 #define clear_inuse(p) \
1413 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1414
1415
1416 /* check/set/clear inuse bits in known places */
1417 #define inuse_bit_at_offset(p, s) \
1418 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1419
1420 #define set_inuse_bit_at_offset(p, s) \
1421 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1422
1423 #define clear_inuse_bit_at_offset(p, s) \
1424 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1425
1426
1427 /* Set size at head, without disturbing its use bit */
1428 #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1429
1430 /* Set size/use field */
1431 #define set_head(p, s) ((p)->mchunk_size = (s))
1432
1433 /* Set size at footer (only when chunk is not in use) */
1434 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1435
1436 #pragma GCC poison mchunk_size
1437 #pragma GCC poison mchunk_prev_size
1438
1439 /* This is the size of the real usable data in the chunk. Not valid for
1440 dumped heap chunks. */
1441 #define memsize(p) \
1442 (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
1443 chunksize (p) - CHUNK_HDR_SZ : \
1444 chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1445
1446 /* If memory tagging is enabled, the layout changes to accommodate the granule
1447 size; this is wasteful for small allocations, so it is not done by default.
1448 Both the chunk header and the user data have to be granule aligned. */
1449 _Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
1450 "memory tagging is not supported with large granule.");
1451
1452 static __always_inline void *
1453 tag_new_usable (void *ptr)
1454 {
1455 if (__glibc_unlikely (mtag_enabled) && ptr)
1456 {
1457 mchunkptr cp = mem2chunk(ptr);
1458 ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
1459 }
1460 return ptr;
1461 }
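/* Sketch (hypothetical fragment, not part of the build) of how the
   allocation paths use this helper just before returning to the caller:
   the freshly chosen tag is applied to the whole usable region, so
   ordinary loads and stores through the returned pointer carry a
   matching tag. */
#if 0
  void *victim = _int_malloc (ar_ptr, bytes);   /* ar_ptr/bytes from caller context */
  victim = tag_new_usable (victim);
  return victim;
#endif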
1462
1463 /*
1464 -------------------- Internal data structures --------------------
1465
1466 All internal state is held in an instance of malloc_state defined
1467 below. There are no other static variables, except in two optional
1468 cases:
1469 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1470 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1471 for mmap.
1472
1473 Beware of lots of tricks that minimize the total bookkeeping space
1474 requirements. The result is a little over 1K bytes (for 4-byte
1475 pointers and size_t.)
1476 */
1477
1478 /*
1479 Bins
1480
1481 An array of bin headers for free chunks. Each bin is doubly
1482 linked. The bins are approximately proportionally (log) spaced.
1483 There are a lot of these bins (128). This may look excessive, but
1484 works very well in practice. Most bins hold sizes that are
1485 unusual as malloc request sizes, but are more usual for fragments
1486 and consolidated sets of chunks, which is what these bins hold, so
1487 they can be found quickly. All procedures maintain the invariant
1488 that no consolidated chunk physically borders another one, so each
1489 chunk in a list is known to be preceded and followed by either
1490 inuse chunks or the ends of memory.
1491
1492 Chunks in bins are kept in size order, with ties going to the
1493 approximately least recently used chunk. Ordering isn't needed
1494 for the small bins, which all contain the same-sized chunks, but
1495 facilitates best-fit allocation for larger chunks. These lists
1496 are just sequential. Keeping them in order almost never requires
1497 enough traversal to warrant using fancier ordered data
1498 structures.
1499
1500 Chunks of the same size are linked with the most
1501 recently freed at the front, and allocations are taken from the
1502 back. This results in LRU (FIFO) allocation order, which tends
1503 to give each chunk an equal opportunity to be consolidated with
1504 adjacent freed chunks, resulting in larger free chunks and less
1505 fragmentation.
1506
1507 To simplify use in double-linked lists, each bin header acts
1508 as a malloc_chunk. This avoids special-casing for headers.
1509 But to conserve space and improve locality, we allocate
1510 only the fd/bk pointers of bins, and then use repositioning tricks
1511 to treat these as the fields of a malloc_chunk*.
1512 */
1513
1514 typedef struct malloc_chunk *mbinptr;
1515
1516 /* addressing -- note that bin_at(0) does not exist */
1517 #define bin_at(m, i) \
1518 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1519 - offsetof (struct malloc_chunk, fd))
1520
1521 /* analog of ++bin */
1522 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1523
1524 /* Reminders about list directionality within bins */
1525 #define first(b) ((b)->fd)
1526 #define last(b) ((b)->bk)
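/* Illustrative sketch (not part of the build): a bin header occupies two
   consecutive pointers in the bins[] array, and bin_at() rewinds the
   address so those two slots line up with the fd/bk fields of a chunk.
   An initialised but unused bin is therefore a circular list of length
   one. */
#if 0
  mbinptr b = bin_at (av, 42);        /* header of (hypothetical) bin 42 */
  int empty = (first (b) == b);       /* fd points back at the header */
#endif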
1527
1528 /*
1529 Indexing
1530
1531 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1532 8 bytes apart. Larger bins are approximately logarithmically spaced:
1533
1534 64 bins of size 8
1535 32 bins of size 64
1536 16 bins of size 512
1537 8 bins of size 4096
1538 4 bins of size 32768
1539 2 bins of size 262144
1540 1 bin of size what's left
1541
1542 There is actually a little bit of slop in the numbers in bin_index
1543 for the sake of speed. This makes no difference elsewhere.
1544
1545 The bins top out around 1MB because we expect to service large
1546 requests via mmap.
1547
1548 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1549 a valid chunk size, the small bins are bumped up one.
1550 */
1551
1552 #define NBINS 128
1553 #define NSMALLBINS 64
1554 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1555 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
1556 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1557
1558 #define in_smallbin_range(sz) \
1559 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1560
1561 #define smallbin_index(sz) \
1562 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1563 + SMALLBIN_CORRECTION)
1564
1565 #define largebin_index_32(sz) \
1566 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1567 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1568 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1569 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1570 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1571 126)
1572
1573 #define largebin_index_32_big(sz) \
1574 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1575 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1576 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1577 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1578 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1579 126)
1580
1581 // XXX It remains to be seen whether it is good to keep the widths of
1582 // XXX the buckets the same or whether it should be scaled by a factor
1583 // XXX of two as well.
1584 #define largebin_index_64(sz) \
1585 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1586 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1587 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1588 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1589 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1590 126)
1591
1592 #define largebin_index(sz) \
1593 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1594 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1595 : largebin_index_32 (sz))
1596
1597 #define bin_index(sz) \
1598 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
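/* Worked example (a sketch, not part of the build), assuming a typical
   64-bit configuration with SIZE_SZ == 8, SMALLBIN_WIDTH == 16 and
   SMALLBIN_CORRECTION == 0. */
#if 0
  assert (bin_index (160) == 10);    /* small bin: 160 >> 4 */
  assert (bin_index (2048) == 80);   /* large bin: 48 + (2048 >> 6) */
#endif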
1599
1600 /* Take a chunk off a bin list. */
1601 static void
1602 unlink_chunk (mstate av, mchunkptr p)
1603 {
1604 if (chunksize (p) != prev_size (next_chunk (p)))
1605 malloc_printerr ("corrupted size vs. prev_size");
1606
1607 mchunkptr fd = p->fd;
1608 mchunkptr bk = p->bk;
1609
1610 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1611 malloc_printerr ("corrupted double-linked list");
1612
1613 fd->bk = bk;
1614 bk->fd = fd;
1615 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1616 {
1617 if (p->fd_nextsize->bk_nextsize != p
1618 || p->bk_nextsize->fd_nextsize != p)
1619 malloc_printerr ("corrupted double-linked list (not small)");
1620
1621 if (fd->fd_nextsize == NULL)
1622 {
1623 if (p->fd_nextsize == p)
1624 fd->fd_nextsize = fd->bk_nextsize = fd;
1625 else
1626 {
1627 fd->fd_nextsize = p->fd_nextsize;
1628 fd->bk_nextsize = p->bk_nextsize;
1629 p->fd_nextsize->bk_nextsize = fd;
1630 p->bk_nextsize->fd_nextsize = fd;
1631 }
1632 }
1633 else
1634 {
1635 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1636 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1637 }
1638 }
1639 }
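/* Sketch (fragment, not part of the build) of the typical caller pattern,
   as in _int_free: if the chunk physically below P is free, it is taken
   off its bin and folded into P before P is placed on the unsorted list. */
#if 0
  if (!prev_inuse (p))
    {
      INTERNAL_SIZE_T prevsize = prev_size (p);
      size += prevsize;
      p = chunk_at_offset (p, -((long) prevsize));
      unlink_chunk (av, p);
    }
#endif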
1640
1641 /*
1642 Unsorted chunks
1643
1644 All remainders from chunk splits, as well as all returned chunks,
1645 are first placed in the "unsorted" bin. They are then placed
1646 in regular bins after malloc gives them ONE chance to be used before
1647 binning. So, basically, the unsorted_chunks list acts as a queue,
1648 with chunks being placed on it in free (and malloc_consolidate),
1649 and taken off (to be either used or placed in bins) in malloc.
1650
1651 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1652 does not have to be taken into account in size comparisons.
1653 */
1654
1655 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1656 #define unsorted_chunks(M) (bin_at (M, 1))
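/* Sketch (fragment, not part of the build) of how free() pushes a chunk
   onto the head of the unsorted list; the real code in _int_free also
   updates the size fields and, for large chunks, clears the nextsize
   links. */
#if 0
  mchunkptr bck = unsorted_chunks (av);
  mchunkptr fwd = bck->fd;
  p->fd = fwd;
  p->bk = bck;
  bck->fd = p;
  fwd->bk = p;
#endif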
1657
1658 /*
1659 Top
1660
1661 The top-most available chunk (i.e., the one bordering the end of
1662 available memory) is treated specially. It is never included in
1663 any bin, is used only if no other chunk is available, and is
1664 released back to the system if it is very large (see
1665 M_TRIM_THRESHOLD). Because top initially
1666 points to its own bin with initial zero size, thus forcing
1667 extension on the first malloc request, we avoid having any special
1668 code in malloc to check whether it even exists yet. But we still
1669 need to do so when getting memory from system, so we make
1670 initial_top treat the bin as a legal but unusable chunk during the
1671 interval between initialization and the first call to
1672 sysmalloc. (This is somewhat delicate, since it relies on
1673 the 2 preceding words to be zero during this interval as well.)
1674 */
1675
1676 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1677 #define initial_top(M) (unsorted_chunks (M))
1678
1679 /*
1680 Binmap
1681
1682 To help compensate for the large number of bins, a one-level index
1683 structure is used for bin-by-bin searching. `binmap' is a
1684 bitvector recording whether bins are definitely empty so they can
1685 be skipped over during traversals. The bits are NOT always
1686 cleared as soon as bins are empty, but instead only
1687 when they are noticed to be empty during traversal in malloc.
1688 */
1689
1690 /* Conservatively use 32 bits per map word, even if on 64bit system */
1691 #define BINMAPSHIFT 5
1692 #define BITSPERMAP (1U << BINMAPSHIFT)
1693 #define BINMAPSIZE (NBINS / BITSPERMAP)
1694
1695 #define idx2block(i) ((i) >> BINMAPSHIFT)
1696 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1697
1698 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1699 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1700 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
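/* Illustrative sketch (not part of the build): bin I maps to bit (I mod 32)
   of binmap word (I / 32); e.g. bin 70 is bit 6 of word 2. */
#if 0
  mark_bin (av, 70);                        /* binmap[2] |= 1U << 6 */
  unsigned int maybe_nonempty = get_binmap (av, 70);
  unmark_bin (av, 70);                      /* cleared lazily in real traversals */
#endif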
1701
1702 /*
1703 Fastbins
1704
1705 An array of lists holding recently freed small chunks. Fastbins
1706 are not doubly linked. It is faster to single-link them, and
1707 since chunks are never removed from the middles of these lists,
1708 double linking is not necessary. Also, unlike regular bins, they
1709 are not even processed in FIFO order (they use faster LIFO) since
1710 ordering doesn't much matter in the transient contexts in which
1711 fastbins are normally used.
1712
1713 Chunks in fastbins keep their inuse bit set, so they cannot
1714 be consolidated with other free chunks. malloc_consolidate
1715 releases all chunks in fastbins and consolidates them with
1716 other free chunks.
1717 */
1718
1719 typedef struct malloc_chunk *mfastbinptr;
1720 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1721
1722 /* offset 2 to use otherwise unindexable first 2 bins */
1723 #define fastbin_index(sz) \
1724 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1725
1726
1727 /* The maximum fastbin request size we support */
1728 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1729
1730 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
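/* Worked example (a sketch, not part of the build), assuming SIZE_SZ == 8:
   the smallest chunk size (32 bytes) lands in fastbin 0, 48-byte chunks in
   fastbin 1, and so on up to MAX_FAST_SIZE. */
#if 0
  assert (fastbin_index (32) == 0);   /* (32 >> 4) - 2 */
  assert (fastbin_index (48) == 1);   /* (48 >> 4) - 2 */
#endif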
1731
1732 /*
1733 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1734 that triggers automatic consolidation of possibly-surrounding
1735 fastbin chunks. This is a heuristic, so the exact value should not
1736 matter too much. It is defined at half the default trim threshold as a
1737 compromise heuristic to only attempt consolidation if it is likely
1738 to lead to trimming. However, it is not dynamically tunable, since
1739 consolidation reduces fragmentation surrounding large chunks even
1740 if trimming is not used.
1741 */
1742
1743 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1744
1745 /*
1746 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1747 regions. Otherwise, contiguity is exploited in merging together,
1748 when possible, results from consecutive MORECORE calls.
1749
1750 The initial value comes from MORECORE_CONTIGUOUS, but is
1751 changed dynamically if mmap is ever used as an sbrk substitute.
1752 */
1753
1754 #define NONCONTIGUOUS_BIT (2U)
1755
1756 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1757 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1758 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1759 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1760
1761 /* Maximum size of memory handled in fastbins. */
1762 static uint8_t global_max_fast;
1763
1764 /*
1765 Set value of max_fast.
1766 Use impossibly small value if 0.
1767 Precondition: there are no existing fastbin chunks in the main arena.
1768 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1769 before changing max_fast. Note other arenas will leak their fast bin
1770 entries if max_fast is reduced.
1771 */
1772
1773 #define set_max_fast(s) \
1774 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
1775 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
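/* Worked example (a sketch, not part of the build), assuming 8-byte
   SIZE_SZ and 16-byte MALLOC_ALIGNMENT: a limit of 128 stays 128 after
   rounding, while mallopt (M_MXFAST, 0) effectively disables fastbins by
   installing an impossibly small bound. */
#if 0
  set_max_fast (128);   /* global_max_fast == (128 + SIZE_SZ) & ~MALLOC_ALIGN_MASK == 128 */
  set_max_fast (0);     /* global_max_fast == MIN_CHUNK_SIZE / 2 */
#endif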
1776
1777 static inline INTERNAL_SIZE_T
1778 get_max_fast (void)
1779 {
1780 /* Tell the GCC optimizers that global_max_fast is never larger
1781 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1782 _int_malloc after constant propagation of the size parameter.
1783 (The code never executes because malloc preserves the
1784 global_max_fast invariant, but the optimizers may not recognize
1785 this.) */
1786 if (global_max_fast > MAX_FAST_SIZE)
1787 __builtin_unreachable ();
1788 return global_max_fast;
1789 }
1790
1791 /*
1792 ----------- Internal state representation and initialization -----------
1793 */
1794
1795 /*
1796 have_fastchunks indicates that there are probably some fastbin chunks.
1797 It is set true on entering a chunk into any fastbin, and cleared early in
1798 malloc_consolidate. The value is approximate since it may be set when there
1799 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1800 available. Given its sole purpose is to reduce the number of redundant calls to
1801 malloc_consolidate, it does not affect correctness. As a result we can safely
1802 use relaxed atomic accesses.
1803 */
1804
1805
1806 struct malloc_state
1807 {
1808 /* Serialize access. */
1809 __libc_lock_define (, mutex);
1810
1811 /* Flags (formerly in max_fast). */
1812 int flags;
1813
1814 /* Set if the fastbin chunks contain recently inserted free blocks. */
1815 /* Note this is a bool but not all targets support atomics on booleans. */
1816 int have_fastchunks;
1817
1818 /* Fastbins */
1819 mfastbinptr fastbinsY[NFASTBINS];
1820
1821 /* Base of the topmost chunk -- not otherwise kept in a bin */
1822 mchunkptr top;
1823
1824 /* The remainder from the most recent split of a small request */
1825 mchunkptr last_remainder;
1826
1827 /* Normal bins packed as described above */
1828 mchunkptr bins[NBINS * 2 - 2];
1829
1830 /* Bitmap of bins */
1831 unsigned int binmap[BINMAPSIZE];
1832
1833 /* Linked list */
1834 struct malloc_state *next;
1835
1836 /* Linked list for free arenas. Access to this field is serialized
1837 by free_list_lock in arena.c. */
1838 struct malloc_state *next_free;
1839
1840 /* Number of threads attached to this arena. 0 if the arena is on
1841 the free list. Access to this field is serialized by
1842 free_list_lock in arena.c. */
1843 INTERNAL_SIZE_T attached_threads;
1844
1845 /* Memory allocated from the system in this arena. */
1846 INTERNAL_SIZE_T system_mem;
1847 INTERNAL_SIZE_T max_system_mem;
1848 };
1849
1850 struct malloc_par
1851 {
1852 /* Tunable parameters */
1853 unsigned long trim_threshold;
1854 INTERNAL_SIZE_T top_pad;
1855 INTERNAL_SIZE_T mmap_threshold;
1856 INTERNAL_SIZE_T arena_test;
1857 INTERNAL_SIZE_T arena_max;
1858
1859 /* Transparent Large Page support. */
1860 INTERNAL_SIZE_T thp_pagesize;
1861 /* A value other than 0 means to align mmap allocations to hp_pagesize and
1862 to add hp_flags to the mmap flags. */
1863 INTERNAL_SIZE_T hp_pagesize;
1864 int hp_flags;
1865
1866 /* Memory map support */
1867 int n_mmaps;
1868 int n_mmaps_max;
1869 int max_n_mmaps;
1870 /* the mmap_threshold is dynamic, until the user sets
1871 it manually, at which point we need to disable any
1872 dynamic behavior. */
1873 int no_dyn_threshold;
1874
1875 /* Statistics */
1876 INTERNAL_SIZE_T mmapped_mem;
1877 INTERNAL_SIZE_T max_mmapped_mem;
1878
1879 /* First address handed out by MORECORE/sbrk. */
1880 char *sbrk_base;
1881
1882 #if USE_TCACHE
1883 /* Maximum number of buckets to use. */
1884 size_t tcache_bins;
1885 size_t tcache_max_bytes;
1886 /* Maximum number of chunks in each bucket. */
1887 size_t tcache_count;
1888 /* Maximum number of chunks to remove from the unsorted list, which
1889 aren't used to prefill the cache. */
1890 size_t tcache_unsorted_limit;
1891 #endif
1892 };
1893
1894 /* There are several instances of this struct ("arenas") in this
1895 malloc. If you are adapting this malloc in a way that does NOT use
1896 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1897 before using. This malloc relies on the property that malloc_state
1898 is initialized to all zeroes (as is true of C statics). */
1899
1900 static struct malloc_state main_arena =
1901 {
1902 .mutex = _LIBC_LOCK_INITIALIZER,
1903 .next = &main_arena,
1904 .attached_threads = 1
1905 };
1906
1907 /* There is only one instance of the malloc parameters. */
1908
1909 static struct malloc_par mp_ =
1910 {
1911 .top_pad = DEFAULT_TOP_PAD,
1912 .n_mmaps_max = DEFAULT_MMAP_MAX,
1913 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1914 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1915 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1916 .arena_test = NARENAS_FROM_NCORES (1)
1917 #if USE_TCACHE
1918 ,
1919 .tcache_count = TCACHE_FILL_COUNT,
1920 .tcache_bins = TCACHE_MAX_BINS,
1921 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1922 .tcache_unsorted_limit = 0 /* No limit. */
1923 #endif
1924 };
1925
1926 /*
1927 Initialize a malloc_state struct.
1928
1929 This is called from ptmalloc_init () or from _int_new_arena ()
1930 when creating a new arena.
1931 */
1932
1933 static void
1934 malloc_init_state (mstate av)
1935 {
1936 int i;
1937 mbinptr bin;
1938
1939 /* Establish circular links for normal bins */
1940 for (i = 1; i < NBINS; ++i)
1941 {
1942 bin = bin_at (av, i);
1943 bin->fd = bin->bk = bin;
1944 }
1945
1946 #if MORECORE_CONTIGUOUS
1947 if (av != &main_arena)
1948 #endif
1949 set_noncontiguous (av);
1950 if (av == &main_arena)
1951 set_max_fast (DEFAULT_MXFAST);
1952 atomic_store_relaxed (&av->have_fastchunks, false);
1953
1954 av->top = initial_top (av);
1955 }
1956
1957 /*
1958 Other internal utilities operating on mstates
1959 */
1960
1961 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1962 static int systrim (size_t, mstate);
1963 static void malloc_consolidate (mstate);
1964
1965
1966 /* -------------- Early definitions for debugging hooks ---------------- */
1967
1968 /* This function is called from the arena shutdown hook, to free the
1969 thread cache (if it exists). */
1970 static void tcache_thread_shutdown (void);
1971
1972 /* ------------------ Testing support ----------------------------------*/
1973
1974 static int perturb_byte;
1975
1976 static void
1977 alloc_perturb (char *p, size_t n)
1978 {
1979 if (__glibc_unlikely (perturb_byte))
1980 memset (p, perturb_byte ^ 0xff, n);
1981 }
1982
1983 static void
1984 free_perturb (char *p, size_t n)
1985 {
1986 if (__glibc_unlikely (perturb_byte))
1987 memset (p, perturb_byte, n);
1988 }
1989
1990
1991
1992 #include <stap-probe.h>
1993
1994 /* ----------- Routines dealing with transparent huge pages ----------- */
1995
1996 static inline void
1997 madvise_thp (void *p, INTERNAL_SIZE_T size)
1998 {
1999 #ifdef MADV_HUGEPAGE
2000 /* Do not consider areas smaller than a huge page or if the tunable is
2001 not active. */
2002 if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
2003 return;
2004
2005 /* Linux requires the input address to be page-aligned, and unaligned
2006 inputs happen only for the initial data segment. */
2007 if (__glibc_unlikely (!PTR_IS_ALIGNED (p, GLRO (dl_pagesize))))
2008 {
2009 void *q = PTR_ALIGN_DOWN (p, GLRO (dl_pagesize));
2010 size += PTR_DIFF (p, q);
2011 p = q;
2012 }
2013
2014 __madvise (p, size, MADV_HUGEPAGE);
2015 #endif
2016 }
2017
2018 /* ------------------- Support for multiple arenas -------------------- */
2019 #include "arena.c"
2020
2021 /*
2022 Debugging support
2023
2024 These routines make a number of assertions about the states
2025 of data structures that should be true at all times. If any
2026 are not true, it's very likely that a user program has somehow
2027 trashed memory. (It's also possible that there is a coding error
2028 in malloc. In which case, please report it!)
2029 */
2030
2031 #if !MALLOC_DEBUG
2032
2033 # define check_chunk(A, P)
2034 # define check_free_chunk(A, P)
2035 # define check_inuse_chunk(A, P)
2036 # define check_remalloced_chunk(A, P, N)
2037 # define check_malloced_chunk(A, P, N)
2038 # define check_malloc_state(A)
2039
2040 #else
2041
2042 # define check_chunk(A, P) do_check_chunk (A, P)
2043 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
2044 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2045 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2046 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2047 # define check_malloc_state(A) do_check_malloc_state (A)
2048
2049 /*
2050 Properties of all chunks
2051 */
2052
2053 static void
2054 do_check_chunk (mstate av, mchunkptr p)
2055 {
2056 unsigned long sz = chunksize (p);
2057 /* min and max possible addresses assuming contiguous allocation */
2058 char *max_address = (char *) (av->top) + chunksize (av->top);
2059 char *min_address = max_address - av->system_mem;
2060
2061 if (!chunk_is_mmapped (p))
2062 {
2063 /* Has legal address ... */
2064 if (p != av->top)
2065 {
2066 if (contiguous (av))
2067 {
2068 assert (((char *) p) >= min_address);
2069 assert (((char *) p + sz) <= ((char *) (av->top)));
2070 }
2071 }
2072 else
2073 {
2074 /* top size is always at least MINSIZE */
2075 assert ((unsigned long) (sz) >= MINSIZE);
2076 /* top predecessor always marked inuse */
2077 assert (prev_inuse (p));
2078 }
2079 }
2080 else
2081 {
2082 /* address is outside main heap */
2083 if (contiguous (av) && av->top != initial_top (av))
2084 {
2085 assert (((char *) p) < min_address || ((char *) p) >= max_address);
2086 }
2087 /* chunk is page-aligned */
2088 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
2089 /* mem is aligned */
2090 assert (aligned_OK (chunk2mem (p)));
2091 }
2092 }
2093
2094 /*
2095 Properties of free chunks
2096 */
2097
2098 static void
2099 do_check_free_chunk (mstate av, mchunkptr p)
2100 {
2101 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2102 mchunkptr next = chunk_at_offset (p, sz);
2103
2104 do_check_chunk (av, p);
2105
2106 /* Chunk must claim to be free ... */
2107 assert (!inuse (p));
2108 assert (!chunk_is_mmapped (p));
2109
2110 /* Unless a special marker, must have OK fields */
2111 if ((unsigned long) (sz) >= MINSIZE)
2112 {
2113 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2114 assert (aligned_OK (chunk2mem (p)));
2115 /* ... matching footer field */
2116 assert (prev_size (next_chunk (p)) == sz);
2117 /* ... and is fully consolidated */
2118 assert (prev_inuse (p));
2119 assert (next == av->top || inuse (next));
2120
2121 /* ... and has minimally sane links */
2122 assert (p->fd->bk == p);
2123 assert (p->bk->fd == p);
2124 }
2125 else /* markers are always of size SIZE_SZ */
2126 assert (sz == SIZE_SZ);
2127 }
2128
2129 /*
2130 Properties of inuse chunks
2131 */
2132
2133 static void
2134 do_check_inuse_chunk (mstate av, mchunkptr p)
2135 {
2136 mchunkptr next;
2137
2138 do_check_chunk (av, p);
2139
2140 if (chunk_is_mmapped (p))
2141 return; /* mmapped chunks have no next/prev */
2142
2143 /* Check whether it claims to be in use ... */
2144 assert (inuse (p));
2145
2146 next = next_chunk (p);
2147
2148 /* ... and is surrounded by OK chunks.
2149 Since more things can be checked with free chunks than inuse ones,
2150 if an inuse chunk borders them and debug is on, it's worth doing them.
2151 */
2152 if (!prev_inuse (p))
2153 {
2154 /* Note that we cannot even look at prev unless it is not inuse */
2155 mchunkptr prv = prev_chunk (p);
2156 assert (next_chunk (prv) == p);
2157 do_check_free_chunk (av, prv);
2158 }
2159
2160 if (next == av->top)
2161 {
2162 assert (prev_inuse (next));
2163 assert (chunksize (next) >= MINSIZE);
2164 }
2165 else if (!inuse (next))
2166 do_check_free_chunk (av, next);
2167 }
2168
2169 /*
2170 Properties of chunks recycled from fastbins
2171 */
2172
2173 static void
2174 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2175 {
2176 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2177
2178 if (!chunk_is_mmapped (p))
2179 {
2180 assert (av == arena_for_chunk (p));
2181 if (chunk_main_arena (p))
2182 assert (av == &main_arena);
2183 else
2184 assert (av != &main_arena);
2185 }
2186
2187 do_check_inuse_chunk (av, p);
2188
2189 /* Legal size ... */
2190 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2191 assert ((unsigned long) (sz) >= MINSIZE);
2192 /* ... and alignment */
2193 assert (aligned_OK (chunk2mem (p)));
2194 /* chunk is less than MINSIZE more than request */
2195 assert ((long) (sz) - (long) (s) >= 0);
2196 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2197 }
2198
2199 /*
2200 Properties of nonrecycled chunks at the point they are malloced
2201 */
2202
2203 static void
2204 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2205 {
2206 /* same as recycled case ... */
2207 do_check_remalloced_chunk (av, p, s);
2208
2209 /*
2210 ... plus, must obey implementation invariant that prev_inuse is
2211 always true of any allocated chunk; i.e., that each allocated
2212 chunk borders either a previously allocated and still in-use
2213 chunk, or the base of its memory arena. This is ensured
2214 by making all allocations from the `lowest' part of any found
2215 chunk. This does not necessarily hold, however, for chunks
2216 recycled via fastbins.
2217 */
2218
2219 assert (prev_inuse (p));
2220 }
2221
2222
2223 /*
2224 Properties of malloc_state.
2225
2226 This may be useful for debugging malloc, as well as detecting user
2227 programmer errors that somehow write into malloc_state.
2228
2229 If you are extending or experimenting with this malloc, you can
2230 probably figure out how to hack this routine to print out or
2231 display chunk addresses, sizes, bins, and other instrumentation.
2232 */
2233
2234 static void
2235 do_check_malloc_state (mstate av)
2236 {
2237 int i;
2238 mchunkptr p;
2239 mchunkptr q;
2240 mbinptr b;
2241 unsigned int idx;
2242 INTERNAL_SIZE_T size;
2243 unsigned long total = 0;
2244 int max_fast_bin;
2245
2246 /* internal size_t must be no wider than pointer type */
2247 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2248
2249 /* alignment is a power of 2 */
2250 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2251
2252 /* Check the arena is initialized. */
2253 assert (av->top != 0);
2254
2255 /* No memory has been allocated yet, so doing more tests is not possible. */
2256 if (av->top == initial_top (av))
2257 return;
2258
2259 /* pagesize is a power of 2 */
2260 assert (powerof2(GLRO (dl_pagesize)));
2261
2262 /* A contiguous main_arena is consistent with sbrk_base. */
2263 if (av == &main_arena && contiguous (av))
2264 assert ((char *) mp_.sbrk_base + av->system_mem ==
2265 (char *) av->top + chunksize (av->top));
2266
2267 /* properties of fastbins */
2268
2269 /* max_fast is in allowed range */
2270 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2271
2272 max_fast_bin = fastbin_index (get_max_fast ());
2273
2274 for (i = 0; i < NFASTBINS; ++i)
2275 {
2276 p = fastbin (av, i);
2277
2278 /* The following test can only be performed for the main arena.
2279 While mallopt calls malloc_consolidate to get rid of all fast
2280 bins (especially those larger than the new maximum) this does
2281 only happen for the main arena. Trying to do this for any
2282 other arena would mean those arenas have to be locked and
2283 malloc_consolidate be called for them. This is excessive. And
2284 even if this is acceptable to somebody it still cannot solve
2285 the problem completely since if the arena is locked a
2286 concurrent malloc call might create a new arena which then
2287 could use the newly invalid fast bins. */
2288
2289 /* all bins past max_fast are empty */
2290 if (av == &main_arena && i > max_fast_bin)
2291 assert (p == 0);
2292
2293 while (p != 0)
2294 {
2295 if (__glibc_unlikely (misaligned_chunk (p)))
2296 malloc_printerr ("do_check_malloc_state(): "
2297 "unaligned fastbin chunk detected");
2298 /* each chunk claims to be inuse */
2299 do_check_inuse_chunk (av, p);
2300 total += chunksize (p);
2301 /* chunk belongs in this bin */
2302 assert (fastbin_index (chunksize (p)) == i);
2303 p = REVEAL_PTR (p->fd);
2304 }
2305 }
2306
2307 /* check normal bins */
2308 for (i = 1; i < NBINS; ++i)
2309 {
2310 b = bin_at (av, i);
2311
2312 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2313 if (i >= 2)
2314 {
2315 unsigned int binbit = get_binmap (av, i);
2316 int empty = last (b) == b;
2317 if (!binbit)
2318 assert (empty);
2319 else if (!empty)
2320 assert (binbit);
2321 }
2322
2323 for (p = last (b); p != b; p = p->bk)
2324 {
2325 /* each chunk claims to be free */
2326 do_check_free_chunk (av, p);
2327 size = chunksize (p);
2328 total += size;
2329 if (i >= 2)
2330 {
2331 /* chunk belongs in bin */
2332 idx = bin_index (size);
2333 assert (idx == i);
2334 /* lists are sorted */
2335 assert (p->bk == b ||
2336 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2337
2338 if (!in_smallbin_range (size))
2339 {
2340 if (p->fd_nextsize != NULL)
2341 {
2342 if (p->fd_nextsize == p)
2343 assert (p->bk_nextsize == p);
2344 else
2345 {
2346 if (p->fd_nextsize == first (b))
2347 assert (chunksize (p) < chunksize (p->fd_nextsize));
2348 else
2349 assert (chunksize (p) > chunksize (p->fd_nextsize));
2350
2351 if (p == first (b))
2352 assert (chunksize (p) > chunksize (p->bk_nextsize));
2353 else
2354 assert (chunksize (p) < chunksize (p->bk_nextsize));
2355 }
2356 }
2357 else
2358 assert (p->bk_nextsize == NULL);
2359 }
2360 }
2361 else if (!in_smallbin_range (size))
2362 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2363 /* chunk is followed by a legal chain of inuse chunks */
2364 for (q = next_chunk (p);
2365 (q != av->top && inuse (q) &&
2366 (unsigned long) (chunksize (q)) >= MINSIZE);
2367 q = next_chunk (q))
2368 do_check_inuse_chunk (av, q);
2369 }
2370 }
2371
2372 /* top chunk is OK */
2373 check_chunk (av, av->top);
2374 }
2375 #endif
2376
2377
2378 /* ----------------- Support for debugging hooks -------------------- */
2379 #if IS_IN (libc)
2380 #include "hooks.c"
2381 #endif
2382
2383
2384 /* ----------- Routines dealing with system allocation -------------- */
2385
2386 /*
2387 sysmalloc handles malloc cases requiring more memory from the system.
2388 On entry, it is assumed that av->top does not have enough
2389 space to service request for nb bytes, thus requiring that av->top
2390 be extended or replaced.
2391 */
2392
2393 static void *
2394 sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
2395 {
2396 long int size;
2397
2398 /*
2399 Round up size to nearest page. For mmapped chunks, the overhead is one
2400 SIZE_SZ unit larger than for normal chunks, because there is no
2401 following chunk whose prev_size field could be used.
2402
2403 See the front_misalign handling below; for glibc there is no need for
2404 further alignment unless we have high alignment.
2405 */
2406 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2407 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2408 else
2409 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2410
2411 /* Don't try if size wraps around 0. */
2412 if ((unsigned long) (size) <= (unsigned long) (nb))
2413 return MAP_FAILED;
2414
2415 char *mm = (char *) MMAP (0, size,
2416 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2417 extra_flags);
2418 if (mm == MAP_FAILED)
2419 return mm;
2420
2421 #ifdef MAP_HUGETLB
2422 if (!(extra_flags & MAP_HUGETLB))
2423 madvise_thp (mm, size);
2424 #endif
2425
2426 /*
2427 The offset to the start of the mmapped region is stored in the prev_size
2428 field of the chunk. This allows us to adjust returned start address to
2429 meet alignment requirements here and in memalign(), and still be able to
2430 compute proper address argument for later munmap in free() and realloc().
2431 */
2432
2433 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2434
2435 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2436 {
2437 /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
2438 MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
2439 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2440 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2441 front_misalign = 0;
2442 }
2443 else
2444 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2445
2446 mchunkptr p; /* the allocated/returned chunk */
2447
2448 if (front_misalign > 0)
2449 {
2450 ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
2451 p = (mchunkptr) (mm + correction);
2452 set_prev_size (p, correction);
2453 set_head (p, (size - correction) | IS_MMAPPED);
2454 }
2455 else
2456 {
2457 p = (mchunkptr) mm;
2458 set_prev_size (p, 0);
2459 set_head (p, size | IS_MMAPPED);
2460 }
2461
2462 /* update statistics */
2463 int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
2464 atomic_max (&mp_.max_n_mmaps, new);
2465
2466 unsigned long sum;
2467 sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
2468 atomic_max (&mp_.max_mmapped_mem, sum);
2469
2470 check_chunk (av, p);
2471
2472 return chunk2mem (p);
2473 }
2474
2475 /*
2476 Allocate memory using mmap() based on S and the requested size NB, aligning
2477 to PAGESIZE if required. EXTRA_FLAGS is passed to the mmap() call. If the
2478 call succeeds, S is updated with the allocated size. This is used as a
2479 fallback if MORECORE fails.
2480 */
2481 static void *
2482 sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
2483 INTERNAL_SIZE_T old_size, size_t minsize,
2484 size_t pagesize, int extra_flags, mstate av)
2485 {
2486 long int size = *s;
2487
2488 /* Cannot merge with old top, so add its size back in */
2489 if (contiguous (av))
2490 size = ALIGN_UP (size + old_size, pagesize);
2491
2492 /* If we are relying on mmap as backup, then use larger units */
2493 if ((unsigned long) (size) < minsize)
2494 size = minsize;
2495
2496 /* Don't try if size wraps around 0 */
2497 if ((unsigned long) (size) <= (unsigned long) (nb))
2498 return MORECORE_FAILURE;
2499
2500 char *mbrk = (char *) (MMAP (0, size,
2501 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2502 extra_flags));
2503 if (mbrk == MAP_FAILED)
2504 return MAP_FAILED;
2505
2506 #ifdef MAP_HUGETLB
2507 if (!(extra_flags & MAP_HUGETLB))
2508 madvise_thp (mbrk, size);
2509 #endif
2510
2511 /* Record that we no longer have a contiguous sbrk region. After the first
2512 time mmap is used as backup, we do not ever rely on contiguous space
2513 since this could incorrectly bridge regions. */
2514 set_noncontiguous (av);
2515
2516 *s = size;
2517 return mbrk;
2518 }
2519
2520 static void *
2521 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2522 {
2523 mchunkptr old_top; /* incoming value of av->top */
2524 INTERNAL_SIZE_T old_size; /* its size */
2525 char *old_end; /* its end address */
2526
2527 long size; /* arg to first MORECORE or mmap call */
2528 char *brk; /* return value from MORECORE */
2529
2530 long correction; /* arg to 2nd MORECORE call */
2531 char *snd_brk; /* 2nd return val */
2532
2533 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2534 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2535 char *aligned_brk; /* aligned offset into brk */
2536
2537 mchunkptr p; /* the allocated/returned chunk */
2538 mchunkptr remainder; /* remainder from allocation */
2539 unsigned long remainder_size; /* its size */
2540
2541
2542 size_t pagesize = GLRO (dl_pagesize);
2543 bool tried_mmap = false;
2544
2545
2546 /*
2547 If have mmap, and the request size meets the mmap threshold, and
2548 the system supports mmap, and there are few enough currently
2549 allocated mmapped regions, try to directly map this request
2550 rather than expanding top.
2551 */
2552
2553 if (av == NULL
2554 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2555 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2556 {
2557 char *mm;
2558 if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
2559 {
2560 /* There is no need to issue the THP madvise call if Huge Pages are
2561 used directly. */
2562 mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
2563 if (mm != MAP_FAILED)
2564 return mm;
2565 }
2566 mm = sysmalloc_mmap (nb, pagesize, 0, av);
2567 if (mm != MAP_FAILED)
2568 return mm;
2569 tried_mmap = true;
2570 }
2571
2572 /* There are no usable arenas and mmap also failed. */
2573 if (av == NULL)
2574 return 0;
2575
2576 /* Record incoming configuration of top */
2577
2578 old_top = av->top;
2579 old_size = chunksize (old_top);
2580 old_end = (char *) (chunk_at_offset (old_top, old_size));
2581
2582 brk = snd_brk = (char *) (MORECORE_FAILURE);
2583
2584 /*
2585 If not the first time through, we require old_size to be
2586 at least MINSIZE and to have prev_inuse set.
2587 */
2588
2589 assert ((old_top == initial_top (av) && old_size == 0) ||
2590 ((unsigned long) (old_size) >= MINSIZE &&
2591 prev_inuse (old_top) &&
2592 ((unsigned long) old_end & (pagesize - 1)) == 0));
2593
2594 /* Precondition: not enough current space to satisfy nb request */
2595 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2596
2597
2598 if (av != &main_arena)
2599 {
2600 heap_info *old_heap, *heap;
2601 size_t old_heap_size;
2602
2603 /* First try to extend the current heap. */
2604 old_heap = heap_for_ptr (old_top);
2605 old_heap_size = old_heap->size;
2606 if ((long) (MINSIZE + nb - old_size) > 0
2607 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2608 {
2609 av->system_mem += old_heap->size - old_heap_size;
2610 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2611 | PREV_INUSE);
2612 }
2613 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2614 {
2615 /* Use a newly allocated heap. */
2616 heap->ar_ptr = av;
2617 heap->prev = old_heap;
2618 av->system_mem += heap->size;
2619 /* Set up the new top. */
2620 top (av) = chunk_at_offset (heap, sizeof (*heap));
2621 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2622
2623 /* Setup fencepost and free the old top chunk with a multiple of
2624 MALLOC_ALIGNMENT in size. */
2625 /* The fencepost takes at least MINSIZE bytes, because it might
2626 become the top chunk again later. Note that a footer is set
2627 up, too, although the chunk is marked in use. */
2628 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2629 set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
2630 0 | PREV_INUSE);
2631 if (old_size >= MINSIZE)
2632 {
2633 set_head (chunk_at_offset (old_top, old_size),
2634 CHUNK_HDR_SZ | PREV_INUSE);
2635 set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
2636 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2637 _int_free (av, old_top, 1);
2638 }
2639 else
2640 {
2641 set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
2642 set_foot (old_top, (old_size + CHUNK_HDR_SZ));
2643 }
2644 }
2645 else if (!tried_mmap)
2646 {
2647 /* We can at least try to use mmap to get memory. If new_heap fails
2648 it is unlikely that trying to allocate huge pages will
2649 succeed. */
2650 char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
2651 if (mm != MAP_FAILED)
2652 return mm;
2653 }
2654 }
2655 else /* av == main_arena */
2656
2657
2658 { /* Request enough space for nb + pad + overhead */
2659 size = nb + mp_.top_pad + MINSIZE;
2660
2661 /*
2662 If contiguous, we can subtract out existing space that we hope to
2663 combine with new space. We add it back later only if
2664 we don't actually get contiguous space.
2665 */
2666
2667 if (contiguous (av))
2668 size -= old_size;
2669
2670 /*
2671 Round to a multiple of page size or huge page size.
2672 If MORECORE is not contiguous, this ensures that we only call it
2673 with whole-page arguments. And if MORECORE is contiguous and
2674 this is not first time through, this preserves page-alignment of
2675 previous calls. Otherwise, we correct to page-align below.
2676 */
2677
2678 #ifdef MADV_HUGEPAGE
2679 /* Defined in brk.c. */
2680 extern void *__curbrk;
2681 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2682 {
2683 uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
2684 mp_.thp_pagesize);
2685 size = top - (uintptr_t) __curbrk;
2686 }
2687 else
2688 #endif
2689 size = ALIGN_UP (size, GLRO(dl_pagesize));
2690
2691 /*
2692 Don't try to call MORECORE if argument is so big as to appear
2693 negative. Note that since mmap takes size_t arg, it may succeed
2694 below even if we cannot call MORECORE.
2695 */
2696
2697 if (size > 0)
2698 {
2699 brk = (char *) (MORECORE (size));
2700 if (brk != (char *) (MORECORE_FAILURE))
2701 madvise_thp (brk, size);
2702 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2703 }
2704
2705 if (brk == (char *) (MORECORE_FAILURE))
2706 {
2707 /*
2708 If have mmap, try using it as a backup when MORECORE fails or
2709 cannot be used. This is worth doing on systems that have "holes" in
2710 address space, so sbrk cannot extend to give contiguous space, but
2711 space is available elsewhere. Note that we ignore mmap max count
2712 and threshold limits, since the space will not be used as a
2713 segregated mmap region.
2714 */
2715
2716 char *mbrk = MAP_FAILED;
2717 if (mp_.hp_pagesize > 0)
2718 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
2719 mp_.hp_pagesize, mp_.hp_pagesize,
2720 mp_.hp_flags, av);
2721 if (mbrk == MAP_FAILED)
2722 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, MMAP_AS_MORECORE_SIZE,
2723 pagesize, 0, av);
2724 if (mbrk != MAP_FAILED)
2725 {
2726 /* We do not need, and cannot use, another sbrk call to find end */
2727 brk = mbrk;
2728 snd_brk = brk + size;
2729 }
2730 }
2731
2732 if (brk != (char *) (MORECORE_FAILURE))
2733 {
2734 if (mp_.sbrk_base == 0)
2735 mp_.sbrk_base = brk;
2736 av->system_mem += size;
2737
2738 /*
2739 If MORECORE extends previous space, we can likewise extend top size.
2740 */
2741
2742 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2743 set_head (old_top, (size + old_size) | PREV_INUSE);
2744
2745 else if (contiguous (av) && old_size && brk < old_end)
2746 /* Oops! Someone else killed our space. Can't touch anything. */
2747 malloc_printerr ("break adjusted to free malloc space");
2748
2749 /*
2750 Otherwise, make adjustments:
2751
2752 * If the first time through or noncontiguous, we need to call sbrk
2753 just to find out where the end of memory lies.
2754
2755 * We need to ensure that all returned chunks from malloc will meet
2756 MALLOC_ALIGNMENT
2757
2758 * If there was an intervening foreign sbrk, we need to adjust sbrk
2759 request size to account for fact that we will not be able to
2760 combine new space with existing space in old_top.
2761
2762 * Almost all systems internally allocate whole pages at a time, in
2763 which case we might as well use the whole last page of request.
2764 So we allocate enough more memory to hit a page boundary now,
2765 which in turn causes future contiguous calls to page-align.
2766 */
2767
2768 else
2769 {
2770 front_misalign = 0;
2771 end_misalign = 0;
2772 correction = 0;
2773 aligned_brk = brk;
2774
2775 /* handle contiguous cases */
2776 if (contiguous (av))
2777 {
2778 /* Count foreign sbrk as system_mem. */
2779 if (old_size)
2780 av->system_mem += brk - old_end;
2781
2782 /* Guarantee alignment of first new chunk made from this space */
2783
2784 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2785 if (front_misalign > 0)
2786 {
2787 /*
2788 Skip over some bytes to arrive at an aligned position.
2789 We don't need to specially mark these wasted front bytes.
2790 They will never be accessed anyway because
2791 prev_inuse of av->top (and any chunk created from its start)
2792 is always true after initialization.
2793 */
2794
2795 correction = MALLOC_ALIGNMENT - front_misalign;
2796 aligned_brk += correction;
2797 }
2798
2799 /*
2800 If this isn't adjacent to existing space, then we will not
2801 be able to merge with old_top space, so must add to 2nd request.
2802 */
2803
2804 correction += old_size;
2805
2806 /* Extend the end address to hit a page boundary */
2807 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2808 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2809
2810 assert (correction >= 0);
2811 snd_brk = (char *) (MORECORE (correction));
2812
2813 /*
2814 If can't allocate correction, try to at least find out current
2815 brk. It might be enough to proceed without failing.
2816
2817 Note that if second sbrk did NOT fail, we assume that space
2818 is contiguous with first sbrk. This is a safe assumption unless
2819 program is multithreaded but doesn't use locks and a foreign sbrk
2820 occurred between our first and second calls.
2821 */
2822
2823 if (snd_brk == (char *) (MORECORE_FAILURE))
2824 {
2825 correction = 0;
2826 snd_brk = (char *) (MORECORE (0));
2827 }
2828 else
2829 madvise_thp (snd_brk, correction);
2830 }
2831
2832 /* handle non-contiguous cases */
2833 else
2834 {
2835 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2836 /* MORECORE/mmap must correctly align */
2837 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2838 else
2839 {
2840 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2841 if (front_misalign > 0)
2842 {
2843 /*
2844 Skip over some bytes to arrive at an aligned position.
2845 We don't need to specially mark these wasted front bytes.
2846 They will never be accessed anyway because
2847 prev_inuse of av->top (and any chunk created from its start)
2848 is always true after initialization.
2849 */
2850
2851 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2852 }
2853 }
2854
2855 /* Find out current end of memory */
2856 if (snd_brk == (char *) (MORECORE_FAILURE))
2857 {
2858 snd_brk = (char *) (MORECORE (0));
2859 }
2860 }
2861
2862 /* Adjust top based on results of second sbrk */
2863 if (snd_brk != (char *) (MORECORE_FAILURE))
2864 {
2865 av->top = (mchunkptr) aligned_brk;
2866 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2867 av->system_mem += correction;
2868
2869 /*
2870 If not the first time through, we either have a
2871 gap due to foreign sbrk or a non-contiguous region. Insert a
2872 double fencepost at old_top to prevent consolidation with space
2873 we don't own. These fenceposts are artificial chunks that are
2874 marked as inuse and are in any case too small to use. We need
2875 two to make sizes and alignments work out.
2876 */
2877
2878 if (old_size != 0)
2879 {
2880 /*
2881 Shrink old_top to insert fenceposts, keeping size a
2882 multiple of MALLOC_ALIGNMENT. We know there is at least
2883 enough space in old_top to do this.
2884 */
2885 old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
2886 set_head (old_top, old_size | PREV_INUSE);
2887
2888 /*
2889 Note that the following assignments completely overwrite
2890 old_top when old_size was previously MINSIZE. This is
2891 intentional. We need the fencepost, even if old_top otherwise gets
2892 lost.
2893 */
2894 set_head (chunk_at_offset (old_top, old_size),
2895 CHUNK_HDR_SZ | PREV_INUSE);
2896 set_head (chunk_at_offset (old_top,
2897 old_size + CHUNK_HDR_SZ),
2898 CHUNK_HDR_SZ | PREV_INUSE);
2899
2900 /* If possible, release the rest. */
2901 if (old_size >= MINSIZE)
2902 {
2903 _int_free (av, old_top, 1);
2904 }
2905 }
2906 }
2907 }
2908 }
2909 } /* if (av != &main_arena) */
2910
2911 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2912 av->max_system_mem = av->system_mem;
2913 check_malloc_state (av);
2914
2915 /* finally, do the allocation */
2916 p = av->top;
2917 size = chunksize (p);
2918
2919 /* check that one of the above allocation paths succeeded */
2920 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2921 {
2922 remainder_size = size - nb;
2923 remainder = chunk_at_offset (p, nb);
2924 av->top = remainder;
2925 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2926 set_head (remainder, remainder_size | PREV_INUSE);
2927 check_malloced_chunk (av, p, nb);
2928 return chunk2mem (p);
2929 }
2930
2931 /* catch all failure paths */
2932 __set_errno (ENOMEM);
2933 return 0;
2934 }
2935
2936
2937 /*
2938 systrim is an inverse of sorts to sysmalloc. It gives memory back
2939 to the system (via negative arguments to sbrk) if there is unused
2940 memory at the `high' end of the malloc pool. It is called
2941 automatically by free() when top space exceeds the trim
2942 threshold. It is also called by the public malloc_trim routine. It
2943 returns 1 if it actually released any memory, else 0.
2944 */
2945
2946 static int
2947 systrim (size_t pad, mstate av)
2948 {
2949 long top_size; /* Amount of top-most memory */
2950 long extra; /* Amount to release */
2951 long released; /* Amount actually released */
2952 char *current_brk; /* address returned by pre-check sbrk call */
2953 char *new_brk; /* address returned by post-check sbrk call */
2954 long top_area;
2955
2956 top_size = chunksize (av->top);
2957
2958 top_area = top_size - MINSIZE - 1;
2959 if (top_area <= pad)
2960 return 0;
2961
2962 /* Release in pagesize units and round down to the nearest page. */
2963 #ifdef MADV_HUGEPAGE
2964 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2965 extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize);
2966 else
2967 #endif
2968 extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize));
2969
2970 if (extra == 0)
2971 return 0;
2972
2973 /*
2974 Only proceed if end of memory is where we last set it.
2975 This avoids problems if there were foreign sbrk calls.
2976 */
2977 current_brk = (char *) (MORECORE (0));
2978 if (current_brk == (char *) (av->top) + top_size)
2979 {
2980 /*
2981 Attempt to release memory. We ignore MORECORE return value,
2982 and instead call again to find out where new end of memory is.
2983 This avoids problems if the first call releases less than we asked,
2984 or if failure somehow altered the brk value. (We could still
2985 encounter problems if it altered brk in some very bad way,
2986 but the only thing we can do is adjust anyway, which will cause
2987 some downstream failure.)
2988 */
2989
2990 MORECORE (-extra);
2991 new_brk = (char *) (MORECORE (0));
2992
2993 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2994
2995 if (new_brk != (char *) MORECORE_FAILURE)
2996 {
2997 released = (long) (current_brk - new_brk);
2998
2999 if (released != 0)
3000 {
3001 /* Success. Adjust top. */
3002 av->system_mem -= released;
3003 set_head (av->top, (top_size - released) | PREV_INUSE);
3004 check_malloc_state (av);
3005 return 1;
3006 }
3007 }
3008 }
3009 return 0;
3010 }
3011
3012 static void
3013 munmap_chunk (mchunkptr p)
3014 {
3015 size_t pagesize = GLRO (dl_pagesize);
3016 INTERNAL_SIZE_T size = chunksize (p);
3017
3018 assert (chunk_is_mmapped (p));
3019
3020 uintptr_t mem = (uintptr_t) chunk2mem (p);
3021 uintptr_t block = (uintptr_t) p - prev_size (p);
3022 size_t total_size = prev_size (p) + size;
3023 /* Unfortunately we have to do the compiler's job by hand here. Normally
3024 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3025 page size. But gcc does not recognize the optimization possibility
3026 (at the moment, at least) so we combine the two values into one before
3027 the bit test. */
3028 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3029 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3030 malloc_printerr ("munmap_chunk(): invalid pointer");
3031
3032 atomic_fetch_add_relaxed (&mp_.n_mmaps, -1);
3033 atomic_fetch_add_relaxed (&mp_.mmapped_mem, -total_size);
3034
3035 /* If munmap failed, the process's virtual memory address space is in a
3036 bad shape. Just leave the block hanging around; the process will
3037 terminate shortly anyway since not much can be done. */
3038 __munmap ((char *) block, total_size);
3039 }
3040
3041 #if HAVE_MREMAP
3042
3043 static mchunkptr
3044 mremap_chunk (mchunkptr p, size_t new_size)
3045 {
3046 size_t pagesize = GLRO (dl_pagesize);
3047 INTERNAL_SIZE_T offset = prev_size (p);
3048 INTERNAL_SIZE_T size = chunksize (p);
3049 char *cp;
3050
3051 assert (chunk_is_mmapped (p));
3052
3053 uintptr_t block = (uintptr_t) p - offset;
3054 uintptr_t mem = (uintptr_t) chunk2mem(p);
3055 size_t total_size = offset + size;
3056 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3057 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3058 malloc_printerr("mremap_chunk(): invalid pointer");
3059
3060 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3061 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
3062
3063 /* No need to remap if the number of pages does not change. */
3064 if (total_size == new_size)
3065 return p;
3066
3067 cp = (char *) __mremap ((char *) block, total_size, new_size,
3068 MREMAP_MAYMOVE);
3069
3070 if (cp == MAP_FAILED)
3071 return 0;
3072
3073 madvise_thp (cp, new_size);
3074
3075 p = (mchunkptr) (cp + offset);
3076
3077 assert (aligned_OK (chunk2mem (p)));
3078
3079 assert (prev_size (p) == offset);
3080 set_head (p, (new_size - offset) | IS_MMAPPED);
3081
3082 INTERNAL_SIZE_T new;
3083 new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset)
3084 + new_size - size - offset;
3085 atomic_max (&mp_.max_mmapped_mem, new);
3086 return p;
3087 }
3088 #endif /* HAVE_MREMAP */
3089
3090 /*------------------------ Public wrappers. --------------------------------*/
3091
3092 #if USE_TCACHE
3093
3094 /* We overlay this structure on the user-data portion of a chunk when
3095 the chunk is stored in the per-thread cache. */
3096 typedef struct tcache_entry
3097 {
3098 struct tcache_entry *next;
3099 /* This field exists to detect double frees. */
3100 uintptr_t key;
3101 } tcache_entry;
3102
3103 /* There is one of these for each thread, which contains the
3104 per-thread cache (hence "tcache_perthread_struct"). Keeping
3105 overall size low is mildly important. Note that COUNTS and ENTRIES
3106 are redundant (we could have just counted the linked list each
3107 time); this is for performance reasons. */
3108 typedef struct tcache_perthread_struct
3109 {
3110 uint16_t counts[TCACHE_MAX_BINS];
3111 tcache_entry *entries[TCACHE_MAX_BINS];
3112 } tcache_perthread_struct;
3113
3114 static __thread bool tcache_shutting_down = false;
3115 static __thread tcache_perthread_struct *tcache = NULL;
3116
3117 /* Process-wide key to try and catch a double-free in the same thread. */
3118 static uintptr_t tcache_key;
3119
3120 /* The value of tcache_key does not really have to be a cryptographically
3121 secure random number. It only needs to be arbitrary enough so that it does
3122 not collide with values present in applications. If a collision does happen
3123 consistently enough, it could cause a degradation in performance since the
3124 entire list is checked to see whether the block has indeed been freed a
3125 second time. The odds of this happening are exceedingly low though, about 1
3126 in 2^wordsize. There is probably a higher chance of the performance
3127 degradation being due to a double free where the first free happened in a
3128 different thread; that's a case this check does not cover. */
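/* Roughly, the key is used like this (a sketch of the check done later in
   _int_free):

     tcache_entry *e = (tcache_entry *) chunk2mem (p);
     if (__glibc_unlikely (e->key == tcache_key))
       ... walk tcache->entries[tc_idx] and abort only if E is really
           on the list ...

   so a chance collision with user data costs at most one walk of a short
   list.  */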
3129 static void
3130 tcache_key_initialize (void)
3131 {
3132 if (__getrandom_nocancel (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
3133 != sizeof (tcache_key))
3134 {
3135 tcache_key = random_bits ();
3136 #if __WORDSIZE == 64
3137 tcache_key = (tcache_key << 32) | random_bits ();
3138 #endif
3139 }
3140 }
3141
3142 /* Caller must ensure that tc_idx is valid and that there's room
3143 for more chunks. */
3144 static __always_inline void
3145 tcache_put (mchunkptr chunk, size_t tc_idx)
3146 {
3147 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
3148
3149 /* Mark this chunk as "in the tcache" so the test in _int_free will
3150 detect a double free. */
3151 e->key = tcache_key;
3152
3153 e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
3154 tcache->entries[tc_idx] = e;
3155 ++(tcache->counts[tc_idx]);
3156 }
3157
3158 /* Caller must ensure that tc_idx is valid and that there are
3159 chunks available to remove. Removes the chunk that *EP points to,
3160 which may be in the middle of the list. */
3161 static __always_inline void *
3162 tcache_get_n (size_t tc_idx, tcache_entry **ep)
3163 {
3164 tcache_entry *e;
3165 if (ep == &(tcache->entries[tc_idx]))
3166 e = *ep;
3167 else
3168 e = REVEAL_PTR (*ep);
3169
3170 if (__glibc_unlikely (!aligned_OK (e)))
3171 malloc_printerr ("malloc(): unaligned tcache chunk detected");
3172
3173 if (ep == &(tcache->entries[tc_idx]))
3174 *ep = REVEAL_PTR (e->next);
3175 else
3176 *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next));
3177
3178 --(tcache->counts[tc_idx]);
3179 e->key = 0;
3180 return (void *) e;
3181 }
3182
3183 /* Like the above, but removes from the head of the list. */
3184 static __always_inline void *
3185 tcache_get (size_t tc_idx)
3186 {
3187 return tcache_get_n (tc_idx, & tcache->entries[tc_idx]);
3188 }
3189
3190 /* Iterates through the tcache linked list. */
3191 static __always_inline tcache_entry *
3192 tcache_next (tcache_entry *e)
3193 {
3194 return (tcache_entry *) REVEAL_PTR (e->next);
3195 }
3196
3197 static void
3198 tcache_thread_shutdown (void)
3199 {
3200 int i;
3201 tcache_perthread_struct *tcache_tmp = tcache;
3202
3203 tcache_shutting_down = true;
3204
3205 if (!tcache)
3206 return;
3207
3208 /* Disable the tcache and prevent it from being reinitialized. */
3209 tcache = NULL;
3210
3211 /* Free all of the entries and the tcache itself back to the arena
3212 heap for coalescing. */
3213 for (i = 0; i < TCACHE_MAX_BINS; ++i)
3214 {
3215 while (tcache_tmp->entries[i])
3216 {
3217 tcache_entry *e = tcache_tmp->entries[i];
3218 if (__glibc_unlikely (!aligned_OK (e)))
3219 malloc_printerr ("tcache_thread_shutdown(): "
3220 "unaligned tcache chunk detected");
3221 tcache_tmp->entries[i] = REVEAL_PTR (e->next);
3222 __libc_free (e);
3223 }
3224 }
3225
3226 __libc_free (tcache_tmp);
3227 }
3228
3229 static void
3230 tcache_init(void)
3231 {
3232 mstate ar_ptr;
3233 void *victim = 0;
3234 const size_t bytes = sizeof (tcache_perthread_struct);
3235
3236 if (tcache_shutting_down)
3237 return;
3238
3239 arena_get (ar_ptr, bytes);
3240 victim = _int_malloc (ar_ptr, bytes);
3241 if (!victim && ar_ptr != NULL)
3242 {
3243 ar_ptr = arena_get_retry (ar_ptr, bytes);
3244 victim = _int_malloc (ar_ptr, bytes);
3245 }
3246
3247
3248 if (ar_ptr != NULL)
3249 __libc_lock_unlock (ar_ptr->mutex);
3250
3251 /* In a low memory situation, we may not be able to allocate memory
3252 - in which case, we just keep trying later. However, we
3253 typically do this very early, so either there is sufficient
3254 memory, or there isn't enough memory to do non-trivial
3255 allocations anyway. */
3256 if (victim)
3257 {
3258 tcache = (tcache_perthread_struct *) victim;
3259 memset (tcache, 0, sizeof (tcache_perthread_struct));
3260 }
3261
3262 }
3263
3264 # define MAYBE_INIT_TCACHE() \
3265 if (__glibc_unlikely (tcache == NULL)) \
3266 tcache_init();
3267
3268 #else /* !USE_TCACHE */
3269 # define MAYBE_INIT_TCACHE()
3270
3271 static void
3272 tcache_thread_shutdown (void)
3273 {
3274 /* Nothing to do if there is no thread cache. */
3275 }
3276
3277 #endif /* !USE_TCACHE */
3278
3279 #if IS_IN (libc)
3280 void *
3281 __libc_malloc (size_t bytes)
3282 {
3283 mstate ar_ptr;
3284 void *victim;
3285
3286 _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
3287 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3288
3289 if (!__malloc_initialized)
3290 ptmalloc_init ();
3291 #if USE_TCACHE
3292 /* _int_free also calls request2size; be careful not to pad twice. */
3293 size_t tbytes = checked_request2size (bytes);
3294 if (tbytes == 0)
3295 {
3296 __set_errno (ENOMEM);
3297 return NULL;
3298 }
3299 size_t tc_idx = csize2tidx (tbytes);
3300
3301 MAYBE_INIT_TCACHE ();
3302
3303 DIAG_PUSH_NEEDS_COMMENT;
3304 if (tc_idx < mp_.tcache_bins
3305 && tcache != NULL
3306 && tcache->counts[tc_idx] > 0)
3307 {
3308 victim = tcache_get (tc_idx);
3309 return tag_new_usable (victim);
3310 }
3311 DIAG_POP_NEEDS_COMMENT;
3312 #endif
3313
3314 if (SINGLE_THREAD_P)
3315 {
3316 victim = tag_new_usable (_int_malloc (&main_arena, bytes));
3317 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3318 &main_arena == arena_for_chunk (mem2chunk (victim)));
3319 return victim;
3320 }
3321
3322 arena_get (ar_ptr, bytes);
3323
3324 victim = _int_malloc (ar_ptr, bytes);
3325 /* Retry with another arena only if we were able to find a usable arena
3326 before. */
3327 if (!victim && ar_ptr != NULL)
3328 {
3329 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3330 ar_ptr = arena_get_retry (ar_ptr, bytes);
3331 victim = _int_malloc (ar_ptr, bytes);
3332 }
3333
3334 if (ar_ptr != NULL)
3335 __libc_lock_unlock (ar_ptr->mutex);
3336
3337 victim = tag_new_usable (victim);
3338
3339 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3340 ar_ptr == arena_for_chunk (mem2chunk (victim)));
3341 return victim;
3342 }
3343 libc_hidden_def (__libc_malloc)
3344
3345 void
3346 __libc_free (void *mem)
3347 {
3348 mstate ar_ptr;
3349 mchunkptr p; /* chunk corresponding to mem */
3350
3351 if (mem == 0) /* free(0) has no effect */
3352 return;
3353
3354 /* Quickly check that the freed pointer matches the tag for the memory.
3355 This provides useful double-free detection. */
3356 if (__glibc_unlikely (mtag_enabled))
3357 *(volatile char *)mem;
3358
3359 int err = errno;
3360
3361 p = mem2chunk (mem);
3362
3363 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3364 {
3365 /* See if the dynamic brk/mmap threshold needs adjusting.
3366 Dumped fake mmapped chunks do not affect the threshold. */
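/* For example, freeing a 1 MiB mmapped chunk while the threshold is still
   lower raises mmap_threshold to 1 MiB and trim_threshold to 2 MiB, so
   later requests of that size are served from the arena instead of a
   fresh mmap/munmap pair.  */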
3367 if (!mp_.no_dyn_threshold
3368 && chunksize_nomask (p) > mp_.mmap_threshold
3369 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
3370 {
3371 mp_.mmap_threshold = chunksize (p);
3372 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3373 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3374 mp_.mmap_threshold, mp_.trim_threshold);
3375 }
3376 munmap_chunk (p);
3377 }
3378 else
3379 {
3380 MAYBE_INIT_TCACHE ();
3381
3382 /* Mark the chunk as belonging to the library again. */
3383 (void)tag_region (chunk2mem (p), memsize (p));
3384
3385 ar_ptr = arena_for_chunk (p);
3386 _int_free (ar_ptr, p, 0);
3387 }
3388
3389 __set_errno (err);
3390 }
3391 libc_hidden_def (__libc_free)
3392
3393 void *
3394 __libc_realloc (void *oldmem, size_t bytes)
3395 {
3396 mstate ar_ptr;
3397 INTERNAL_SIZE_T nb; /* padded request size */
3398
3399 void *newp; /* chunk to return */
3400
3401 if (!__malloc_initialized)
3402 ptmalloc_init ();
3403
3404 #if REALLOC_ZERO_BYTES_FREES
3405 if (bytes == 0 && oldmem != NULL)
3406 {
3407 __libc_free (oldmem); return 0;
3408 }
3409 #endif
3410
3411 /* realloc of null is supposed to be same as malloc */
3412 if (oldmem == 0)
3413 return __libc_malloc (bytes);
3414
3415 /* Perform a quick check to ensure that the pointer's tag matches the
3416 memory's tag. */
3417 if (__glibc_unlikely (mtag_enabled))
3418 *(volatile char*) oldmem;
3419
3420 /* Return the chunk as is whenever possible, i.e. there's enough usable space
3421 but not so much that we end up fragmenting the block. We use the trim
3422 threshold as the heuristic to decide the latter. */
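/* For example, with the default 128 KiB trim threshold, shrinking a
   200-byte allocation to 50 bytes returns the original block unchanged,
   while shrinking a 1 MiB allocation to 50 bytes falls through and
   reallocates so the excess can be given back.  */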
3423 size_t usable = musable (oldmem);
3424 if (bytes <= usable
3425 && (unsigned long) (usable - bytes) <= mp_.trim_threshold)
3426 return oldmem;
3427
3428 /* chunk corresponding to oldmem */
3429 const mchunkptr oldp = mem2chunk (oldmem);
3430 /* its size */
3431 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
3432
3433 if (chunk_is_mmapped (oldp))
3434 ar_ptr = NULL;
3435 else
3436 {
3437 MAYBE_INIT_TCACHE ();
3438 ar_ptr = arena_for_chunk (oldp);
3439 }
3440
3441 /* Little security check which won't hurt performance: the allocator
3442 never wraps around at the end of the address space. Therefore
3443 we can exclude some size values which might appear here by
3444 accident or by "design" from some intruder. */
3445 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3446 || __builtin_expect (misaligned_chunk (oldp), 0)))
3447 malloc_printerr ("realloc(): invalid pointer");
3448
3449 nb = checked_request2size (bytes);
3450 if (nb == 0)
3451 {
3452 __set_errno (ENOMEM);
3453 return NULL;
3454 }
3455
3456 if (chunk_is_mmapped (oldp))
3457 {
3458 void *newmem;
3459
3460 #if HAVE_MREMAP
3461 newp = mremap_chunk (oldp, nb);
3462 if (newp)
3463 {
3464 void *newmem = chunk2mem_tag (newp);
3465 /* Give the new block a different tag. This helps to ensure
3466 that stale handles to the previous mapping are not
3467 reused. There's a performance hit for both us and the
3468 caller for doing this, so we might want to
3469 reconsider. */
3470 return tag_new_usable (newmem);
3471 }
3472 #endif
3473 /* Note the extra SIZE_SZ overhead. */
3474 if (oldsize - SIZE_SZ >= nb)
3475 return oldmem; /* do nothing */
3476
3477 /* Must alloc, copy, free. */
3478 newmem = __libc_malloc (bytes);
3479 if (newmem == 0)
3480 return 0; /* propagate failure */
3481
3482 memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
3483 munmap_chunk (oldp);
3484 return newmem;
3485 }
3486
3487 if (SINGLE_THREAD_P)
3488 {
3489 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3490 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3491 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3492
3493 return newp;
3494 }
3495
3496 __libc_lock_lock (ar_ptr->mutex);
3497
3498 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3499
3500 __libc_lock_unlock (ar_ptr->mutex);
3501 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3502 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3503
3504 if (newp == NULL)
3505 {
3506 /* Try harder to allocate memory in other arenas. */
3507 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3508 newp = __libc_malloc (bytes);
3509 if (newp != NULL)
3510 {
3511 size_t sz = memsize (oldp);
3512 memcpy (newp, oldmem, sz);
3513 (void) tag_region (chunk2mem (oldp), sz);
3514 _int_free (ar_ptr, oldp, 0);
3515 }
3516 }
3517
3518 return newp;
3519 }
3520 libc_hidden_def (__libc_realloc)
3521
3522 void *
3523 __libc_memalign (size_t alignment, size_t bytes)
3524 {
3525 if (!__malloc_initialized)
3526 ptmalloc_init ();
3527
3528 void *address = RETURN_ADDRESS (0);
3529 return _mid_memalign (alignment, bytes, address);
3530 }
3531
3532 static void *
3533 _mid_memalign (size_t alignment, size_t bytes, void *address)
3534 {
3535 mstate ar_ptr;
3536 void *p;
3537
3538 /* If we need less alignment than we give anyway, just relay to malloc. */
3539 if (alignment <= MALLOC_ALIGNMENT)
3540 return __libc_malloc (bytes);
3541
3542 /* Otherwise, ensure that it is at least a minimum chunk size */
3543 if (alignment < MINSIZE)
3544 alignment = MINSIZE;
3545
3546 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3547 power of 2 and will cause overflow in the check below. */
3548 if (alignment > SIZE_MAX / 2 + 1)
3549 {
3550 __set_errno (EINVAL);
3551 return 0;
3552 }
3553
3554
3555 /* Make sure alignment is power of 2. */
3556 if (!powerof2 (alignment))
3557 {
3558 size_t a = MALLOC_ALIGNMENT * 2;
3559 while (a < alignment)
3560 a <<= 1;
3561 alignment = a;
3562 }
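/* For example, assuming MALLOC_ALIGNMENT is 16, a requested alignment of
   24 is rounded up to 32 and one of 48 is rounded up to 64.  */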
3563
3564 #if USE_TCACHE
3565 {
3566 size_t tbytes;
3567 tbytes = checked_request2size (bytes);
3568 if (tbytes == 0)
3569 {
3570 __set_errno (ENOMEM);
3571 return NULL;
3572 }
3573 size_t tc_idx = csize2tidx (tbytes);
3574
3575 if (tc_idx < mp_.tcache_bins
3576 && tcache != NULL
3577 && tcache->counts[tc_idx] > 0)
3578 {
3579 /* The tcache itself isn't encoded, but the chain is. */
3580 tcache_entry **tep = & tcache->entries[tc_idx];
3581 tcache_entry *te = *tep;
3582 while (te != NULL && !PTR_IS_ALIGNED (te, alignment))
3583 {
3584 tep = & (te->next);
3585 te = tcache_next (te);
3586 }
3587 if (te != NULL)
3588 {
3589 void *victim = tcache_get_n (tc_idx, tep);
3590 return tag_new_usable (victim);
3591 }
3592 }
3593 }
3594 #endif
3595
3596 if (SINGLE_THREAD_P)
3597 {
3598 p = _int_memalign (&main_arena, alignment, bytes);
3599 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3600 &main_arena == arena_for_chunk (mem2chunk (p)));
3601 return tag_new_usable (p);
3602 }
3603
3604 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3605
3606 p = _int_memalign (ar_ptr, alignment, bytes);
3607 if (!p && ar_ptr != NULL)
3608 {
3609 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3610 ar_ptr = arena_get_retry (ar_ptr, bytes);
3611 p = _int_memalign (ar_ptr, alignment, bytes);
3612 }
3613
3614 if (ar_ptr != NULL)
3615 __libc_lock_unlock (ar_ptr->mutex);
3616
3617 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3618 ar_ptr == arena_for_chunk (mem2chunk (p)));
3619 return tag_new_usable (p);
3620 }
3621 /* For ISO C11. */
3622 weak_alias (__libc_memalign, aligned_alloc)
3623 libc_hidden_def (__libc_memalign)
3624
3625 void *
3626 __libc_valloc (size_t bytes)
3627 {
3628 if (!__malloc_initialized)
3629 ptmalloc_init ();
3630
3631 void *address = RETURN_ADDRESS (0);
3632 size_t pagesize = GLRO (dl_pagesize);
3633 return _mid_memalign (pagesize, bytes, address);
3634 }
3635
3636 void *
3637 __libc_pvalloc (size_t bytes)
3638 {
3639 if (!__malloc_initialized)
3640 ptmalloc_init ();
3641
3642 void *address = RETURN_ADDRESS (0);
3643 size_t pagesize = GLRO (dl_pagesize);
3644 size_t rounded_bytes;
3645 /* ALIGN_UP with overflow check. */
3646 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3647 pagesize - 1,
3648 &rounded_bytes)))
3649 {
3650 __set_errno (ENOMEM);
3651 return 0;
3652 }
3653 rounded_bytes = rounded_bytes & ~(pagesize - 1);
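/* For example, with a 4096-byte page a request of 4097 bytes is rounded
   up to 8192 (two pages) before being handed to _mid_memalign.  */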
3654
3655 return _mid_memalign (pagesize, rounded_bytes, address);
3656 }
3657
3658 void *
3659 __libc_calloc (size_t n, size_t elem_size)
3660 {
3661 mstate av;
3662 mchunkptr oldtop;
3663 INTERNAL_SIZE_T sz, oldtopsize;
3664 void *mem;
3665 unsigned long clearsize;
3666 unsigned long nclears;
3667 INTERNAL_SIZE_T *d;
3668 ptrdiff_t bytes;
3669
3670 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
3671 {
3672 __set_errno (ENOMEM);
3673 return NULL;
3674 }
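/* Since BYTES is a ptrdiff_t, the overflow check above also rejects
   products larger than PTRDIFF_MAX; e.g. calloc (SIZE_MAX / 2, 3) fails
   with ENOMEM instead of silently wrapping.  */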
3675
3676 sz = bytes;
3677
3678 if (!__malloc_initialized)
3679 ptmalloc_init ();
3680
3681 MAYBE_INIT_TCACHE ();
3682
3683 if (SINGLE_THREAD_P)
3684 av = &main_arena;
3685 else
3686 arena_get (av, sz);
3687
3688 if (av)
3689 {
3690 /* Check if we hand out the top chunk, in which case there may be no
3691 need to clear. */
3692 #if MORECORE_CLEARS
3693 oldtop = top (av);
3694 oldtopsize = chunksize (top (av));
3695 # if MORECORE_CLEARS < 2
3696 /* Only newly allocated memory is guaranteed to be cleared. */
3697 if (av == &main_arena &&
3698 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3699 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3700 # endif
3701 if (av != &main_arena)
3702 {
3703 heap_info *heap = heap_for_ptr (oldtop);
3704 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3705 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3706 }
3707 #endif
3708 }
3709 else
3710 {
3711 /* No usable arenas. */
3712 oldtop = 0;
3713 oldtopsize = 0;
3714 }
3715 mem = _int_malloc (av, sz);
3716
3717 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3718 av == arena_for_chunk (mem2chunk (mem)));
3719
3720 if (!SINGLE_THREAD_P)
3721 {
3722 if (mem == 0 && av != NULL)
3723 {
3724 LIBC_PROBE (memory_calloc_retry, 1, sz);
3725 av = arena_get_retry (av, sz);
3726 mem = _int_malloc (av, sz);
3727 }
3728
3729 if (av != NULL)
3730 __libc_lock_unlock (av->mutex);
3731 }
3732
3733 /* Allocation failed even after a retry. */
3734 if (mem == 0)
3735 return 0;
3736
3737 mchunkptr p = mem2chunk (mem);
3738
3739 /* If we are using memory tagging, then we need to set the tags
3740 regardless of MORECORE_CLEARS, so we zero the whole block while
3741 doing so. */
3742 if (__glibc_unlikely (mtag_enabled))
3743 return tag_new_zero_region (mem, memsize (p));
3744
3745 INTERNAL_SIZE_T csz = chunksize (p);
3746
3747 /* Two optional cases in which clearing not necessary */
3748 if (chunk_is_mmapped (p))
3749 {
3750 if (__builtin_expect (perturb_byte, 0))
3751 return memset (mem, 0, sz);
3752
3753 return mem;
3754 }
3755
3756 #if MORECORE_CLEARS
3757 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3758 {
3759 /* clear only the bytes from non-freshly-sbrked memory */
3760 csz = oldtopsize;
3761 }
3762 #endif
3763
3764 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3765 contents have an odd number of INTERNAL_SIZE_T-sized words;
3766 minimally 3. */
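/* For example, on a 64-bit build the smallest (32-byte) chunk gives
   CLEARSIZE = 32 - 8 = 24 bytes, i.e. NCLEARS = 3 words, the minimal case
   handled by the unrolled stores below.  */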
3767 d = (INTERNAL_SIZE_T *) mem;
3768 clearsize = csz - SIZE_SZ;
3769 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3770 assert (nclears >= 3);
3771
3772 if (nclears > 9)
3773 return memset (d, 0, clearsize);
3774
3775 else
3776 {
3777 *(d + 0) = 0;
3778 *(d + 1) = 0;
3779 *(d + 2) = 0;
3780 if (nclears > 4)
3781 {
3782 *(d + 3) = 0;
3783 *(d + 4) = 0;
3784 if (nclears > 6)
3785 {
3786 *(d + 5) = 0;
3787 *(d + 6) = 0;
3788 if (nclears > 8)
3789 {
3790 *(d + 7) = 0;
3791 *(d + 8) = 0;
3792 }
3793 }
3794 }
3795 }
3796
3797 return mem;
3798 }
3799 #endif /* IS_IN (libc) */
3800
3801 /*
3802 ------------------------------ malloc ------------------------------
3803 */
3804
3805 static void *
3806 _int_malloc (mstate av, size_t bytes)
3807 {
3808 INTERNAL_SIZE_T nb; /* normalized request size */
3809 unsigned int idx; /* associated bin index */
3810 mbinptr bin; /* associated bin */
3811
3812 mchunkptr victim; /* inspected/selected chunk */
3813 INTERNAL_SIZE_T size; /* its size */
3814 int victim_index; /* its bin index */
3815
3816 mchunkptr remainder; /* remainder from a split */
3817 unsigned long remainder_size; /* its size */
3818
3819 unsigned int block; /* bit map traverser */
3820 unsigned int bit; /* bit map traverser */
3821 unsigned int map; /* current word of binmap */
3822
3823 mchunkptr fwd; /* misc temp for linking */
3824 mchunkptr bck; /* misc temp for linking */
3825
3826 #if USE_TCACHE
3827 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3828 #endif
3829
3830 /*
3831 Convert request size to internal form by adding SIZE_SZ bytes
3832 overhead plus possibly more to obtain necessary alignment and/or
3833 to obtain a size of at least MINSIZE, the smallest allocatable
3834 size. Also, checked_request2size returns zero for request sizes
3835 that are so large that they wrap around zero when padded and
3836 aligned.
3837 */
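/* For example, on a typical 64-bit configuration (SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16) a 24-byte request becomes a 32-byte chunk and a
   25-byte request becomes a 48-byte chunk.  */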
3838
3839 nb = checked_request2size (bytes);
3840 if (nb == 0)
3841 {
3842 __set_errno (ENOMEM);
3843 return NULL;
3844 }
3845
3846 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3847 mmap. */
3848 if (__glibc_unlikely (av == NULL))
3849 {
3850 void *p = sysmalloc (nb, av);
3851 if (p != NULL)
3852 alloc_perturb (p, bytes);
3853 return p;
3854 }
3855
3856 /*
3857 If the size qualifies as a fastbin, first check corresponding bin.
3858 This code is safe to execute even if av is not yet initialized, so we
3859 can try it without checking, which saves some time on this fast path.
3860 */
3861
3862 #define REMOVE_FB(fb, victim, pp) \
3863 do \
3864 { \
3865 victim = pp; \
3866 if (victim == NULL) \
3867 break; \
3868 pp = REVEAL_PTR (victim->fd); \
3869 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
3870 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
3871 } \
3872 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
3873 != victim); \
3874
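/* A sketch of what the macro does under contention: two threads may read
   the same fastbin top VICTIM, but only one compare-and-exchange succeeds
   in swinging *FB to VICTIM's successor; the loser receives the new top
   in PP and retries the loop with it.  */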
3875 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3876 {
3877 idx = fastbin_index (nb);
3878 mfastbinptr *fb = &fastbin (av, idx);
3879 mchunkptr pp;
3880 victim = *fb;
3881
3882 if (victim != NULL)
3883 {
3884 if (__glibc_unlikely (misaligned_chunk (victim)))
3885 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3886
3887 if (SINGLE_THREAD_P)
3888 *fb = REVEAL_PTR (victim->fd);
3889 else
3890 REMOVE_FB (fb, pp, victim);
3891 if (__glibc_likely (victim != NULL))
3892 {
3893 size_t victim_idx = fastbin_index (chunksize (victim));
3894 if (__builtin_expect (victim_idx != idx, 0))
3895 malloc_printerr ("malloc(): memory corruption (fast)");
3896 check_remalloced_chunk (av, victim, nb);
3897 #if USE_TCACHE
3898 /* While we're here, if we see other chunks of the same size,
3899 stash them in the tcache. */
3900 size_t tc_idx = csize2tidx (nb);
3901 if (tcache != NULL && tc_idx < mp_.tcache_bins)
3902 {
3903 mchunkptr tc_victim;
3904
3905 /* While bin not empty and tcache not full, copy chunks. */
3906 while (tcache->counts[tc_idx] < mp_.tcache_count
3907 && (tc_victim = *fb) != NULL)
3908 {
3909 if (__glibc_unlikely (misaligned_chunk (tc_victim)))
3910 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
3911 if (SINGLE_THREAD_P)
3912 *fb = REVEAL_PTR (tc_victim->fd);
3913 else
3914 {
3915 REMOVE_FB (fb, pp, tc_victim);
3916 if (__glibc_unlikely (tc_victim == NULL))
3917 break;
3918 }
3919 tcache_put (tc_victim, tc_idx);
3920 }
3921 }
3922 #endif
3923 void *p = chunk2mem (victim);
3924 alloc_perturb (p, bytes);
3925 return p;
3926 }
3927 }
3928 }
3929
3930 /*
3931 If a small request, check regular bin. Since these "smallbins"
3932 hold one size each, no searching within bins is necessary.
3933 (For a large request, we need to wait until unsorted chunks are
3934 processed to find best fit. But for small ones, fits are exact
3935 anyway, so we can check now, which is faster.)
3936 */
3937
3938 if (in_smallbin_range (nb))
3939 {
3940 idx = smallbin_index (nb);
3941 bin = bin_at (av, idx);
3942
3943 if ((victim = last (bin)) != bin)
3944 {
3945 bck = victim->bk;
3946 if (__glibc_unlikely (bck->fd != victim))
3947 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3948 set_inuse_bit_at_offset (victim, nb);
3949 bin->bk = bck;
3950 bck->fd = bin;
3951
3952 if (av != &main_arena)
3953 set_non_main_arena (victim);
3954 check_malloced_chunk (av, victim, nb);
3955 #if USE_TCACHE
3956 /* While we're here, if we see other chunks of the same size,
3957 stash them in the tcache. */
3958 size_t tc_idx = csize2tidx (nb);
3959 if (tcache != NULL && tc_idx < mp_.tcache_bins)
3960 {
3961 mchunkptr tc_victim;
3962
3963 /* While bin not empty and tcache not full, copy chunks over. */
3964 while (tcache->counts[tc_idx] < mp_.tcache_count
3965 && (tc_victim = last (bin)) != bin)
3966 {
3967 if (tc_victim != 0)
3968 {
3969 bck = tc_victim->bk;
3970 set_inuse_bit_at_offset (tc_victim, nb);
3971 if (av != &main_arena)
3972 set_non_main_arena (tc_victim);
3973 bin->bk = bck;
3974 bck->fd = bin;
3975
3976 tcache_put (tc_victim, tc_idx);
3977 }
3978 }
3979 }
3980 #endif
3981 void *p = chunk2mem (victim);
3982 alloc_perturb (p, bytes);
3983 return p;
3984 }
3985 }
3986
3987 /*
3988 If this is a large request, consolidate fastbins before continuing.
3989 While it might look excessive to kill all fastbins before
3990 even seeing if there is space available, this avoids
3991 fragmentation problems normally associated with fastbins.
3992 Also, in practice, programs tend to have runs of either small or
3993 large requests, but less often mixtures, so consolidation is not
3994 invoked all that often in most programs. And the programs in which
3995 it is called frequently otherwise tend to fragment.
3996 */
3997
3998 else
3999 {
4000 idx = largebin_index (nb);
4001 if (atomic_load_relaxed (&av->have_fastchunks))
4002 malloc_consolidate (av);
4003 }
4004
4005 /*
4006 Process recently freed or remaindered chunks, taking one only if
4007 it is an exact fit, or, if this is a small request, the chunk is the remainder from
4008 the most recent non-exact fit. Place other traversed chunks in
4009 bins. Note that this step is the only place in any routine where
4010 chunks are placed in bins.
4011
4012 The outer loop here is needed because we might not realize until
4013 near the end of malloc that we should have consolidated, so must
4014 do so and retry. This happens at most once, and only when we would
4015 otherwise need to expand memory to service a "small" request.
4016 */
4017
4018 #if USE_TCACHE
4019 INTERNAL_SIZE_T tcache_nb = 0;
4020 size_t tc_idx = csize2tidx (nb);
4021 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4022 tcache_nb = nb;
4023 int return_cached = 0;
4024
4025 tcache_unsorted_count = 0;
4026 #endif
4027
4028 for (;; )
4029 {
4030 int iters = 0;
4031 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
4032 {
4033 bck = victim->bk;
4034 size = chunksize (victim);
4035 mchunkptr next = chunk_at_offset (victim, size);
4036
4037 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
4038 || __glibc_unlikely (size > av->system_mem))
4039 malloc_printerr ("malloc(): invalid size (unsorted)");
4040 if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
4041 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
4042 malloc_printerr ("malloc(): invalid next size (unsorted)");
4043 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
4044 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
4045 if (__glibc_unlikely (bck->fd != victim)
4046 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
4047 malloc_printerr ("malloc(): unsorted double linked list corrupted");
4048 if (__glibc_unlikely (prev_inuse (next)))
4049 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
4050
4051 /*
4052 If a small request, try to use last remainder if it is the
4053 only chunk in unsorted bin. This helps promote locality for
4054 runs of consecutive small requests. This is the only
4055 exception to best-fit, and applies only when there is
4056 no exact fit for a small chunk.
4057 */
4058
4059 if (in_smallbin_range (nb) &&
4060 bck == unsorted_chunks (av) &&
4061 victim == av->last_remainder &&
4062 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4063 {
4064 /* split and reattach remainder */
4065 remainder_size = size - nb;
4066 remainder = chunk_at_offset (victim, nb);
4067 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
4068 av->last_remainder = remainder;
4069 remainder->bk = remainder->fd = unsorted_chunks (av);
4070 if (!in_smallbin_range (remainder_size))
4071 {
4072 remainder->fd_nextsize = NULL;
4073 remainder->bk_nextsize = NULL;
4074 }
4075
4076 set_head (victim, nb | PREV_INUSE |
4077 (av != &main_arena ? NON_MAIN_ARENA : 0));
4078 set_head (remainder, remainder_size | PREV_INUSE);
4079 set_foot (remainder, remainder_size);
4080
4081 check_malloced_chunk (av, victim, nb);
4082 void *p = chunk2mem (victim);
4083 alloc_perturb (p, bytes);
4084 return p;
4085 }
4086
4087 /* remove from unsorted list */
4088 unsorted_chunks (av)->bk = bck;
4089 bck->fd = unsorted_chunks (av);
4090
4091 /* Take now instead of binning if exact fit */
4092
4093 if (size == nb)
4094 {
4095 set_inuse_bit_at_offset (victim, size);
4096 if (av != &main_arena)
4097 set_non_main_arena (victim);
4098 #if USE_TCACHE
4099 /* Fill cache first, return to user only if cache fills.
4100 We may return one of these chunks later. */
4101 if (tcache_nb > 0
4102 && tcache->counts[tc_idx] < mp_.tcache_count)
4103 {
4104 tcache_put (victim, tc_idx);
4105 return_cached = 1;
4106 continue;
4107 }
4108 else
4109 {
4110 #endif
4111 check_malloced_chunk (av, victim, nb);
4112 void *p = chunk2mem (victim);
4113 alloc_perturb (p, bytes);
4114 return p;
4115 #if USE_TCACHE
4116 }
4117 #endif
4118 }
4119
4120 /* place chunk in bin */
4121
4122 if (in_smallbin_range (size))
4123 {
4124 victim_index = smallbin_index (size);
4125 bck = bin_at (av, victim_index);
4126 fwd = bck->fd;
4127 }
4128 else
4129 {
4130 victim_index = largebin_index (size);
4131 bck = bin_at (av, victim_index);
4132 fwd = bck->fd;
4133
4134 /* maintain large bins in sorted order */
4135 if (fwd != bck)
4136 {
4137 /* Or with inuse bit to speed comparisons */
4138 size |= PREV_INUSE;
4139 /* if smaller than smallest, bypass loop below */
4140 assert (chunk_main_arena (bck->bk));
4141 if ((unsigned long) (size)
4142 < (unsigned long) chunksize_nomask (bck->bk))
4143 {
4144 fwd = bck;
4145 bck = bck->bk;
4146
4147 victim->fd_nextsize = fwd->fd;
4148 victim->bk_nextsize = fwd->fd->bk_nextsize;
4149 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4150 }
4151 else
4152 {
4153 assert (chunk_main_arena (fwd));
4154 while ((unsigned long) size < chunksize_nomask (fwd))
4155 {
4156 fwd = fwd->fd_nextsize;
4157 assert (chunk_main_arena (fwd));
4158 }
4159
4160 if ((unsigned long) size
4161 == (unsigned long) chunksize_nomask (fwd))
4162 /* Always insert in the second position. */
4163 fwd = fwd->fd;
4164 else
4165 {
4166 victim->fd_nextsize = fwd;
4167 victim->bk_nextsize = fwd->bk_nextsize;
4168 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
4169 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4170 fwd->bk_nextsize = victim;
4171 victim->bk_nextsize->fd_nextsize = victim;
4172 }
4173 bck = fwd->bk;
4174 if (bck->fd != fwd)
4175 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4176 }
4177 }
4178 else
4179 victim->fd_nextsize = victim->bk_nextsize = victim;
4180 }
4181
4182 mark_bin (av, victim_index);
4183 victim->bk = bck;
4184 victim->fd = fwd;
4185 fwd->bk = victim;
4186 bck->fd = victim;
4187
4188 #if USE_TCACHE
4189 /* If we've processed as many chunks as we're allowed while
4190 filling the cache, return one of the cached ones. */
4191 ++tcache_unsorted_count;
4192 if (return_cached
4193 && mp_.tcache_unsorted_limit > 0
4194 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
4195 {
4196 return tcache_get (tc_idx);
4197 }
4198 #endif
4199
4200 #define MAX_ITERS 10000
4201 if (++iters >= MAX_ITERS)
4202 break;
4203 }
4204
4205 #if USE_TCACHE
4206 /* If all the small chunks we found ended up cached, return one now. */
4207 if (return_cached)
4208 {
4209 return tcache_get (tc_idx);
4210 }
4211 #endif
4212
4213 /*
4214 If a large request, scan through the chunks of current bin in
4215 sorted order to find smallest that fits. Use the skip list for this.
4216 */
4217
4218 if (!in_smallbin_range (nb))
4219 {
4220 bin = bin_at (av, idx);
4221
4222 /* skip scan if empty or largest chunk is too small */
4223 if ((victim = first (bin)) != bin
4224 && (unsigned long) chunksize_nomask (victim)
4225 >= (unsigned long) (nb))
4226 {
4227 victim = victim->bk_nextsize;
4228 while (((unsigned long) (size = chunksize (victim)) <
4229 (unsigned long) (nb)))
4230 victim = victim->bk_nextsize;
4231
4232 /* Avoid removing the first entry for a size so that the skip
4233 list does not have to be rerouted. */
4234 if (victim != last (bin)
4235 && chunksize_nomask (victim)
4236 == chunksize_nomask (victim->fd))
4237 victim = victim->fd;
4238
4239 remainder_size = size - nb;
4240 unlink_chunk (av, victim);
4241
4242 /* Exhaust */
4243 if (remainder_size < MINSIZE)
4244 {
4245 set_inuse_bit_at_offset (victim, size);
4246 if (av != &main_arena)
4247 set_non_main_arena (victim);
4248 }
4249 /* Split */
4250 else
4251 {
4252 remainder = chunk_at_offset (victim, nb);
4253 /* We cannot assume the unsorted list is empty and therefore
4254 have to perform a complete insert here. */
4255 bck = unsorted_chunks (av);
4256 fwd = bck->fd;
4257 if (__glibc_unlikely (fwd->bk != bck))
4258 malloc_printerr ("malloc(): corrupted unsorted chunks");
4259 remainder->bk = bck;
4260 remainder->fd = fwd;
4261 bck->fd = remainder;
4262 fwd->bk = remainder;
4263 if (!in_smallbin_range (remainder_size))
4264 {
4265 remainder->fd_nextsize = NULL;
4266 remainder->bk_nextsize = NULL;
4267 }
4268 set_head (victim, nb | PREV_INUSE |
4269 (av != &main_arena ? NON_MAIN_ARENA : 0));
4270 set_head (remainder, remainder_size | PREV_INUSE);
4271 set_foot (remainder, remainder_size);
4272 }
4273 check_malloced_chunk (av, victim, nb);
4274 void *p = chunk2mem (victim);
4275 alloc_perturb (p, bytes);
4276 return p;
4277 }
4278 }
4279
4280 /*
4281 Search for a chunk by scanning bins, starting with next largest
4282 bin. This search is strictly by best-fit; i.e., the smallest
4283 (with ties going to approximately the least recently used) chunk
4284 that fits is selected.
4285
4286 The bitmap avoids needing to check that most blocks are nonempty.
4287 The particular case of skipping all bins during warm-up phases
4288 when no chunks have been returned yet is faster than it might look.
4289 */
4290
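/* For example, with BINMAPSHIFT == 5 each binmap word covers 32 bins, so
   bin 70 is tracked by bit (70 & 31) == 6 of word 70 >> 5 == 2.  */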
4291 ++idx;
4292 bin = bin_at (av, idx);
4293 block = idx2block (idx);
4294 map = av->binmap[block];
4295 bit = idx2bit (idx);
4296
4297 for (;; )
4298 {
4299 /* Skip rest of block if there are no more set bits in this block. */
4300 if (bit > map || bit == 0)
4301 {
4302 do
4303 {
4304 if (++block >= BINMAPSIZE) /* out of bins */
4305 goto use_top;
4306 }
4307 while ((map = av->binmap[block]) == 0);
4308
4309 bin = bin_at (av, (block << BINMAPSHIFT));
4310 bit = 1;
4311 }
4312
4313 /* Advance to bin with set bit. There must be one. */
4314 while ((bit & map) == 0)
4315 {
4316 bin = next_bin (bin);
4317 bit <<= 1;
4318 assert (bit != 0);
4319 }
4320
4321 /* Inspect the bin. It is likely to be non-empty */
4322 victim = last (bin);
4323
4324 /* If a false alarm (empty bin), clear the bit. */
4325 if (victim == bin)
4326 {
4327 av->binmap[block] = map &= ~bit; /* Write through */
4328 bin = next_bin (bin);
4329 bit <<= 1;
4330 }
4331
4332 else
4333 {
4334 size = chunksize (victim);
4335
4336 /* We know the first chunk in this bin is big enough to use. */
4337 assert ((unsigned long) (size) >= (unsigned long) (nb));
4338
4339 remainder_size = size - nb;
4340
4341 /* unlink */
4342 unlink_chunk (av, victim);
4343
4344 /* Exhaust */
4345 if (remainder_size < MINSIZE)
4346 {
4347 set_inuse_bit_at_offset (victim, size);
4348 if (av != &main_arena)
4349 set_non_main_arena (victim);
4350 }
4351
4352 /* Split */
4353 else
4354 {
4355 remainder = chunk_at_offset (victim, nb);
4356
4357 /* We cannot assume the unsorted list is empty and therefore
4358 have to perform a complete insert here. */
4359 bck = unsorted_chunks (av);
4360 fwd = bck->fd;
4361 if (__glibc_unlikely (fwd->bk != bck))
4362 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4363 remainder->bk = bck;
4364 remainder->fd = fwd;
4365 bck->fd = remainder;
4366 fwd->bk = remainder;
4367
4368 /* advertise as last remainder */
4369 if (in_smallbin_range (nb))
4370 av->last_remainder = remainder;
4371 if (!in_smallbin_range (remainder_size))
4372 {
4373 remainder->fd_nextsize = NULL;
4374 remainder->bk_nextsize = NULL;
4375 }
4376 set_head (victim, nb | PREV_INUSE |
4377 (av != &main_arena ? NON_MAIN_ARENA : 0));
4378 set_head (remainder, remainder_size | PREV_INUSE);
4379 set_foot (remainder, remainder_size);
4380 }
4381 check_malloced_chunk (av, victim, nb);
4382 void *p = chunk2mem (victim);
4383 alloc_perturb (p, bytes);
4384 return p;
4385 }
4386 }
4387
4388 use_top:
4389 /*
4390 If large enough, split off the chunk bordering the end of memory
4391 (held in av->top). Note that this is in accord with the best-fit
4392 search rule. In effect, av->top is treated as larger (and thus
4393 less well fitting) than any other available chunk since it can
4394 be extended to be as large as necessary (up to system
4395 limitations).
4396
4397 We require that av->top always exists (i.e., has size >=
4398 MINSIZE) after initialization, so if it would otherwise be
4399 exhausted by current request, it is replenished. (The main
4400 reason for ensuring it exists is that we may need MINSIZE space
4401 to put in fenceposts in sysmalloc.)
4402 */
4403
4404 victim = av->top;
4405 size = chunksize (victim);
4406
4407 if (__glibc_unlikely (size > av->system_mem))
4408 malloc_printerr ("malloc(): corrupted top size");
4409
4410 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4411 {
4412 remainder_size = size - nb;
4413 remainder = chunk_at_offset (victim, nb);
4414 av->top = remainder;
4415 set_head (victim, nb | PREV_INUSE |
4416 (av != &main_arena ? NON_MAIN_ARENA : 0));
4417 set_head (remainder, remainder_size | PREV_INUSE);
4418
4419 check_malloced_chunk (av, victim, nb);
4420 void *p = chunk2mem (victim);
4421 alloc_perturb (p, bytes);
4422 return p;
4423 }
4424
4425 /* When we are using atomic ops to free fast chunks we can get
4426 here for all block sizes. */
4427 else if (atomic_load_relaxed (&av->have_fastchunks))
4428 {
4429 malloc_consolidate (av);
4430 /* restore original bin index */
4431 if (in_smallbin_range (nb))
4432 idx = smallbin_index (nb);
4433 else
4434 idx = largebin_index (nb);
4435 }
4436
4437 /*
4438 Otherwise, relay to handle system-dependent cases
4439 */
4440 else
4441 {
4442 void *p = sysmalloc (nb, av);
4443 if (p != NULL)
4444 alloc_perturb (p, bytes);
4445 return p;
4446 }
4447 }
4448 }
4449
4450 /*
4451 ------------------------------ free ------------------------------
4452 */
4453
4454 static void
4455 _int_free (mstate av, mchunkptr p, int have_lock)
4456 {
4457 INTERNAL_SIZE_T size; /* its size */
4458 mfastbinptr *fb; /* associated fastbin */
4459 mchunkptr nextchunk; /* next contiguous chunk */
4460 INTERNAL_SIZE_T nextsize; /* its size */
4461 int nextinuse; /* true if nextchunk is used */
4462 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4463 mchunkptr bck; /* misc temp for linking */
4464 mchunkptr fwd; /* misc temp for linking */
4465
4466 size = chunksize (p);
4467
4468 /* Little security check which won't hurt performance: the
4469 allocator never wraps around at the end of the address space.
4470 Therefore we can exclude some size values which might appear
4471 here by accident or by "design" from some intruder. */
4472 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4473 || __builtin_expect (misaligned_chunk (p), 0))
4474 malloc_printerr ("free(): invalid pointer");
4475 /* We know that each chunk is at least MINSIZE bytes in size and a
4476 multiple of MALLOC_ALIGNMENT. */
4477 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4478 malloc_printerr ("free(): invalid size");
4479
4480 check_inuse_chunk(av, p);
4481
4482 #if USE_TCACHE
4483 {
4484 size_t tc_idx = csize2tidx (size);
4485 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4486 {
4487 /* Check to see if it's already in the tcache. */
4488 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4489
4490 /* This test succeeds on double free. However, we don't 100%
4491 trust it (it also matches random payload data at a 1 in
4492 2^<size_t> chance), so verify it's not an unlikely
4493 coincidence before aborting. */
4494 if (__glibc_unlikely (e->key == tcache_key))
4495 {
4496 tcache_entry *tmp;
4497 size_t cnt = 0;
4498 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4499 for (tmp = tcache->entries[tc_idx];
4500 tmp;
4501 tmp = REVEAL_PTR (tmp->next), ++cnt)
4502 {
4503 if (cnt >= mp_.tcache_count)
4504 malloc_printerr ("free(): too many chunks detected in tcache");
4505 if (__glibc_unlikely (!aligned_OK (tmp)))
4506 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4507 if (tmp == e)
4508 malloc_printerr ("free(): double free detected in tcache 2");
4509 /* If we get here, it was a coincidence. We've wasted a
4510 few cycles, but don't abort. */
4511 }
4512 }
4513
4514 if (tcache->counts[tc_idx] < mp_.tcache_count)
4515 {
4516 tcache_put (p, tc_idx);
4517 return;
4518 }
4519 }
4520 }
4521 #endif
4522
4523 /*
4524 If eligible, place chunk on a fastbin so it can be found
4525 and used quickly in malloc.
4526 */
4527
4528 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4529
4530 #if TRIM_FASTBINS
4531 /*
4532 If TRIM_FASTBINS set, don't place chunks
4533 bordering top into fastbins
4534 */
4535 && (chunk_at_offset(p, size) != av->top)
4536 #endif
4537 ) {
4538
4539 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4540 <= CHUNK_HDR_SZ, 0)
4541 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4542 >= av->system_mem, 0))
4543 {
4544 bool fail = true;
4545 /* We might not have a lock at this point and concurrent modifications
4546 of system_mem might result in a false positive. Redo the test after
4547 getting the lock. */
4548 if (!have_lock)
4549 {
4550 __libc_lock_lock (av->mutex);
4551 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
4552 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4553 __libc_lock_unlock (av->mutex);
4554 }
4555
4556 if (fail)
4557 malloc_printerr ("free(): invalid next size (fast)");
4558 }
4559
4560 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4561
4562 atomic_store_relaxed (&av->have_fastchunks, true);
4563 unsigned int idx = fastbin_index(size);
4564 fb = &fastbin (av, idx);
4565
4566 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4567 mchunkptr old = *fb, old2;
4568
4569 if (SINGLE_THREAD_P)
4570 {
4571 /* Check that the top of the bin is not the record we are going to
4572 add (i.e., double free). */
4573 if (__builtin_expect (old == p, 0))
4574 malloc_printerr ("double free or corruption (fasttop)");
4575 p->fd = PROTECT_PTR (&p->fd, old);
4576 *fb = p;
4577 }
4578 else
4579 do
4580 {
4581 /* Check that the top of the bin is not the record we are going to
4582 add (i.e., double free). */
4583 if (__builtin_expect (old == p, 0))
4584 malloc_printerr ("double free or corruption (fasttop)");
4585 old2 = old;
4586 p->fd = PROTECT_PTR (&p->fd, old);
4587 }
4588 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4589 != old2);
4590
4591 /* Check that size of fastbin chunk at the top is the same as
4592 size of the chunk that we are adding. We can dereference OLD
4593 only if we have the lock, otherwise it might have already been
4594 allocated again. */
4595 if (have_lock && old != NULL
4596 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
4597 malloc_printerr ("invalid fastbin entry (free)");
4598 }
4599
4600 /*
4601 Consolidate other non-mmapped chunks as they arrive.
4602 */
4603
4604 else if (!chunk_is_mmapped(p)) {
4605
4606 /* If we're single-threaded, don't lock the arena. */
4607 if (SINGLE_THREAD_P)
4608 have_lock = true;
4609
4610 if (!have_lock)
4611 __libc_lock_lock (av->mutex);
4612
4613 nextchunk = chunk_at_offset(p, size);
4614
4615 /* Lightweight tests: check whether the block is already the
4616 top block. */
4617 if (__glibc_unlikely (p == av->top))
4618 malloc_printerr ("double free or corruption (top)");
4619 /* Or whether the next chunk is beyond the boundaries of the arena. */
4620 if (__builtin_expect (contiguous (av)
4621 && (char *) nextchunk
4622 >= ((char *) av->top + chunksize(av->top)), 0))
4623 malloc_printerr ("double free or corruption (out)");
4624 /* Or whether the block is actually not marked used. */
4625 if (__glibc_unlikely (!prev_inuse(nextchunk)))
4626 malloc_printerr ("double free or corruption (!prev)");
4627
4628 nextsize = chunksize(nextchunk);
4629 if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
4630 || __builtin_expect (nextsize >= av->system_mem, 0))
4631 malloc_printerr ("free(): invalid next size (normal)");
4632
4633 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4634
4635 /* consolidate backward */
4636 if (!prev_inuse(p)) {
4637 prevsize = prev_size (p);
4638 size += prevsize;
4639 p = chunk_at_offset(p, -((long) prevsize));
4640 if (__glibc_unlikely (chunksize(p) != prevsize))
4641 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4642 unlink_chunk (av, p);
4643 }
4644
4645 if (nextchunk != av->top) {
4646 /* get and clear inuse bit */
4647 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4648
4649 /* consolidate forward */
4650 if (!nextinuse) {
4651 unlink_chunk (av, nextchunk);
4652 size += nextsize;
4653 } else
4654 clear_inuse_bit_at_offset(nextchunk, 0);
4655
4656 /*
4657 Place the chunk in unsorted chunk list. Chunks are
4658 not placed into regular bins until after they have
4659 been given one chance to be used in malloc.
4660 */
4661
4662 bck = unsorted_chunks(av);
4663 fwd = bck->fd;
4664 if (__glibc_unlikely (fwd->bk != bck))
4665 malloc_printerr ("free(): corrupted unsorted chunks");
4666 p->fd = fwd;
4667 p->bk = bck;
4668 if (!in_smallbin_range(size))
4669 {
4670 p->fd_nextsize = NULL;
4671 p->bk_nextsize = NULL;
4672 }
4673 bck->fd = p;
4674 fwd->bk = p;
4675
4676 set_head(p, size | PREV_INUSE);
4677 set_foot(p, size);
4678
4679 check_free_chunk(av, p);
4680 }
4681
4682 /*
4683 If the chunk borders the current high end of memory,
4684 consolidate into top
4685 */
4686
4687 else {
4688 size += nextsize;
4689 set_head(p, size | PREV_INUSE);
4690 av->top = p;
4691 check_chunk(av, p);
4692 }
4693
4694 /*
4695 If freeing a large space, consolidate possibly-surrounding
4696 chunks. Then, if the total unused topmost memory exceeds trim
4697 threshold, ask malloc_trim to reduce top.
4698
4699 Unless max_fast is 0, we don't know if there are fastbins
4700 bordering top, so we cannot tell for sure whether threshold
4701 has been reached unless fastbins are consolidated. But we
4702 don't want to consolidate on each free. As a compromise,
4703 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4704 is reached.
4705 */
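/* FASTBIN_CONSOLIDATION_THRESHOLD is 64 KiB, so in practice a single free
   that leaves a coalesced chunk of at least that size is what triggers
   the consolidation and trimming pass below.  */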
4706
4707 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4708 if (atomic_load_relaxed (&av->have_fastchunks))
4709 malloc_consolidate(av);
4710
4711 if (av == &main_arena) {
4712 #ifndef MORECORE_CANNOT_TRIM
4713 if ((unsigned long)(chunksize(av->top)) >=
4714 (unsigned long)(mp_.trim_threshold))
4715 systrim(mp_.top_pad, av);
4716 #endif
4717 } else {
4718 /* Always try heap_trim(), even if the top chunk is not
4719 large, because the corresponding heap might go away. */
4720 heap_info *heap = heap_for_ptr(top(av));
4721
4722 assert(heap->ar_ptr == av);
4723 heap_trim(heap, mp_.top_pad);
4724 }
4725 }
4726
4727 if (!have_lock)
4728 __libc_lock_unlock (av->mutex);
4729 }
4730 /*
4731 If the chunk was allocated via mmap, release via munmap().
4732 */
4733
4734 else {
4735 munmap_chunk (p);
4736 }
4737 }
4738
4739 /*
4740 ------------------------- malloc_consolidate -------------------------
4741
4742 malloc_consolidate is a specialized version of free() that tears
4743 down chunks held in fastbins. Free itself cannot be used for this
4744 purpose since, among other things, it might place chunks back onto
4745 fastbins. So, instead, we need to use a minor variant of the same
4746 code.
4747 */
4748
4749 static void malloc_consolidate(mstate av)
4750 {
4751 mfastbinptr* fb; /* current fastbin being consolidated */
4752 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4753 mchunkptr p; /* current chunk being consolidated */
4754 mchunkptr nextp; /* next chunk to consolidate */
4755 mchunkptr unsorted_bin; /* bin header */
4756 mchunkptr first_unsorted; /* chunk to link to */
4757
4758 /* These have same use as in free() */
4759 mchunkptr nextchunk;
4760 INTERNAL_SIZE_T size;
4761 INTERNAL_SIZE_T nextsize;
4762 INTERNAL_SIZE_T prevsize;
4763 int nextinuse;
4764
4765 atomic_store_relaxed (&av->have_fastchunks, false);
4766
4767 unsorted_bin = unsorted_chunks(av);
4768
4769 /*
4770 Remove each chunk from fast bin and consolidate it, placing it
4771 then in unsorted bin. Among other reasons for doing this,
4772 placing in unsorted bin avoids needing to calculate actual bins
4773 until malloc is sure that chunks aren't immediately going to be
4774 reused anyway.
4775 */
4776
4777 maxfb = &fastbin (av, NFASTBINS - 1);
4778 fb = &fastbin (av, 0);
4779 do {
4780 p = atomic_exchange_acquire (fb, NULL);
4781 if (p != 0) {
4782 do {
4783 {
4784 if (__glibc_unlikely (misaligned_chunk (p)))
4785 malloc_printerr ("malloc_consolidate(): "
4786 "unaligned fastbin chunk detected");
4787
4788 unsigned int idx = fastbin_index (chunksize (p));
4789 if ((&fastbin (av, idx)) != fb)
4790 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4791 }
4792
4793 check_inuse_chunk(av, p);
4794 nextp = REVEAL_PTR (p->fd);
4795
4796 /* Slightly streamlined version of consolidation code in free() */
4797 size = chunksize (p);
4798 nextchunk = chunk_at_offset(p, size);
4799 nextsize = chunksize(nextchunk);
4800
4801 if (!prev_inuse(p)) {
4802 prevsize = prev_size (p);
4803 size += prevsize;
4804 p = chunk_at_offset(p, -((long) prevsize));
4805 if (__glibc_unlikely (chunksize(p) != prevsize))
4806 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4807 unlink_chunk (av, p);
4808 }
4809
4810 if (nextchunk != av->top) {
4811 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4812
4813 if (!nextinuse) {
4814 size += nextsize;
4815 unlink_chunk (av, nextchunk);
4816 } else
4817 clear_inuse_bit_at_offset(nextchunk, 0);
4818
4819 first_unsorted = unsorted_bin->fd;
4820 unsorted_bin->fd = p;
4821 first_unsorted->bk = p;
4822
4823 if (!in_smallbin_range (size)) {
4824 p->fd_nextsize = NULL;
4825 p->bk_nextsize = NULL;
4826 }
4827
4828 set_head(p, size | PREV_INUSE);
4829 p->bk = unsorted_bin;
4830 p->fd = first_unsorted;
4831 set_foot(p, size);
4832 }
4833
4834 else {
4835 size += nextsize;
4836 set_head(p, size | PREV_INUSE);
4837 av->top = p;
4838 }
4839
4840 } while ( (p = nextp) != 0);
4841
4842 }
4843 } while (fb++ != maxfb);
4844 }
4845
4846 /*
4847 ------------------------------ realloc ------------------------------
4848 */
4849
4850 static void *
4851 _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4852 INTERNAL_SIZE_T nb)
4853 {
4854 mchunkptr newp; /* chunk to return */
4855 INTERNAL_SIZE_T newsize; /* its size */
4856 void* newmem; /* corresponding user mem */
4857
4858 mchunkptr next; /* next contiguous chunk after oldp */
4859
4860 mchunkptr remainder; /* extra space at end of newp */
4861 unsigned long remainder_size; /* its size */
4862
4863 /* oldmem size */
4864 if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
4865 || __builtin_expect (oldsize >= av->system_mem, 0)
4866 || __builtin_expect (oldsize != chunksize (oldp), 0))
4867 malloc_printerr ("realloc(): invalid old size");
4868
4869 check_inuse_chunk (av, oldp);
4870
4871 /* All callers already filter out mmap'ed chunks. */
4872 assert (!chunk_is_mmapped (oldp));
4873
4874 next = chunk_at_offset (oldp, oldsize);
4875 INTERNAL_SIZE_T nextsize = chunksize (next);
4876 if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
4877 || __builtin_expect (nextsize >= av->system_mem, 0))
4878 malloc_printerr ("realloc(): invalid next size");
4879
4880 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4881 {
4882 /* already big enough; split below */
4883 newp = oldp;
4884 newsize = oldsize;
4885 }
4886
4887 else
4888 {
4889 /* Try to expand forward into top */
4890 if (next == av->top &&
4891 (unsigned long) (newsize = oldsize + nextsize) >=
4892 (unsigned long) (nb + MINSIZE))
4893 {
4894 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4895 av->top = chunk_at_offset (oldp, nb);
4896 set_head (av->top, (newsize - nb) | PREV_INUSE);
4897 check_inuse_chunk (av, oldp);
4898 return tag_new_usable (chunk2mem (oldp));
4899 }
4900
4901 /* Try to expand forward into next chunk; split off remainder below */
4902 else if (next != av->top &&
4903 !inuse (next) &&
4904 (unsigned long) (newsize = oldsize + nextsize) >=
4905 (unsigned long) (nb))
4906 {
4907 newp = oldp;
4908 unlink_chunk (av, next);
4909 }
4910
4911 /* allocate, copy, free */
4912 else
4913 {
4914 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4915 if (newmem == 0)
4916 return 0; /* propagate failure */
4917
4918 newp = mem2chunk (newmem);
4919 newsize = chunksize (newp);
4920
4921 /*
4922 Avoid copy if newp is next chunk after oldp.
4923 */
4924 if (newp == next)
4925 {
4926 newsize += oldsize;
4927 newp = oldp;
4928 }
4929 else
4930 {
4931 void *oldmem = chunk2mem (oldp);
4932 size_t sz = memsize (oldp);
4933 (void) tag_region (oldmem, sz);
4934 newmem = tag_new_usable (newmem);
4935 memcpy (newmem, oldmem, sz);
4936 _int_free (av, oldp, 1);
4937 check_inuse_chunk (av, newp);
4938 return newmem;
4939 }
4940 }
4941 }
4942
4943 /* If possible, free extra space in old or extended chunk */
4944
4945 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4946
4947 remainder_size = newsize - nb;
4948
4949 if (remainder_size < MINSIZE) /* not enough extra to split off */
4950 {
4951 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4952 set_inuse_bit_at_offset (newp, newsize);
4953 }
4954 else /* split remainder */
4955 {
4956 remainder = chunk_at_offset (newp, nb);
4957 /* Clear any user-space tags before writing the header. */
4958 remainder = tag_region (remainder, remainder_size);
4959 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4960 set_head (remainder, remainder_size | PREV_INUSE |
4961 (av != &main_arena ? NON_MAIN_ARENA : 0));
4962 /* Mark remainder as inuse so free() won't complain */
4963 set_inuse_bit_at_offset (remainder, remainder_size);
4964 _int_free (av, remainder, 1);
4965 }
4966
4967 check_inuse_chunk (av, newp);
4968 return tag_new_usable (chunk2mem (newp));
4969 }
4970
4971 /*
4972 ------------------------------ memalign ------------------------------
4973 */
4974
4975 /* Returns 0 if the chunk is not and does not contain the requested
4976 aligned sub-chunk, else returns the amount of "waste" from
4977 trimming. BYTES is the *user* byte size, not the chunk byte
4978 size. */
4979 static size_t
4980 chunk_ok_for_memalign (mchunkptr p, size_t alignment, size_t bytes)
4981 {
4982 void *m = chunk2mem (p);
4983 INTERNAL_SIZE_T size = memsize (p);
4984 void *aligned_m = m;
4985
4986 if (__glibc_unlikely (misaligned_chunk (p)))
4987 malloc_printerr ("_int_memalign(): unaligned chunk detected");
4988
4989 aligned_m = PTR_ALIGN_UP (m, alignment);
4990
4991 INTERNAL_SIZE_T front_extra = (intptr_t) aligned_m - (intptr_t) m;
4992
4993 /* We can't trim off the front as it's too small. */
4994 if (front_extra > 0 && front_extra < MINSIZE)
4995 return 0;
4996
4997 /* If it's a perfect fit, it's an exception to the return value rule
4998 (we would return zero waste, which looks like "not usable"), so
4999 handle it here by returning a small non-zero value instead. */
5000 if (size == bytes && front_extra == 0)
5001 return 1;
5002
5003 /* If the block we need fits in the chunk, calculate total waste. */
5004 if (size > bytes + front_extra)
5005 return size - bytes;
5006
5007 /* Can't use this chunk. */
5008 return 0;
5009 }
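/* For instance, chunk_ok_for_memalign (p, 64, 200) is non-zero only if
   P's user area either already starts on a 64-byte boundary or can spare
   at least MINSIZE leading bytes to reach one, and the chunk is big
   enough to still satisfy the 200-byte request after the aligned start.  */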
5010
5011 /* BYTES is user requested bytes, not requested chunksize bytes. */
5012 static void *
5013 _int_memalign (mstate av, size_t alignment, size_t bytes)
5014 {
5015 INTERNAL_SIZE_T nb; /* padded request size */
5016 char *m; /* memory returned by malloc call */
5017 mchunkptr p; /* corresponding chunk */
5018 char *brk; /* alignment point within p */
5019 mchunkptr newp; /* chunk to return */
5020 INTERNAL_SIZE_T newsize; /* its size */
5021 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5022 mchunkptr remainder; /* spare room at end to split off */
5023 unsigned long remainder_size; /* its size */
5024 INTERNAL_SIZE_T size;
5025 mchunkptr victim;
5026
5027 nb = checked_request2size (bytes);
5028 if (nb == 0)
5029 {
5030 __set_errno (ENOMEM);
5031 return NULL;
5032 }
5033
5034 /* We can't check tcache here because we hold the arena lock, which
5035 tcache doesn't expect. We expect tcache to have been checked
5036 earlier, in the public entry points. */
5037
5038 /* Strategy: search the bins looking for an existing block that
5039 meets our needs. We scan a range of bins from "exact size" to
5040 "just under 2x", spanning the small/large barrier if needed. If
5041 we don't find anything in those bins, the common malloc code will
5042 scan starting at 2x. */
5043
5044 /* This will be set if we found a candidate chunk. */
5045 victim = NULL;
5046
5047 /* Fast bins are singly-linked, so removing a chunk from the middle is
5048 hard, and they are unlikely to meet our alignment requirements anyway.
5049 We have not done any experimentation with searching for aligned fastbins. */
5050
5051 int first_bin_index;
5052 int first_largebin_index;
5053 int last_bin_index;
5054
5055 if (in_smallbin_range (nb))
5056 first_bin_index = smallbin_index (nb);
5057 else
5058 first_bin_index = largebin_index (nb);
5059
5060 if (in_smallbin_range (nb * 2))
5061 last_bin_index = smallbin_index (nb * 2);
5062 else
5063 last_bin_index = largebin_index (nb * 2);
5064
5065 first_largebin_index = largebin_index (MIN_LARGE_SIZE);
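/* Editor's illustration (hypothetical request, typical 64-bit layout where
   small bins are spaced 16 bytes apart and MIN_LARGE_SIZE is 1024): for a
   padded request nb == 336, smallbin_index (336) == 21 and
   smallbin_index (672) == 42, so the loop below scans bins 21 .. 41, all
   on the small-bin side of first_largebin_index.  */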
5066
5067 int victim_index; /* its bin index */
5068
5069 for (victim_index = first_bin_index;
5070 victim_index < last_bin_index;
5071 victim_index ++)
5072 {
5073 victim = NULL;
5074
5075 if (victim_index < first_largebin_index)
5076 {
5077 /* Check small bins. Small bin chunks are doubly-linked even though
5078 every chunk in a given bin is the same size. */
5079
5080 mchunkptr fwd; /* misc temp for linking */
5081 mchunkptr bck; /* misc temp for linking */
5082
5083 bck = bin_at (av, victim_index);
5084 fwd = bck->fd;
5085 while (fwd != bck)
5086 {
5087 if (chunk_ok_for_memalign (fwd, alignment, bytes) > 0)
5088 {
5089 victim = fwd;
5090
5091 /* Unlink it */
5092 victim->fd->bk = victim->bk;
5093 victim->bk->fd = victim->fd;
5094 break;
5095 }
5096
5097 fwd = fwd->fd;
5098 }
5099 }
5100 else
5101 {
5102 /* Check large bins. */
5103 mchunkptr fwd; /* misc temp for linking */
5104 mchunkptr bck; /* misc temp for linking */
5105 mchunkptr best = NULL;
5106 size_t best_size = 0;
5107
5108 bck = bin_at (av, victim_index);
5109 fwd = bck->fd;
5110
5111 while (fwd != bck)
5112 {
5113 int extra;
5114
5115 if (chunksize (fwd) < nb)
5116 break;
5117 extra = chunk_ok_for_memalign (fwd, alignment, bytes);
5118 if (extra > 0
5119 && (extra <= best_size || best == NULL))
5120 {
5121 best = fwd;
5122 best_size = extra;
5123 }
5124
5125 fwd = fwd->fd;
5126 }
5127 victim = best;
5128
5129 if (victim != NULL)
5130 {
5131 unlink_chunk (av, victim);
5132 break;
5133 }
5134 }
5135
5136 if (victim != NULL)
5137 break;
5138 }
5139
5140 /* Strategy: find a spot within that chunk that meets the alignment
5141 request, and then possibly free the leading and trailing space.
5142 This strategy is incredibly costly and can lead to external
5143 fragmentation if the split-off leading and trailing chunks go unused. */
5144
5145 if (victim != NULL)
5146 {
5147 p = victim;
5148 m = chunk2mem (p);
5149 set_inuse (p);
5150 }
5151 else
5152 {
5153 /* Call malloc with worst case padding to hit alignment. */
5154
5155 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
5156
5157 if (m == 0)
5158 return 0; /* propagate failure */
5159
5160 p = mem2chunk (m);
5161 }
5162
5163 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
5164 {
5165 /* Find an aligned spot inside chunk. Since we need to give back
5166 leading space in a chunk of at least MINSIZE, if the first
5167 calculation places us at a spot with less than MINSIZE leader,
5168 we can move to the next aligned spot -- we've allocated enough
5169 total room so that this is always possible. */
5170 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
5171 - ((signed long) alignment));
5172 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
5173 brk += alignment;
5174
5175 newp = (mchunkptr) brk;
5176 leadsize = brk - (char *) (p);
5177 newsize = chunksize (p) - leadsize;
5178
5179 /* For mmapped chunks, just adjust offset */
5180 if (chunk_is_mmapped (p))
5181 {
5182 set_prev_size (newp, prev_size (p) + leadsize);
5183 set_head (newp, newsize | IS_MMAPPED);
5184 return chunk2mem (newp);
5185 }
5186
5187 /* Otherwise, give back leader, use the rest */
5188 set_head (newp, newsize | PREV_INUSE |
5189 (av != &main_arena ? NON_MAIN_ARENA : 0));
5190 set_inuse_bit_at_offset (newp, newsize);
5191 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5192 _int_free (av, p, 1);
5193 p = newp;
5194
5195 assert (newsize >= nb &&
5196 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
5197 }
5198
5199 /* Also give back spare room at the end */
5200 if (!chunk_is_mmapped (p))
5201 {
5202 size = chunksize (p);
5203 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
5204 {
5205 remainder_size = size - nb;
5206 remainder = chunk_at_offset (p, nb);
5207 set_head (remainder, remainder_size | PREV_INUSE |
5208 (av != &main_arena ? NON_MAIN_ARENA : 0));
5209 set_head_size (p, nb);
5210 _int_free (av, remainder, 1);
5211 }
5212 }
5213
5214 check_inuse_chunk (av, p);
5215 return chunk2mem (p);
5216 }
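/* Editor's sketch of what _int_memalign provides at the public API level.
   This is a free-standing usage example, not part of the allocator; it
   assumes only the standard <stdlib.h> interfaces that ultimately funnel
   into _mid_memalign/_int_memalign.

     #include <stdlib.h>
     #include <stdint.h>
     #include <stdio.h>

     int
     main (void)
     {
       // 4096-byte alignment, e.g. for a page-aligned I/O buffer.
       void *buf = aligned_alloc (4096, 8192);
       if (buf == NULL)
         return 1;
       printf ("aligned to 4096: %d\n",
               ((uintptr_t) buf % 4096) == 0);
       free (buf);
       return 0;
     }

   posix_memalign (defined later in this file) additionally requires the
   alignment to be a power-of-two multiple of sizeof (void *).  */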
5217
5218
5219 /*
5220 ------------------------------ malloc_trim ------------------------------
5221 */
5222
5223 static int
5224 mtrim (mstate av, size_t pad)
5225 {
5226 /* Ensure all blocks are consolidated. */
5227 malloc_consolidate (av);
5228
5229 const size_t ps = GLRO (dl_pagesize);
5230 int psindex = bin_index (ps);
5231 const size_t psm1 = ps - 1;
5232
5233 int result = 0;
5234 for (int i = 1; i < NBINS; ++i)
5235 if (i == 1 || i >= psindex)
5236 {
5237 mbinptr bin = bin_at (av, i);
5238
5239 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5240 {
5241 INTERNAL_SIZE_T size = chunksize (p);
5242
5243 if (size > psm1 + sizeof (struct malloc_chunk))
5244 {
5245 /* See whether the chunk contains at least one unused page. */
5246 char *paligned_mem = (char *) (((uintptr_t) p
5247 + sizeof (struct malloc_chunk)
5248 + psm1) & ~psm1);
5249
5250 assert ((char *) chunk2mem (p) + 2 * CHUNK_HDR_SZ
5251 <= paligned_mem);
5252 assert ((char *) p + size > paligned_mem);
5253
5254 /* This is the size we could potentially free. */
5255 size -= paligned_mem - (char *) p;
5256
5257 if (size > psm1)
5258 {
5259 #if MALLOC_DEBUG
5260 /* When debugging we simulate destroying the memory
5261 content. */
5262 memset (paligned_mem, 0x89, size & ~psm1);
5263 #endif
5264 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5265
5266 result = 1;
5267 }
5268 }
5269 }
5270 }
5271
5272 #ifndef MORECORE_CANNOT_TRIM
5273 return result | (av == &main_arena ? systrim (pad, av) : 0);
5274
5275 #else
5276 return result;
5277 #endif
5278 }
5279
5280
5281 int
5282 __malloc_trim (size_t s)
5283 {
5284 int result = 0;
5285
5286 if (!__malloc_initialized)
5287 ptmalloc_init ();
5288
5289 mstate ar_ptr = &main_arena;
5290 do
5291 {
5292 __libc_lock_lock (ar_ptr->mutex);
5293 result |= mtrim (ar_ptr, s);
5294 __libc_lock_unlock (ar_ptr->mutex);
5295
5296 ar_ptr = ar_ptr->next;
5297 }
5298 while (ar_ptr != &main_arena);
5299
5300 return result;
5301 }
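/* Editor's sketch of calling the public wrapper above.  malloc_trim is a
   GNU extension declared in <malloc.h>; it returns 1 if any memory was
   actually released back to the system, else 0.

     #include <malloc.h>
     #include <stdlib.h>
     #include <stdio.h>

     int
     main (void)
     {
       void *blocks[1024];
       for (int i = 0; i < 1024; i++)
         blocks[i] = malloc (4096);
       for (int i = 0; i < 1024; i++)
         free (blocks[i]);

       // Ask every arena to give unused pages back (pad of 0 bytes kept).
       int released = malloc_trim (0);
       printf ("malloc_trim released memory: %d\n", released);
       return 0;
     }
   */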
5302
5303
5304 /*
5305 ------------------------- malloc_usable_size -------------------------
5306 */
5307
5308 static size_t
5309 musable (void *mem)
5310 {
5311 mchunkptr p = mem2chunk (mem);
5312
5313 if (chunk_is_mmapped (p))
5314 return chunksize (p) - CHUNK_HDR_SZ;
5315 else if (inuse (p))
5316 return memsize (p);
5317
5318 return 0;
5319 }
5320
5321 #if IS_IN (libc)
5322 size_t
5323 __malloc_usable_size (void *m)
5324 {
5325 if (m == NULL)
5326 return 0;
5327 return musable (m);
5328 }
5329 #endif
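/* Editor's sketch of malloc_usable_size from the caller's side.  The
   reported value may exceed the requested size because requests are
   rounded up to the chunk granularity; a portable program should still
   only rely on the bytes it asked for.

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int
     main (void)
     {
       void *p = malloc (100);
       if (p == NULL)
         return 1;
       // Typically prints a value >= 100, e.g. 104 on x86-64 (not guaranteed).
       printf ("usable size: %zu\n", malloc_usable_size (p));
       free (p);
       return 0;
     }
   */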
5330
5331 /*
5332 ------------------------------ mallinfo ------------------------------
5333 Accumulate malloc statistics for arena AV into M.
5334 */
5335 static void
5336 int_mallinfo (mstate av, struct mallinfo2 *m)
5337 {
5338 size_t i;
5339 mbinptr b;
5340 mchunkptr p;
5341 INTERNAL_SIZE_T avail;
5342 INTERNAL_SIZE_T fastavail;
5343 int nblocks;
5344 int nfastblocks;
5345
5346 check_malloc_state (av);
5347
5348 /* Account for top */
5349 avail = chunksize (av->top);
5350 nblocks = 1; /* top always exists */
5351
5352 /* traverse fastbins */
5353 nfastblocks = 0;
5354 fastavail = 0;
5355
5356 for (i = 0; i < NFASTBINS; ++i)
5357 {
5358 for (p = fastbin (av, i);
5359 p != 0;
5360 p = REVEAL_PTR (p->fd))
5361 {
5362 if (__glibc_unlikely (misaligned_chunk (p)))
5363 malloc_printerr ("int_mallinfo(): "
5364 "unaligned fastbin chunk detected");
5365 ++nfastblocks;
5366 fastavail += chunksize (p);
5367 }
5368 }
5369
5370 avail += fastavail;
5371
5372 /* traverse regular bins */
5373 for (i = 1; i < NBINS; ++i)
5374 {
5375 b = bin_at (av, i);
5376 for (p = last (b); p != b; p = p->bk)
5377 {
5378 ++nblocks;
5379 avail += chunksize (p);
5380 }
5381 }
5382
5383 m->smblks += nfastblocks;
5384 m->ordblks += nblocks;
5385 m->fordblks += avail;
5386 m->uordblks += av->system_mem - avail;
5387 m->arena += av->system_mem;
5388 m->fsmblks += fastavail;
5389 if (av == &main_arena)
5390 {
5391 m->hblks = mp_.n_mmaps;
5392 m->hblkhd = mp_.mmapped_mem;
5393 m->usmblks = 0;
5394 m->keepcost = chunksize (av->top);
5395 }
5396 }
5397
5398
5399 struct mallinfo2
5400 __libc_mallinfo2 (void)
5401 {
5402 struct mallinfo2 m;
5403 mstate ar_ptr;
5404
5405 if (!__malloc_initialized)
5406 ptmalloc_init ();
5407
5408 memset (&m, 0, sizeof (m));
5409 ar_ptr = &main_arena;
5410 do
5411 {
5412 __libc_lock_lock (ar_ptr->mutex);
5413 int_mallinfo (ar_ptr, &m);
5414 __libc_lock_unlock (ar_ptr->mutex);
5415
5416 ar_ptr = ar_ptr->next;
5417 }
5418 while (ar_ptr != &main_arena);
5419
5420 return m;
5421 }
5422 libc_hidden_def (__libc_mallinfo2)
5423
5424 struct mallinfo
5425 __libc_mallinfo (void)
5426 {
5427 struct mallinfo m;
5428 struct mallinfo2 m2 = __libc_mallinfo2 ();
5429
5430 m.arena = m2.arena;
5431 m.ordblks = m2.ordblks;
5432 m.smblks = m2.smblks;
5433 m.hblks = m2.hblks;
5434 m.hblkhd = m2.hblkhd;
5435 m.usmblks = m2.usmblks;
5436 m.fsmblks = m2.fsmblks;
5437 m.uordblks = m2.uordblks;
5438 m.fordblks = m2.fordblks;
5439 m.keepcost = m2.keepcost;
5440
5441 return m;
5442 }
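/* Editor's sketch of reading the statistics computed by int_mallinfo
   through the public mallinfo2 interface (the non-truncating variant,
   declared in <malloc.h>; all fields are size_t).

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int
     main (void)
     {
       void *p = malloc (1 << 20);
       struct mallinfo2 mi = mallinfo2 ();
       printf ("arena    (non-mmapped bytes): %zu\n", mi.arena);
       printf ("hblkhd   (mmapped bytes):     %zu\n", mi.hblkhd);
       printf ("uordblks (in-use bytes):      %zu\n", mi.uordblks);
       printf ("fordblks (free bytes):        %zu\n", mi.fordblks);
       free (p);
       return 0;
     }
   */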
5443
5444
5445 /*
5446 ------------------------------ malloc_stats ------------------------------
5447 */
5448
5449 void
5450 __malloc_stats (void)
5451 {
5452 int i;
5453 mstate ar_ptr;
5454 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5455
5456 if (!__malloc_initialized)
5457 ptmalloc_init ();
5458 _IO_flockfile (stderr);
5459 int old_flags2 = stderr->_flags2;
5460 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5461 for (i = 0, ar_ptr = &main_arena;; i++)
5462 {
5463 struct mallinfo2 mi;
5464
5465 memset (&mi, 0, sizeof (mi));
5466 __libc_lock_lock (ar_ptr->mutex);
5467 int_mallinfo (ar_ptr, &mi);
5468 fprintf (stderr, "Arena %d:\n", i);
5469 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5470 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5471 #if MALLOC_DEBUG > 1
5472 if (i > 0)
5473 dump_heap (heap_for_ptr (top (ar_ptr)));
5474 #endif
5475 system_b += mi.arena;
5476 in_use_b += mi.uordblks;
5477 __libc_lock_unlock (ar_ptr->mutex);
5478 ar_ptr = ar_ptr->next;
5479 if (ar_ptr == &main_arena)
5480 break;
5481 }
5482 fprintf (stderr, "Total (incl. mmap):\n");
5483 fprintf (stderr, "system bytes = %10u\n", system_b);
5484 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5485 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5486 fprintf (stderr, "max mmap bytes = %10lu\n",
5487 (unsigned long) mp_.max_mmapped_mem);
5488 stderr->_flags2 = old_flags2;
5489 _IO_funlockfile (stderr);
5490 }
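/* Editor's note: a hedged sketch of what a caller sees.  malloc_stats is
   declared in <malloc.h> and writes to stderr; the numbers below are
   purely illustrative, but the layout follows the fprintf calls above.

     #include <malloc.h>
     int main (void) { malloc_stats (); return 0; }

   Possible output (values are hypothetical):

     Arena 0:
     system bytes     =     135168
     in use bytes     =      74352
     Total (incl. mmap):
     system bytes     =     135168
     in use bytes     =      74352
     max mmap regions =          0
     max mmap bytes   =          0
   */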
5491
5492
5493 /*
5494 ------------------------------ mallopt ------------------------------
5495 */
5496 static __always_inline int
5497 do_set_trim_threshold (size_t value)
5498 {
5499 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5500 mp_.no_dyn_threshold);
5501 mp_.trim_threshold = value;
5502 mp_.no_dyn_threshold = 1;
5503 return 1;
5504 }
5505
5506 static __always_inline int
5507 do_set_top_pad (size_t value)
5508 {
5509 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5510 mp_.no_dyn_threshold);
5511 mp_.top_pad = value;
5512 mp_.no_dyn_threshold = 1;
5513 return 1;
5514 }
5515
5516 static __always_inline int
5517 do_set_mmap_threshold (size_t value)
5518 {
5519 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5520 mp_.no_dyn_threshold);
5521 mp_.mmap_threshold = value;
5522 mp_.no_dyn_threshold = 1;
5523 return 1;
5524 }
5525
5526 static __always_inline int
5527 do_set_mmaps_max (int32_t value)
5528 {
5529 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5530 mp_.no_dyn_threshold);
5531 mp_.n_mmaps_max = value;
5532 mp_.no_dyn_threshold = 1;
5533 return 1;
5534 }
5535
5536 static __always_inline int
5537 do_set_mallopt_check (int32_t value)
5538 {
5539 return 1;
5540 }
5541
5542 static __always_inline int
5543 do_set_perturb_byte (int32_t value)
5544 {
5545 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5546 perturb_byte = value;
5547 return 1;
5548 }
5549
5550 static __always_inline int
5551 do_set_arena_test (size_t value)
5552 {
5553 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5554 mp_.arena_test = value;
5555 return 1;
5556 }
5557
5558 static __always_inline int
5559 do_set_arena_max (size_t value)
5560 {
5561 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5562 mp_.arena_max = value;
5563 return 1;
5564 }
5565
5566 #if USE_TCACHE
5567 static __always_inline int
5568 do_set_tcache_max (size_t value)
5569 {
5570 if (value <= MAX_TCACHE_SIZE)
5571 {
5572 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5573 mp_.tcache_max_bytes = value;
5574 mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
5575 return 1;
5576 }
5577 return 0;
5578 }
5579
5580 static __always_inline int
5581 do_set_tcache_count (size_t value)
5582 {
5583 if (value <= MAX_TCACHE_COUNT)
5584 {
5585 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5586 mp_.tcache_count = value;
5587 return 1;
5588 }
5589 return 0;
5590 }
5591
5592 static __always_inline int
5593 do_set_tcache_unsorted_limit (size_t value)
5594 {
5595 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5596 mp_.tcache_unsorted_limit = value;
5597 return 1;
5598 }
5599 #endif
5600
5601 static __always_inline int
5602 do_set_mxfast (size_t value)
5603 {
5604 if (value <= MAX_FAST_SIZE)
5605 {
5606 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5607 set_max_fast (value);
5608 return 1;
5609 }
5610 return 0;
5611 }
5612
5613 static __always_inline int
5614 do_set_hugetlb (size_t value)
5615 {
5616 if (value == 1)
5617 {
5618 enum malloc_thp_mode_t thp_mode = __malloc_thp_mode ();
5619 /*
5620 Only enable THP madvise usage if the system supports it and is set
5621 to 'madvise' mode. Otherwise the madvise() call is wasteful.
5622 */
5623 if (thp_mode == malloc_thp_mode_madvise)
5624 mp_.thp_pagesize = __malloc_default_thp_pagesize ();
5625 }
5626 else if (value >= 2)
5627 __malloc_hugepage_config (value == 2 ? 0 : value, &mp_.hp_pagesize,
5628 &mp_.hp_flags);
5629 return 0;
5630 }
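/* Editor's note on how do_set_hugetlb is normally reached: through the
   glibc.malloc.hugetlb tunable (see arena.c) rather than mallopt.  A
   hedged usage sketch from the shell, assuming a glibc built with
   tunables enabled:

     # value 1: madvise(MADV_HUGEPAGE) large allocations if the system
     #          THP mode is "madvise"
     GLIBC_TUNABLES=glibc.malloc.hugetlb=1 ./myprog

     # value 2: try explicit huge pages (default huge page size) via mmap
     GLIBC_TUNABLES=glibc.malloc.hugetlb=2 ./myprog
   */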
5631
5632 int
5633 __libc_mallopt (int param_number, int value)
5634 {
5635 mstate av = &main_arena;
5636 int res = 1;
5637
5638 if (!__malloc_initialized)
5639 ptmalloc_init ();
5640 __libc_lock_lock (av->mutex);
5641
5642 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5643
5644 /* We must consolidate main arena before changing max_fast
5645 (see definition of set_max_fast). */
5646 malloc_consolidate (av);
5647
5648 /* Many of these helper functions take a size_t. We do not worry
5649 about overflow here, because negative int values will wrap to
5650 very large size_t values and the helpers have sufficient range
5651 checking for such conversions. Many of these helpers are also
5652 used by the tunables macros in arena.c. */
5653
5654 switch (param_number)
5655 {
5656 case M_MXFAST:
5657 res = do_set_mxfast (value);
5658 break;
5659
5660 case M_TRIM_THRESHOLD:
5661 res = do_set_trim_threshold (value);
5662 break;
5663
5664 case M_TOP_PAD:
5665 res = do_set_top_pad (value);
5666 break;
5667
5668 case M_MMAP_THRESHOLD:
5669 res = do_set_mmap_threshold (value);
5670 break;
5671
5672 case M_MMAP_MAX:
5673 res = do_set_mmaps_max (value);
5674 break;
5675
5676 case M_CHECK_ACTION:
5677 res = do_set_mallopt_check (value);
5678 break;
5679
5680 case M_PERTURB:
5681 res = do_set_perturb_byte (value);
5682 break;
5683
5684 case M_ARENA_TEST:
5685 if (value > 0)
5686 res = do_set_arena_test (value);
5687 break;
5688
5689 case M_ARENA_MAX:
5690 if (value > 0)
5691 res = do_set_arena_max (value);
5692 break;
5693 }
5694 __libc_lock_unlock (av->mutex);
5695 return res;
5696 }
5697 libc_hidden_def (__libc_mallopt)
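/* Editor's sketch of the caller-visible side of __libc_mallopt.  The
   parameter constants are the M_* macros from <malloc.h>; mallopt returns
   1 on success and 0 on failure, unlike errno-style interfaces.

     #include <malloc.h>
     #include <stdio.h>

     int
     main (void)
     {
       // Route requests of 1 MiB and larger through mmap, and disable
       // fastbins entirely (M_MXFAST of 0).
       int ok1 = mallopt (M_MMAP_THRESHOLD, 1024 * 1024);
       int ok2 = mallopt (M_MXFAST, 0);
       printf ("mallopt results: %d %d\n", ok1, ok2);
       return 0;
     }
   */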
5698
5699
5700 /*
5701 -------------------- Alternative MORECORE functions --------------------
5702 */
5703
5704
5705 /*
5706 General Requirements for MORECORE.
5707
5708 The MORECORE function must have the following properties:
5709
5710 If MORECORE_CONTIGUOUS is false:
5711
5712 * MORECORE must allocate in multiples of pagesize. It will
5713 only be called with arguments that are multiples of pagesize.
5714
5715 * MORECORE(0) must return an address that is at least
5716 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5717
5718 else (i.e. If MORECORE_CONTIGUOUS is true):
5719
5720 * Consecutive calls to MORECORE with positive arguments
5721 return increasing addresses, indicating that space has been
5722 contiguously extended.
5723
5724 * MORECORE need not allocate in multiples of pagesize.
5725 Calls to MORECORE need not have args of multiples of pagesize.
5726
5727 * MORECORE need not page-align.
5728
5729 In either case:
5730
5731 * MORECORE may allocate more memory than requested. (Or even less,
5732 but this will generally result in a malloc failure.)
5733
5734 * MORECORE must not allocate memory when given argument zero, but
5735 instead return one past the end address of memory from previous
5736 nonzero call. This malloc does NOT call MORECORE(0)
5737 until at least one call with positive arguments is made, so
5738 the initial value returned is not important.
5739
5740 * Even though consecutive calls to MORECORE need not return contiguous
5741 addresses, it must be OK for malloc'ed chunks to span multiple
5742 regions in those cases where they do happen to be contiguous.
5743
5744 * MORECORE need not handle negative arguments -- it may instead
5745 just return MORECORE_FAILURE when given negative arguments.
5746 Negative arguments are always multiples of pagesize. MORECORE
5747 must not misinterpret negative args as large positive unsigned
5748 args. You can suppress all such calls from even occurring by defining
5749 MORECORE_CANNOT_TRIM.
5750
5751 There is some variation across systems in the type of the
5752 argument to sbrk/MORECORE. Since size_t is unsigned, the argument
5753 cannot actually be size_t, because sbrk supports negative args; it is
5754 normally the signed type of the same width as size_t (sometimes
5755 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5756 matter though. Internally, we use "long" as arguments, which should
5757 work across all reasonable possibilities.
5758
5759 Additionally, if MORECORE ever returns failure for a positive
5760 request, then mmap is used as a noncontiguous system allocator. This
5761 is a useful backup strategy for systems with holes in address spaces
5762 -- in this case sbrk cannot contiguously expand the heap, but mmap
5763 may be able to map noncontiguous space.
5764
5765 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5766 a function that always returns MORECORE_FAILURE.
5767
5768 If you are using this malloc with something other than sbrk (or its
5769 emulation) to supply memory regions, you probably want to set
5770 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5771 allocator kindly contributed for pre-OSX macOS. It uses virtually
5772 but not necessarily physically contiguous non-paged memory (locked
5773 in, present and won't get swapped out). You can use it by
5774 uncommenting this section, adding some #includes, and setting up the
5775 appropriate defines above:
5776
5777 *#define MORECORE osMoreCore
5778 *#define MORECORE_CONTIGUOUS 0
5779
5780 There is also a shutdown routine that should somehow be called for
5781 cleanup upon program exit.
5782
5783 *#define MAX_POOL_ENTRIES 100
5784 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5785 static int next_os_pool;
5786 void *our_os_pools[MAX_POOL_ENTRIES];
5787
5788 void *osMoreCore(int size)
5789 {
5790 void *ptr = 0;
5791 static void *sbrk_top = 0;
5792
5793 if (size > 0)
5794 {
5795 if (size < MINIMUM_MORECORE_SIZE)
5796 size = MINIMUM_MORECORE_SIZE;
5797 if (CurrentExecutionLevel() == kTaskLevel)
5798 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5799 if (ptr == 0)
5800 {
5801 return (void *) MORECORE_FAILURE;
5802 }
5803 // save ptrs so they can be freed during cleanup
5804 our_os_pools[next_os_pool] = ptr;
5805 next_os_pool++;
5806 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5807 sbrk_top = (char *) ptr + size;
5808 return ptr;
5809 }
5810 else if (size < 0)
5811 {
5812 // we don't currently support shrink behavior
5813 return (void *) MORECORE_FAILURE;
5814 }
5815 else
5816 {
5817 return sbrk_top;
5818 }
5819 }
5820
5821 // cleanup any allocated memory pools
5822 // called as last thing before shutting down driver
5823
5824 void osCleanupMem(void)
5825 {
5826 void **ptr;
5827
5828 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5829 if (*ptr)
5830 {
5831 PoolDeallocate(*ptr);
5832 * ptr = 0;
5833 }
5834 }
5835
5836 */
5837
5838
5839 /* Helper code. */
5840
5841 extern char **__libc_argv attribute_hidden;
5842
5843 static void
5844 malloc_printerr (const char *str)
5845 {
5846 #if IS_IN (libc)
5847 __libc_message ("%s\n", str);
5848 #else
5849 __libc_fatal (str);
5850 #endif
5851 __builtin_unreachable ();
5852 }
5853
5854 #if IS_IN (libc)
5855 /* We need a wrapper function for one of the additions of POSIX. */
5856 int
5857 __posix_memalign (void **memptr, size_t alignment, size_t size)
5858 {
5859 void *mem;
5860
5861 if (!__malloc_initialized)
5862 ptmalloc_init ();
5863
5864 /* Test whether the ALIGNMENT argument is valid. It must be a power of
5865 two multiple of sizeof (void *). */
5866 if (alignment % sizeof (void *) != 0
5867 || !powerof2 (alignment / sizeof (void *))
5868 || alignment == 0)
5869 return EINVAL;
5870
5871
5872 void *address = RETURN_ADDRESS (0);
5873 mem = _mid_memalign (alignment, size, address);
5874
5875 if (mem != NULL)
5876 {
5877 *memptr = mem;
5878 return 0;
5879 }
5880
5881 return ENOMEM;
5882 }
5883 weak_alias (__posix_memalign, posix_memalign)
5884 #endif
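/* Editor's sketch of the POSIX-level contract checked above: the result
   is returned through MEMPTR, and the function's return value is an
   error number (0, EINVAL or ENOMEM), not -1 with errno.

     #include <stdlib.h>
     #include <errno.h>
     #include <stdio.h>
     #include <string.h>

     int
     main (void)
     {
       void *p = NULL;
       int err = posix_memalign (&p, 64, 1000);   // 64 is a power-of-two
                                                  // multiple of sizeof (void *)
       if (err != 0)
         {
           fprintf (stderr, "posix_memalign: %s\n", strerror (err));
           return 1;
         }
       free (p);

       // Alignment 3 fails the power-of-two / sizeof (void *) check: EINVAL.
       err = posix_memalign (&p, 3, 1000);
       printf ("bad alignment -> %d (EINVAL is %d)\n", err, EINVAL);
       return 0;
     }
   */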
5885
5886
5887 int
5888 __malloc_info (int options, FILE *fp)
5889 {
5890 /* For now, at least. */
5891 if (options != 0)
5892 return EINVAL;
5893
5894 int n = 0;
5895 size_t total_nblocks = 0;
5896 size_t total_nfastblocks = 0;
5897 size_t total_avail = 0;
5898 size_t total_fastavail = 0;
5899 size_t total_system = 0;
5900 size_t total_max_system = 0;
5901 size_t total_aspace = 0;
5902 size_t total_aspace_mprotect = 0;
5903
5904
5905
5906 if (!__malloc_initialized)
5907 ptmalloc_init ();
5908
5909 fputs ("<malloc version=\"1\">\n", fp);
5910
5911 /* Iterate over all arenas currently in use. */
5912 mstate ar_ptr = &main_arena;
5913 do
5914 {
5915 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5916
5917 size_t nblocks = 0;
5918 size_t nfastblocks = 0;
5919 size_t avail = 0;
5920 size_t fastavail = 0;
5921 struct
5922 {
5923 size_t from;
5924 size_t to;
5925 size_t total;
5926 size_t count;
5927 } sizes[NFASTBINS + NBINS - 1];
5928 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5929
5930 __libc_lock_lock (ar_ptr->mutex);
5931
5932 /* Account for top chunk. The top-most available chunk is
5933 treated specially and is never in any bin. See "initial_top"
5934 comments. */
5935 avail = chunksize (ar_ptr->top);
5936 nblocks = 1; /* Top always exists. */
5937
5938 for (size_t i = 0; i < NFASTBINS; ++i)
5939 {
5940 mchunkptr p = fastbin (ar_ptr, i);
5941 if (p != NULL)
5942 {
5943 size_t nthissize = 0;
5944 size_t thissize = chunksize (p);
5945
5946 while (p != NULL)
5947 {
5948 if (__glibc_unlikely (misaligned_chunk (p)))
5949 malloc_printerr ("__malloc_info(): "
5950 "unaligned fastbin chunk detected");
5951 ++nthissize;
5952 p = REVEAL_PTR (p->fd);
5953 }
5954
5955 fastavail += nthissize * thissize;
5956 nfastblocks += nthissize;
5957 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5958 sizes[i].to = thissize;
5959 sizes[i].count = nthissize;
5960 }
5961 else
5962 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5963
5964 sizes[i].total = sizes[i].count * sizes[i].to;
5965 }
5966
5967
5968 mbinptr bin;
5969 struct malloc_chunk *r;
5970
5971 for (size_t i = 1; i < NBINS; ++i)
5972 {
5973 bin = bin_at (ar_ptr, i);
5974 r = bin->fd;
5975 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5976 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5977 = sizes[NFASTBINS - 1 + i].count = 0;
5978
5979 if (r != NULL)
5980 while (r != bin)
5981 {
5982 size_t r_size = chunksize_nomask (r);
5983 ++sizes[NFASTBINS - 1 + i].count;
5984 sizes[NFASTBINS - 1 + i].total += r_size;
5985 sizes[NFASTBINS - 1 + i].from
5986 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
5987 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5988 r_size);
5989
5990 r = r->fd;
5991 }
5992
5993 if (sizes[NFASTBINS - 1 + i].count == 0)
5994 sizes[NFASTBINS - 1 + i].from = 0;
5995 nblocks += sizes[NFASTBINS - 1 + i].count;
5996 avail += sizes[NFASTBINS - 1 + i].total;
5997 }
5998
5999 size_t heap_size = 0;
6000 size_t heap_mprotect_size = 0;
6001 size_t heap_count = 0;
6002 if (ar_ptr != &main_arena)
6003 {
6004 /* Iterate over the arena heaps from back to front. */
6005 heap_info *heap = heap_for_ptr (top (ar_ptr));
6006 do
6007 {
6008 heap_size += heap->size;
6009 heap_mprotect_size += heap->mprotect_size;
6010 heap = heap->prev;
6011 ++heap_count;
6012 }
6013 while (heap != NULL);
6014 }
6015
6016 __libc_lock_unlock (ar_ptr->mutex);
6017
6018 total_nfastblocks += nfastblocks;
6019 total_fastavail += fastavail;
6020
6021 total_nblocks += nblocks;
6022 total_avail += avail;
6023
6024 for (size_t i = 0; i < nsizes; ++i)
6025 if (sizes[i].count != 0 && i != NFASTBINS)
6026 fprintf (fp, "\
6027 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
6028 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
6029
6030 if (sizes[NFASTBINS].count != 0)
6031 fprintf (fp, "\
6032 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
6033 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
6034 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
6035
6036 total_system += ar_ptr->system_mem;
6037 total_max_system += ar_ptr->max_system_mem;
6038
6039 fprintf (fp,
6040 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
6041 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
6042 "<system type=\"current\" size=\"%zu\"/>\n"
6043 "<system type=\"max\" size=\"%zu\"/>\n",
6044 nfastblocks, fastavail, nblocks, avail,
6045 ar_ptr->system_mem, ar_ptr->max_system_mem);
6046
6047 if (ar_ptr != &main_arena)
6048 {
6049 fprintf (fp,
6050 "<aspace type=\"total\" size=\"%zu\"/>\n"
6051 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
6052 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
6053 heap_size, heap_mprotect_size, heap_count);
6054 total_aspace += heap_size;
6055 total_aspace_mprotect += heap_mprotect_size;
6056 }
6057 else
6058 {
6059 fprintf (fp,
6060 "<aspace type=\"total\" size=\"%zu\"/>\n"
6061 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
6062 ar_ptr->system_mem, ar_ptr->system_mem);
6063 total_aspace += ar_ptr->system_mem;
6064 total_aspace_mprotect += ar_ptr->system_mem;
6065 }
6066
6067 fputs ("</heap>\n", fp);
6068 ar_ptr = ar_ptr->next;
6069 }
6070 while (ar_ptr != &main_arena);
6071
6072 fprintf (fp,
6073 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
6074 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
6075 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
6076 "<system type=\"current\" size=\"%zu\"/>\n"
6077 "<system type=\"max\" size=\"%zu\"/>\n"
6078 "<aspace type=\"total\" size=\"%zu\"/>\n"
6079 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
6080 "</malloc>\n",
6081 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
6082 mp_.n_mmaps, mp_.mmapped_mem,
6083 total_system, total_max_system,
6084 total_aspace, total_aspace_mprotect);
6085
6086 return 0;
6087 }
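/* Editor's sketch of calling the XML emitter above through its public
   alias malloc_info (GNU extension, <malloc.h>).  OPTIONS must be 0 for
   now, as enforced at the top of __malloc_info.

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int
     main (void)
     {
       void *p = malloc (4096);
       // Emit the <malloc version="1"> ... </malloc> report to stdout.
       if (malloc_info (0, stdout) != 0)
         return 1;                      // nonzero indicates failure
       free (p);
       return 0;
     }
   */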
6088 #if IS_IN (libc)
6089 weak_alias (__malloc_info, malloc_info)
6090
6091 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
6092 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
6093 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
6094 strong_alias (__libc_memalign, __memalign)
6095 weak_alias (__libc_memalign, memalign)
6096 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
6097 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
6098 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
6099 strong_alias (__libc_mallinfo, __mallinfo)
6100 weak_alias (__libc_mallinfo, mallinfo)
6101 strong_alias (__libc_mallinfo2, __mallinfo2)
6102 weak_alias (__libc_mallinfo2, mallinfo2)
6103 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
6104
6105 weak_alias (__malloc_stats, malloc_stats)
6106 weak_alias (__malloc_usable_size, malloc_usable_size)
6107 weak_alias (__malloc_trim, malloc_trim)
6108 #endif
6109
6110 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
6111 compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
6112 #endif
6113
6114 /* ------------------------------------------------------------
6115 History:
6116
6117 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
6118
6119 */
6120 /*
6121 * Local variables:
6122 * c-basic-offset: 2
6123 * End:
6124 */