1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2024 Free Software Foundation, Inc.
3 Copyright The GNU Toolchain Authors.
4 This file is part of the GNU C Library.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If
18 not, see <https://www.gnu.org/licenses/>. */
19
20 /*
21 This is a version (aka ptmalloc2) of malloc/free/realloc written by
22 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
23
24 There have been substantial changes made after the integration into
25 glibc in all parts of the code. Do not look for much commonality
26 with the ptmalloc2 version.
27
28 * Version ptmalloc2-20011215
29 based on:
30 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
31
32 * Quickstart
33
34 In order to compile this implementation, a Makefile is provided with
35 the ptmalloc2 distribution, which has pre-defined targets for some
36 popular systems (e.g. "make posix" for Posix threads). All that is
37 typically required with regard to compiler flags is the selection of
38 the thread package via defining one out of USE_PTHREADS, USE_THR or
39 USE_SPROC. Check the thread-m.h file for what effects this has.
40 Many/most systems will additionally require USE_TSD_DATA_HACK to be
41 defined, so this is the default for "make posix".
42
43 * Why use this malloc?
44
45 This is not the fastest, most space-conserving, most portable, or
46 most tunable malloc ever written. However it is among the fastest
47 while also being among the most space-conserving, portable and tunable.
48 Consistent balance across these factors results in a good general-purpose
49 allocator for malloc-intensive programs.
50
51 The main properties of the algorithms are:
52 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
53 with ties normally decided via FIFO (i.e. least recently used).
54 * For small (<= 64 bytes by default) requests, it is a caching
55 allocator, that maintains pools of quickly recycled chunks.
56 * In between, and for combinations of large and small requests, it does
57 the best it can trying to meet both goals at once.
58 * For very large requests (>= 128KB by default), it relies on system
59 memory mapping facilities, if supported.
60
61 For a longer but slightly out of date high-level description, see
62 http://gee.cs.oswego.edu/dl/html/malloc.html
63
64 You may already by default be using a C library containing a malloc
65 that is based on some version of this malloc (for example in
66 linux). You might still want to use the one in this file in order to
67 customize settings or to avoid overheads associated with library
68 versions.
69
70 * Contents, described in more detail in "description of public routines" below.
71
72 Standard (ANSI/SVID/...) functions:
73 malloc(size_t n);
74 calloc(size_t n_elements, size_t element_size);
75 free(void* p);
76 realloc(void* p, size_t n);
77 memalign(size_t alignment, size_t n);
78 valloc(size_t n);
79 mallinfo()
80 mallopt(int parameter_number, int parameter_value)
81
82 Additional functions:
83 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
84 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
85 pvalloc(size_t n);
86 malloc_trim(size_t pad);
87 malloc_usable_size(void* p);
88 malloc_stats();
89
90 * Vital statistics:
91
92 Supported pointer representation: 4 or 8 bytes
93 Supported size_t representation: 4 or 8 bytes
94 Note that size_t is allowed to be 4 bytes even if pointers are 8.
95 You can adjust this by defining INTERNAL_SIZE_T
96
97 Alignment: 2 * sizeof(size_t) (default)
98 (i.e., 8 byte alignment with 4byte size_t). This suffices for
99 nearly all current machines and C compilers. However, you can
100 define MALLOC_ALIGNMENT to be wider than this if necessary.
101
102 Minimum overhead per allocated chunk: 4 or 8 bytes
103 Each malloced chunk has a hidden word of overhead holding size
104 and status information.
105
106 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
107 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
108
109 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
110 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
111 needed; 4 (8) for a trailing size field and 8 (16) bytes for
112 free list pointers. Thus, the minimum allocatable size is
113 16/24/32 bytes.
114
115 Even a request for zero bytes (i.e., malloc(0)) returns a
116 pointer to something of the minimum allocatable size.
117
118 The maximum overhead wastage (i.e., the number of extra bytes
119 allocated beyond those requested in malloc) is less than or equal
120 to the minimum size, except for requests >= mmap_threshold that
121 are serviced via mmap(), where the worst case wastage is 2 *
122 sizeof(size_t) bytes plus the remainder from a system page (the
123 minimal mmap unit); typically 4096 or 8192 bytes.
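
     For example, with 8-byte size_t and 16-byte alignment (a typical
     64-bit configuration), malloc(1) returns 24 usable bytes out of a
     32-byte chunk, and malloc(25) returns 40 usable bytes out of a
     48-byte chunk; the difference between usable and requested size is
     the wastage described above.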
124
125 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
126 8-byte size_t: 2^64 minus about two pages
127
128 It is assumed that (possibly signed) size_t values suffice to
129 represent chunk sizes. `Possibly signed' is due to the fact
130 that `size_t' may be defined on a system as either a signed or
131 an unsigned type. The ISO C standard says that it must be
132 unsigned, but a few systems are known not to adhere to this.
133 Additionally, even when size_t is unsigned, sbrk (which is by
134 default used to obtain memory from system) accepts signed
135 arguments, and may not be able to handle size_t-wide arguments
136 with negative sign bit. Generally, values that would
137 appear as negative after accounting for overhead and alignment
138 are supported only via mmap(), which does not have this
139 limitation.
140
141 Requests for sizes outside the allowed range will perform an optional
142 failure action and then return null. (Requests may also
143 fail because a system is out of memory.)
144
145 Thread-safety: thread-safe
146
147 Compliance: I believe it is compliant with the 1997 Single Unix Specification.
148 Also SVID/XPG, ANSI C, and probably others as well.
149
150 * Synopsis of compile-time options:
151
152 People have reported using previous versions of this malloc on all
153 versions of Unix, sometimes by tweaking some of the defines
154 below. It has been tested most extensively on Solaris and Linux.
155 People also report using it in stand-alone embedded systems.
156
157 The implementation is in straight, hand-tuned ANSI C. It is not
158 at all modular. (Sorry!) It uses a lot of macros. To be at all
159 usable, this code should be compiled using an optimizing compiler
160 (for example gcc -O3) that can simplify expressions and control
161 paths. (FAQ: some macros import variables as arguments rather than
162 declare locals because people reported that some debuggers
163 otherwise get confused.)
164
165 OPTION DEFAULT VALUE
166
167 Compilation Environment options:
168
169 HAVE_MREMAP 0
170
171 Changing default word sizes:
172
173 INTERNAL_SIZE_T size_t
174
175 Configuration and functionality options:
176
177 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
178 USE_MALLOC_LOCK NOT defined
179 MALLOC_DEBUG NOT defined
180 REALLOC_ZERO_BYTES_FREES 1
181 TRIM_FASTBINS 0
182
183 Options for customizing MORECORE:
184
185 MORECORE sbrk
186 MORECORE_FAILURE -1
187 MORECORE_CONTIGUOUS 1
188 MORECORE_CANNOT_TRIM NOT defined
189 MORECORE_CLEARS 1
190 MMAP_AS_MORECORE_SIZE (1024 * 1024)
191
192 Tuning options that are also dynamically changeable via mallopt:
193
194 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
195 DEFAULT_TRIM_THRESHOLD 128 * 1024
196 DEFAULT_TOP_PAD 0
197 DEFAULT_MMAP_THRESHOLD 128 * 1024
198 DEFAULT_MMAP_MAX 65536
199
200 There are several other #defined constants and macros that you
201 probably don't want to touch unless you are extending or adapting malloc. */
202
203 /*
204 void* is the pointer type that malloc should say it returns
205 */
206
207 #ifndef void
208 #define void void
209 #endif /*void*/
210
211 #include <stddef.h> /* for size_t */
212 #include <stdlib.h> /* for getenv(), abort() */
213 #include <unistd.h> /* for __libc_enable_secure */
214
215 #include <atomic.h>
216 #include <_itoa.h>
217 #include <bits/wordsize.h>
218 #include <sys/sysinfo.h>
219
220 #include <ldsodefs.h>
221 #include <setvmaname.h>
222
223 #include <unistd.h>
224 #include <stdio.h> /* needed for malloc_stats */
225 #include <errno.h>
226 #include <assert.h>
227
228 #include <shlib-compat.h>
229
230 /* For uintptr_t. */
231 #include <stdint.h>
232
233 /* For va_arg, va_start, va_end. */
234 #include <stdarg.h>
235
236 /* For MIN, MAX, powerof2. */
237 #include <sys/param.h>
238
239 /* For ALIGN_UP et. al. */
240 #include <libc-pointer-arith.h>
241
242 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
243 #include <libc-diag.h>
244
245 /* For memory tagging. */
246 #include <libc-mtag.h>
247
248 #include <malloc/malloc-internal.h>
249
250 /* For SINGLE_THREAD_P. */
251 #include <sysdep-cancel.h>
252
253 #include <libc-internal.h>
254
255 /* For tcache double-free check. */
256 #include <random-bits.h>
257 #include <sys/random.h>
258 #include <not-cancel.h>
259
260 /*
261 Debugging:
262
263 Because freed chunks may be overwritten with bookkeeping fields, this
264 malloc will often die when freed memory is overwritten by user
265 programs. This can be very effective (albeit in an annoying way)
266 in helping track down dangling pointers.
267
268 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
269 enabled that will catch more memory errors. You probably won't be
270 able to make much sense of the actual assertion errors, but they
271 should help you locate incorrectly overwritten memory. The checking
272 is fairly extensive, and will slow down execution
273 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
274 will attempt to check every non-mmapped allocated and free chunk in
275 the course of computing the summaries. (By nature, mmapped regions
276 cannot be checked very much automatically.)
277
278 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
279 this code. The assertions in the check routines spell out in more
280 detail the assumptions and invariants underlying the algorithms.
281
282 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
283 checking that all accesses to malloced memory stay within their
284 bounds. However, there are several add-ons and adaptations of this
285 or other mallocs available that do this.
286 */
287
288 #ifndef MALLOC_DEBUG
289 #define MALLOC_DEBUG 0
290 #endif
291
292 #if USE_TCACHE
293 /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
294 # define TCACHE_MAX_BINS 64
295 # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
296
297 /* Only used to pre-fill the tunables. */
298 # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
299
300 /* When "x" is from chunksize(). */
301 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
302 /* When "x" is a user-provided size. */
303 # define usize2tidx(x) csize2tidx (request2size (x))
304
305 /* With rounding and alignment, the bins are...
306 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
307 idx 1 bytes 25..40 or 13..20
308 idx 2 bytes 41..56 or 21..28
309 etc. */
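
/* Worked example (assuming a typical 64-bit configuration with
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MINSIZE == 32): a request of
   100 bytes is padded by request2size to a 112-byte chunk, and
   csize2tidx (112) == (112 - 32 + 15) / 16 == 5, so such chunks are
   cached in tcache bin 5.  tidx2usize (5) == 5 * 16 + 32 - 8 == 104 is
   the largest user request that still maps to that bin.  */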
310
311 /* This is another arbitrary limit, which tunables can change. Each
312 tcache bin will hold at most this number of chunks. */
313 # define TCACHE_FILL_COUNT 7
314
315 /* Maximum chunks in tcache bins for tunables. This value must fit the range
316 of tcache->counts[] entries, else they may overflow. */
317 # define MAX_TCACHE_COUNT UINT16_MAX
318 #endif
319
320 /* Safe-Linking:
321 Use randomness from ASLR (mmap_base) to protect single-linked lists
322 of Fast-Bins and TCache. That is, mask the "next" pointers of the
323 lists' chunks, and also perform allocation alignment checks on them.
324 This mechanism reduces the risk of pointer hijacking, as was done with
325 Safe-Unlinking in the double-linked lists of Small-Bins.
326 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
327 larger pages provide less entropy, although the pointer mangling
328 still works. */
329 #define PROTECT_PTR(pos, ptr) \
330 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
331 #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
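
/* Illustrative sketch (not part of glibc's code paths): storing and
   recovering a mangled "next" pointer.  The value kept in memory is the
   real pointer XORed with the address of its own storage slot shifted
   right by the page bits, so it can only be demangled by code that
   knows where the pointer lives:

     mchunkptr next = some_chunk;                // real successor chunk
     p->fd = PROTECT_PTR (&p->fd, next);         // store mangled pointer
     mchunkptr again = REVEAL_PTR (p->fd);       // again == next

   REVEAL_PTR (x) expands to PROTECT_PTR (&x, x), so it must be applied
   to the very field that holds the mangled value.  */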
332
333 /*
334 The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0)
335 when p is nonnull. If the macro is nonzero, the realloc call returns NULL;
336 otherwise, the call returns what malloc (0) would. In either case,
337 p is freed. Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which
338 implements common historical practice.
339
340 ISO C17 says the realloc call has implementation-defined behavior,
341 and it might not even free p.
342 */
343
344 #ifndef REALLOC_ZERO_BYTES_FREES
345 #define REALLOC_ZERO_BYTES_FREES 1
346 #endif
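
/* Illustrative example (application-side, not part of glibc): with the
   default nonzero setting,

     char *p = malloc (16);
     char *q = realloc (p, 0);   // frees p and returns NULL

   so q must not be dereferenced and p must not be freed again.  */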
347
348 /*
349 TRIM_FASTBINS controls whether free() of a very small chunk can
350 immediately lead to trimming. Setting to true (1) can reduce memory
351 footprint, but will almost always slow down programs that use a lot
352 of small chunks.
353
354 Define this only if you are willing to give up some speed to more
355 aggressively reduce system-level memory footprint when releasing
356 memory in programs that use many small chunks. You can get
357 essentially the same effect by setting MXFAST to 0, but this can
358 lead to even greater slowdowns in programs using many small chunks.
359 TRIM_FASTBINS is an in-between compile-time option, that disables
360 only those chunks bordering topmost memory from being placed in
361 fastbins.
362 */
363
364 #ifndef TRIM_FASTBINS
365 #define TRIM_FASTBINS 0
366 #endif
367
368 /* Definition for getting more memory from the OS. */
369 #include "morecore.c"
370
371 #define MORECORE (*__glibc_morecore)
372 #define MORECORE_FAILURE 0
373
374 /* Memory tagging. */
375
376 /* Some systems support the concept of tagging (sometimes known as
377 coloring) memory locations on a fine grained basis. Each memory
378 location is given a color (normally allocated randomly) and
379 pointers are also colored. When the pointer is dereferenced, the
380 pointer's color is checked against the memory's color and if they
381 differ the access is faulted (sometimes lazily).
382
383 We use this in glibc by maintaining a single color for the malloc
384 data structures that are interleaved with the user data and then
385 assigning separate colors for each block allocation handed out. In
386 this way simple buffer overruns will be rapidly detected. When
387 memory is freed, the memory is recolored back to the glibc default
388 so that simple use-after-free errors can also be detected.
389
390 If memory is reallocated the buffer is recolored even if the
391 address remains the same. This has a performance impact, but
392 guarantees that the old pointer cannot mistakenly be reused (code
393 that compares old against new will see a mismatch and will then
394 need to behave as though realloc moved the data to a new location).
395
396 Internal API for memory tagging support.
397
398 The aim is to keep the code for memory tagging support as close to
399 the normal APIs in glibc as possible, so that if tagging is not
400 enabled in the library, or is disabled at runtime then standard
401 operations can continue to be used. Support macros are used to do
402 this:
403
404 void *tag_new_zero_region (void *ptr, size_t size)
405
406 Allocates a new tag, colors the memory with that tag, zeros the
407 memory and returns a pointer that is correctly colored for that
408 location. The non-tagging version will simply call memset with 0.
409
410 void *tag_region (void *ptr, size_t size)
411
412 Color the region of memory pointed to by PTR and size SIZE with
413 the color of PTR. Returns the original pointer.
414
415 void *tag_new_usable (void *ptr)
416
417 Allocate a new random color and use it to color the user region of
418 a chunk; this may include data from the subsequent chunk's header
419 if tagging is sufficiently fine grained. Returns PTR suitably
420 recolored for accessing the memory there.
421
422 void *tag_at (void *ptr)
423
424 Read the current color of the memory at the address pointed to by
425 PTR (ignoring its current color) and return PTR recolored to that
426 color. PTR must be a valid address in all other respects. When
427 tagging is not enabled, it simply returns the original pointer.
428 */
429
430 #ifdef USE_MTAG
431 static bool mtag_enabled = false;
432 static int mtag_mmap_flags = 0;
433 #else
434 # define mtag_enabled false
435 # define mtag_mmap_flags 0
436 #endif
437
438 static __always_inline void *
439 tag_region (void *ptr, size_t size)
440 {
441 if (__glibc_unlikely (mtag_enabled))
442 return __libc_mtag_tag_region (ptr, size);
443 return ptr;
444 }
445
446 static __always_inline void *
447 tag_new_zero_region (void *ptr, size_t size)
448 {
449 if (__glibc_unlikely (mtag_enabled))
450 return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
451 return memset (ptr, 0, size);
452 }
453
454 /* Defined later. */
455 static void *
456 tag_new_usable (void *ptr);
457
458 static __always_inline void *
459 tag_at (void *ptr)
460 {
461 if (__glibc_unlikely (mtag_enabled))
462 return __libc_mtag_address_get_tag (ptr);
463 return ptr;
464 }
465
466 #include <string.h>
467
468 /*
469 MORECORE-related declarations. By default, rely on sbrk
470 */
471
472
473 /*
474 MORECORE is the name of the routine to call to obtain more memory
475 from the system. See below for general guidance on writing
476 alternative MORECORE functions, as well as a version for WIN32 and a
477 sample version for pre-OSX macos.
478 */
479
480 #ifndef MORECORE
481 #define MORECORE sbrk
482 #endif
483
484 /*
485 MORECORE_FAILURE is the value returned upon failure of MORECORE
486 as well as mmap. Since it cannot be an otherwise valid memory address,
487 and must reflect values of standard sys calls, you probably ought not
488 try to redefine it.
489 */
490
491 #ifndef MORECORE_FAILURE
492 #define MORECORE_FAILURE (-1)
493 #endif
494
495 /*
496 If MORECORE_CONTIGUOUS is true, take advantage of fact that
497 consecutive calls to MORECORE with positive arguments always return
498 contiguous increasing addresses. This is true of unix sbrk. Even
499 if not defined, when regions happen to be contiguous, malloc will
500 permit allocations spanning regions obtained from different
501 calls. But defining this when applicable enables some stronger
502 consistency checks and space efficiencies.
503 */
504
505 #ifndef MORECORE_CONTIGUOUS
506 #define MORECORE_CONTIGUOUS 1
507 #endif
508
509 /*
510 Define MORECORE_CANNOT_TRIM if your version of MORECORE
511 cannot release space back to the system when given negative
512 arguments. This is generally necessary only if you are using
513 a hand-crafted MORECORE function that cannot handle negative arguments.
514 */
515
516 /* #define MORECORE_CANNOT_TRIM */
517
518 /* MORECORE_CLEARS (default 1)
519 The degree to which the routine mapped to MORECORE zeroes out
520 memory: never (0), only for newly allocated space (1) or always
521 (2). The distinction between (1) and (2) is necessary because on
522 some systems, if the application first decrements and then
523 increments the break value, the contents of the reallocated space
524 are unspecified.
525 */
526
527 #ifndef MORECORE_CLEARS
528 # define MORECORE_CLEARS 1
529 #endif
530
531
532 /*
533 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
534 sbrk fails, and mmap is used as a backup. The value must be a
535 multiple of page size. This backup strategy generally applies only
536 when systems have "holes" in address space, so sbrk cannot perform
537 contiguous expansion, but there is still space available on system.
538 On systems for which this is known to be useful (i.e. most linux
539 kernels), this occurs only when programs allocate huge amounts of
540 memory. Between this, and the fact that mmap regions tend to be
541 limited, the size should be large, to avoid too many mmap calls and
542 thus avoid running out of kernel resources. */
543
544 #ifndef MMAP_AS_MORECORE_SIZE
545 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
546 #endif
547
548 /*
549 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
550 large blocks.
551 */
552
553 #ifndef HAVE_MREMAP
554 #define HAVE_MREMAP 0
555 #endif
556
557 /*
558 This version of malloc supports the standard SVID/XPG mallinfo
559 routine that returns a struct containing usage properties and
560 statistics. It should work on any SVID/XPG compliant system that has
561 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
562 install such a thing yourself, cut out the preliminary declarations
563 as described above and below and save them in a malloc.h file. But
564 there's no compelling reason to bother to do this.)
565
566 The main declaration needed is the mallinfo struct that is returned
567 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
568 bunch of fields that are not even meaningful in this version of
569 malloc. These fields are instead filled by mallinfo() with
570 other numbers that might be of interest.
571 */
572
573
574 /* ---------- description of public routines ------------ */
575
576 #if IS_IN (libc)
577 /*
578 malloc(size_t n)
579 Returns a pointer to a newly allocated chunk of at least n bytes, or null
580 if no space is available. Additionally, on failure, errno is
581 set to ENOMEM on ANSI C systems.
582
583 If n is zero, malloc returns a minimum-sized chunk. (The minimum
584 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
585 systems.) On most systems, size_t is an unsigned type, so calls
586 with negative arguments are interpreted as requests for huge amounts
587 of space, which will often fail. The maximum supported value of n
588 differs across systems, but is in all cases less than the maximum
589 representable value of a size_t.
590 */
591 void* __libc_malloc(size_t);
592 libc_hidden_proto (__libc_malloc)
593
594 /*
595 free(void* p)
596 Releases the chunk of memory pointed to by p, that had been previously
597 allocated using malloc or a related routine such as realloc.
598 It has no effect if p is null. It can have arbitrary (i.e., bad!)
599 effects if p has already been freed.
600
601 Unless disabled (using mallopt), freeing very large spaces will,
602 when possible, automatically trigger operations that give
603 back unused memory to the system, thus reducing program footprint.
604 */
605 void __libc_free(void*);
606 libc_hidden_proto (__libc_free)
607
608 /*
609 calloc(size_t n_elements, size_t element_size);
610 Returns a pointer to n_elements * element_size bytes, with all locations
611 set to zero.
612 */
613 void* __libc_calloc(size_t, size_t);
614
615 /*
616 realloc(void* p, size_t n)
617 Returns a pointer to a chunk of size n that contains the same data
618 as does chunk p up to the minimum of (n, p's size) bytes, or null
619 if no space is available.
620
621 The returned pointer may or may not be the same as p. The algorithm
622 prefers extending p when possible, otherwise it employs the
623 equivalent of a malloc-copy-free sequence.
624
625 If p is null, realloc is equivalent to malloc.
626
627 If space is not available, realloc returns null, errno is set (if on
628 ANSI) and p is NOT freed.
629
630 If n is for fewer bytes than already held by p, the newly unused
631 space is lopped off and freed if possible. Unless the #define
632 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
633 zero (re)allocates a minimum-sized chunk.
634
635 Large chunks that were internally obtained via mmap will always be
636 grown using malloc-copy-free sequences unless the system supports
637 MREMAP (currently only linux).
638
639 The old unix realloc convention of allowing the last-free'd chunk
640 to be used as an argument to realloc is not supported.
641 */
642 void* __libc_realloc(void*, size_t);
643 libc_hidden_proto (__libc_realloc)
644
645 /*
646 memalign(size_t alignment, size_t n);
647 Returns a pointer to a newly allocated chunk of n bytes, aligned
648 in accord with the alignment argument.
649
650 The alignment argument should be a power of two. If the argument is
651 not a power of two, the nearest greater power is used.
652 8-byte alignment is guaranteed by normal malloc calls, so don't
653 bother calling memalign with an argument of 8 or less.
654
655 Overreliance on memalign is a sure way to fragment space.
656 */
657 void* __libc_memalign(size_t, size_t);
658 libc_hidden_proto (__libc_memalign)
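
/* Illustrative usage (application-side, not part of glibc):

     void *p = memalign (4096, 8192);   // page-aligned 8 KiB block
     // ... use p ...
     free (p);
 */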
659
660 /*
661 valloc(size_t n);
662 Equivalent to memalign(pagesize, n), where pagesize is the page
663 size of the system. If the pagesize is unknown, 4096 is used.
664 */
665 void* __libc_valloc(size_t);
666
667
668
669 /*
670 mallinfo()
671 Returns (by copy) a struct containing various summary statistics:
672
673 arena: current total non-mmapped bytes allocated from system
674 ordblks: the number of free chunks
675 smblks: the number of fastbin blocks (i.e., small chunks that
676 have been freed but not reused or consolidated)
677 hblks: current number of mmapped regions
678 hblkhd: total bytes held in mmapped regions
679 usmblks: always 0
680 fsmblks: total bytes held in fastbin blocks
681 uordblks: current total allocated space (normal or mmapped)
682 fordblks: total free space
683 keepcost: the maximum number of bytes that could ideally be released
684 back to system via malloc_trim. ("ideally" means that
685 it ignores page restrictions etc.)
686
687 Because these fields are ints, but internal bookkeeping may
688 be kept as longs, the reported values may wrap around zero and
689 thus be inaccurate.
690 */
691 struct mallinfo2 __libc_mallinfo2(void);
692 libc_hidden_proto (__libc_mallinfo2)
693
694 struct mallinfo __libc_mallinfo(void);
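
/* Illustrative usage (application-side, not part of glibc):

     struct mallinfo2 mi = mallinfo2 ();   // declared in <malloc.h>
     printf ("arena=%zu free=%zu mmapped=%zu in-use=%zu\n",
             mi.arena, mi.fordblks, mi.hblkhd, mi.uordblks);

   mallinfo2 reports the fields described above with size_t width and so
   avoids the int wrap-around problem mentioned above; the older
   mallinfo interface is retained only for compatibility.  */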
695
696
697 /*
698 pvalloc(size_t n);
699 Equivalent to valloc(minimum-page-that-holds(n)), that is,
700 round up n to nearest pagesize.
701 */
702 void* __libc_pvalloc(size_t);
703
704 /*
705 malloc_trim(size_t pad);
706
707 If possible, gives memory back to the system (via negative
708 arguments to sbrk) if there is unused memory at the `high' end of
709 the malloc pool. You can call this after freeing large blocks of
710 memory to potentially reduce the system-level memory requirements
711 of a program. However, it cannot guarantee to reduce memory. Under
712 some allocation patterns, some large free blocks of memory will be
713 locked between two used chunks, so they cannot be given back to
714 the system.
715
716 The `pad' argument to malloc_trim represents the amount of free
717 trailing space to leave untrimmed. If this argument is zero,
718 only the minimum amount of memory to maintain internal data
719 structures will be left (one page or less). Non-zero arguments
720 can be supplied to maintain enough trailing space to service
721 future expected allocations without having to re-obtain memory
722 from the system.
723
724 Malloc_trim returns 1 if it actually released any memory, else 0.
725 On systems that do not support "negative sbrks", it will always
726 return 0.
727 */
728 int __malloc_trim(size_t);
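
/* Illustrative usage (application-side, not part of glibc), after the
   program has just released a large allocation (big_buffer is a
   hypothetical application pointer):

     free (big_buffer);
     int released = malloc_trim (0);   // pad 0: release as much as possible
     // released == 1 if memory was given back to the system, 0 otherwise
 */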
729
730 /*
731 malloc_usable_size(void* p);
732
733 Returns the number of bytes you can actually use in
734 an allocated chunk, which may be more than you requested (although
735 often not) due to alignment and minimum size constraints.
736 You can use this many bytes without worrying about
737 overwriting other allocated objects. This is not a particularly great
738 programming practice. malloc_usable_size can be more useful in
739 debugging and assertions, for example:
740
741 p = malloc(n);
742 assert(malloc_usable_size(p) >= 256);
743
744 */
745 size_t __malloc_usable_size(void*);
746
747 /*
748 malloc_stats();
749 Prints on stderr the amount of space obtained from the system (both
750 via sbrk and mmap), the maximum amount (which may be more than
751 current if malloc_trim and/or munmap got called), and the current
752 number of bytes allocated via malloc (or realloc, etc) but not yet
753 freed. Note that this is the number of bytes allocated, not the
754 number requested. It will be larger than the number requested
755 because of alignment and bookkeeping overhead. Because it includes
756 alignment wastage as being in use, this figure may be greater than
757 zero even when no user-level chunks are allocated.
758
759 The reported current and maximum system memory can be inaccurate if
760 a program makes other calls to system memory allocation functions
761 (normally sbrk) outside of malloc.
762
763 malloc_stats prints only the most commonly interesting statistics.
764 More information can be obtained by calling mallinfo.
765
766 */
767 void __malloc_stats(void);
768
769 /*
770 posix_memalign(void **memptr, size_t alignment, size_t size);
771
772 POSIX wrapper like memalign(), checking for validity of size.
773 */
774 int __posix_memalign(void **, size_t, size_t);
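
/* Illustrative usage (application-side, not part of glibc):

     void *buf = NULL;
     int err = posix_memalign (&buf, 64, 4096);  // 64-byte-aligned 4 KiB
     if (err != 0)      // 0 on success, else an errno value (EINVAL, ENOMEM)
       return err;
     // ... use buf ...
     free (buf);
 */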
775 #endif /* IS_IN (libc) */
776
777 /*
778 mallopt(int parameter_number, int parameter_value)
779 Sets tunable parameters. The format is to provide a
780 (parameter-number, parameter-value) pair. mallopt then sets the
781 corresponding parameter to the argument value if it can (i.e., so
782 long as the value is meaningful), and returns 1 if successful else
783 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
784 normally defined in malloc.h. Only one of these (M_MXFAST) is used
785 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
786 so setting them has no effect. But this malloc also supports four
787 other options in mallopt. See below for details. Briefly, supported
788 parameters are as follows (listed defaults are for "typical"
789 configurations).
790
791 Symbol param # default allowed param values
792 M_MXFAST 1 64 0-80 (0 disables fastbins)
793 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
794 M_TOP_PAD -2 0 any
795 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
796 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
797 */
798 int __libc_mallopt(int, int);
799 #if IS_IN (libc)
800 libc_hidden_proto (__libc_mallopt)
801 #endif
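
/* Illustrative usage (application-side, not part of glibc):

     #include <malloc.h>

     mallopt (M_MXFAST, 0);             // disable fastbins entirely
     mallopt (M_MMAP_MAX, 0);           // never service requests via mmap
     mallopt (M_TOP_PAD, 1024 * 1024);  // keep 1 MiB of extra sbrk padding

   Each call returns 1 if the parameter was accepted and 0 otherwise.  */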
802
803 /* mallopt tuning options */
804
805 /*
806 M_MXFAST is the maximum request size used for "fastbins", special bins
807 that hold returned chunks without consolidating their spaces. This
808 enables future requests for chunks of the same size to be handled
809 very quickly, but can increase fragmentation, and thus increase the
810 overall memory footprint of a program.
811
812 This malloc manages fastbins very conservatively yet still
813 efficiently, so fragmentation is rarely a problem for values less
814 than or equal to the default. The maximum supported value of MXFAST
815 is 80. You wouldn't want it any higher than this anyway. Fastbins
816 are designed especially for use with many small structs, objects or
817 strings -- the default handles structs/objects/arrays with sizes up
818 to 8 4byte fields, or small strings representing words, tokens,
819 etc. Using fastbins for larger objects normally worsens
820 fragmentation without improving speed.
821
822 M_MXFAST is set in REQUEST size units. It is internally used in
823 chunksize units, which adds padding and alignment. You can reduce
824 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
825 algorithm to be a closer approximation of fifo-best-fit in all cases,
826 not just for larger requests, but will generally cause it to be
827 slower.
828 */
829
830
831 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
832 #ifndef M_MXFAST
833 #define M_MXFAST 1
834 #endif
835
836 #ifndef DEFAULT_MXFAST
837 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
838 #endif
839
840
841 /*
842 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
843 to keep before releasing via malloc_trim in free().
844
845 Automatic trimming is mainly useful in long-lived programs.
846 Because trimming via sbrk can be slow on some systems, and can
847 sometimes be wasteful (in cases where programs immediately
848 afterward allocate more large chunks) the value should be high
849 enough so that your overall system performance would improve by
850 releasing this much memory.
851
852 The trim threshold and the mmap control parameters (see below)
853 can be traded off with one another. Trimming and mmapping are
854 two different ways of releasing unused memory back to the
855 system. Between these two, it is often possible to keep
856 system-level demands of a long-lived program down to a bare
857 minimum. For example, in one test suite of sessions measuring
858 the XF86 X server on Linux, using a trim threshold of 128K and a
859 mmap threshold of 192K led to near-minimal long term resource
860 consumption.
861
862 If you are using this malloc in a long-lived program, it should
863 pay to experiment with these values. As a rough guide, you
864 might set to a value close to the average size of a process
865 (program) running on your system. Releasing this much memory
866 would allow such a process to run in memory. Generally, it's
867 worth it to tune for trimming rather than memory mapping when a
868 program undergoes phases where several large chunks are
869 allocated and released in ways that can reuse each other's
870 storage, perhaps mixed with phases where there are no such
871 chunks at all. And in well-behaved long-lived programs,
872 controlling release of large blocks via trimming versus mapping
873 is usually faster.
874
875 However, in most programs, these parameters serve mainly as
876 protection against the system-level effects of carrying around
877 massive amounts of unneeded memory. Since frequent calls to
878 sbrk, mmap, and munmap otherwise degrade performance, the default
879 parameters are set to relatively high values that serve only as
880 safeguards.
881
882 The trim value must be greater than page size to have any useful
883 effect. To disable trimming completely, you can set it to
884 (unsigned long)(-1).
885
886 Trim settings interact with fastbin (MXFAST) settings: Unless
887 TRIM_FASTBINS is defined, automatic trimming never takes place upon
888 freeing a chunk with size less than or equal to MXFAST. Trimming is
889 instead delayed until subsequent freeing of larger chunks. However,
890 you can still force an attempted trim by calling malloc_trim.
891
892 Also, trimming is not generally possible in cases where
893 the main arena is obtained via mmap.
894
895 Note that the trick some people use of mallocing a huge space and
896 then freeing it at program startup, in an attempt to reserve system
897 memory, doesn't have the intended effect under automatic trimming,
898 since that memory will immediately be returned to the system.
899 */
900
901 #define M_TRIM_THRESHOLD -1
902
903 #ifndef DEFAULT_TRIM_THRESHOLD
904 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
905 #endif
906
907 /*
908 M_TOP_PAD is the amount of extra `padding' space to allocate or
909 retain whenever sbrk is called. It is used in two ways internally:
910
911 * When sbrk is called to extend the top of the arena to satisfy
912 a new malloc request, this much padding is added to the sbrk
913 request.
914
915 * When malloc_trim is called automatically from free(),
916 it is used as the `pad' argument.
917
918 In both cases, the actual amount of padding is rounded
919 so that the end of the arena is always a system page boundary.
920
921 The main reason for using padding is to avoid calling sbrk so
922 often. Having even a small pad greatly reduces the likelihood
923 that nearly every malloc request during program start-up (or
924 after trimming) will invoke sbrk, which needlessly wastes
925 time.
926
927 Automatic rounding-up to page-size units is normally sufficient
928 to avoid measurable overhead, so the default is 0. However, in
929 systems where sbrk is relatively slow, it can pay to increase
930 this value, at the expense of carrying around more memory than
931 the program needs.
932 */
933
934 #define M_TOP_PAD -2
935
936 #ifndef DEFAULT_TOP_PAD
937 #define DEFAULT_TOP_PAD (0)
938 #endif
939
940 /*
941 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
942 adjusted MMAP_THRESHOLD.
943 */
944
945 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
946 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
947 #endif
948
949 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
950 /* For 32-bit platforms we cannot increase the maximum mmap
951 threshold much because it is also the minimum value for the
952 maximum heap size and its alignment. Going above 512k (i.e., 1M
953 for new heaps) wastes too much address space. */
954 # if __WORDSIZE == 32
955 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
956 # else
957 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
958 # endif
959 #endif
960
961 /*
962 M_MMAP_THRESHOLD is the request size threshold for using mmap()
963 to service a request. Requests of at least this size that cannot
964 be allocated using already-existing space will be serviced via mmap.
965 (If enough normal freed space already exists it is used instead.)
966
967 Using mmap segregates relatively large chunks of memory so that
968 they can be individually obtained and released from the host
969 system. A request serviced through mmap is never reused by any
970 other request (at least not directly; the system may just so
971 happen to remap successive requests to the same locations).
972
973 Segregating space in this way has the benefits that:
974
975 1. Mmapped space can ALWAYS be individually released back
976 to the system, which helps keep the system level memory
977 demands of a long-lived program low.
978 2. Mapped memory can never become `locked' between
979 other chunks, as can happen with normally allocated chunks, which
980 means that even trimming via malloc_trim would not release them.
981 3. On some systems with "holes" in address spaces, mmap can obtain
982 memory that sbrk cannot.
983
984 However, it has the disadvantages that:
985
986 1. The space cannot be reclaimed, consolidated, and then
987 used to service later requests, as happens with normal chunks.
988 2. It can lead to more wastage because of mmap page alignment
989 requirements
990 3. It causes malloc performance to be more dependent on host
991 system memory management support routines which may vary in
992 implementation quality and may impose arbitrary
993 limitations. Generally, servicing a request via normal
994 malloc steps is faster than going through a system's mmap.
995
996 The advantages of mmap nearly always outweigh disadvantages for
997 "large" chunks, but the value of "large" varies across systems. The
998 default is an empirically derived value that works well in most
999 systems.
1000
1001
1002 Update in 2006:
1003 The above was written in 2001. Since then the world has changed a lot.
1004 Memory got bigger. Applications got bigger. The virtual address space
1005 layout in 32 bit linux changed.
1006
1007 In the new situation, brk() and mmap space is shared and there are no
1008 artificial limits on brk size imposed by the kernel. What is more,
1009 applications have started using transient allocations larger than the
1010 128Kb as was imagined in 2001.
1011
1012 The price for mmap is also high now; each time glibc mmaps from the
1013 kernel, the kernel is forced to zero out the memory it gives to the
1014 application. Zeroing memory is expensive and eats a lot of cache and
1015 memory bandwidth. This has nothing to do with the efficiency of the
1016 virtual memory system; when handing out mmapped memory the kernel
1017 just has no choice but to zero it.
1018
1019 In 2001, the kernel had a maximum size for brk() which was about 800
1020 megabytes on 32 bit x86; at that point brk() would hit the first
1021 mmaped shared libraries and couldn't expand anymore. With current 2.6
1022 kernels, the VA space layout is different and brk() and mmap
1023 both can span the entire heap at will.
1024
1025 Rather than using a static threshold for the brk/mmap tradeoff,
1026 we are now using a simple dynamic one. The goal is still to avoid
1027 fragmentation. The old goals we kept are
1028 1) try to get the long lived large allocations to use mmap()
1029 2) really large allocations should always use mmap()
1030 and we're adding now:
1031 3) transient allocations should use brk() to avoid forcing the kernel
1032 to zero memory over and over again
1033
1034 The implementation works with a sliding threshold, which is by default
1035 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1036 out at 128Kb as per the 2001 default.
1037
1038 This allows us to satisfy requirement 1) under the assumption that long
1039 lived allocations are made early in the process' lifespan, before it has
1040 started doing dynamic allocations of the same size (which will
1041 increase the threshold).
1042
1043 The upper bound on the threshold satisfies requirement 2).
1044
1045 The threshold goes up in value when the application frees memory that was
1046 allocated with the mmap allocator. The idea is that once the application
1047 starts freeing memory of a certain size, it's highly probable that this is
1048 a size the application uses for transient allocations. This estimator
1049 is there to satisfy the new third requirement.
1050
1051 */
1052
1053 #define M_MMAP_THRESHOLD -3
1054
1055 #ifndef DEFAULT_MMAP_THRESHOLD
1056 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1057 #endif
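
/* Illustrative usage (application-side, not part of glibc): explicitly
   pinning the threshold disables the sliding adjustment described above
   (the glibc.malloc.mmap_threshold tunable has the same effect):

     mallopt (M_MMAP_THRESHOLD, 256 * 1024);  // requests >= 256 KiB use mmap
 */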
1058
1059 /*
1060 M_MMAP_MAX is the maximum number of requests to simultaneously
1061 service using mmap. This parameter exists because
1062 some systems have a limited number of internal tables for
1063 use by mmap, and using more than a few of them may degrade
1064 performance.
1065
1066 The default is set to a value that serves only as a safeguard.
1067 Setting to 0 disables use of mmap for servicing large requests.
1068 */
1069
1070 #define M_MMAP_MAX -4
1071
1072 #ifndef DEFAULT_MMAP_MAX
1073 #define DEFAULT_MMAP_MAX (65536)
1074 #endif
1075
1076 #include <malloc.h>
1077
1078 #ifndef RETURN_ADDRESS
1079 #define RETURN_ADDRESS(X_) (NULL)
1080 #endif
1081
1082 /* Forward declarations. */
1083 struct malloc_chunk;
1084 typedef struct malloc_chunk* mchunkptr;
1085
1086 /* Internal routines. */
1087
1088 static void* _int_malloc(mstate, size_t);
1089 static void _int_free(mstate, mchunkptr, int);
1090 static void _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T);
1091 static INTERNAL_SIZE_T _int_free_create_chunk (mstate,
1092 mchunkptr, INTERNAL_SIZE_T,
1093 mchunkptr, INTERNAL_SIZE_T);
1094 static void _int_free_maybe_consolidate (mstate, INTERNAL_SIZE_T);
1095 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1096 INTERNAL_SIZE_T);
1097 static void* _int_memalign(mstate, size_t, size_t);
1098 #if IS_IN (libc)
1099 static void* _mid_memalign(size_t, size_t, void *);
1100 #endif
1101
1102 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
1103
1104 static void munmap_chunk(mchunkptr p);
1105 #if HAVE_MREMAP
1106 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
1107 #endif
1108
1109 static size_t musable (void *mem);
1110
1111 /* ------------------ MMAP support ------------------ */
1112
1113
1114 #include <fcntl.h>
1115 #include <sys/mman.h>
1116
1117 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1118 # define MAP_ANONYMOUS MAP_ANON
1119 #endif
1120
1121 #define MMAP(addr, size, prot, flags) \
1122 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1123
1124
1125 /*
1126 ----------------------- Chunk representations -----------------------
1127 */
1128
1129
1130 /*
1131 This struct declaration is misleading (but accurate and necessary).
1132 It declares a "view" into memory allowing access to necessary
1133 fields at known offsets from a given base. See explanation below.
1134 */
1135
1136 struct malloc_chunk {
1137
1138 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1139 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1140
1141 struct malloc_chunk* fd; /* double links -- used only if free. */
1142 struct malloc_chunk* bk;
1143
1144 /* Only used for large blocks: pointer to next larger size. */
1145 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1146 struct malloc_chunk* bk_nextsize;
1147 };
1148
1149
1150 /*
1151 malloc_chunk details:
1152
1153 (The following includes lightly edited explanations by Colin Plumb.)
1154
1155 Chunks of memory are maintained using a `boundary tag' method as
1156 described in e.g., Knuth or Standish. (See the paper by Paul
1157 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1158 survey of such techniques.) Sizes of free chunks are stored both
1159 in the front of each chunk and at the end. This makes
1160 consolidating fragmented chunks into bigger chunks very fast. The
1161 size fields also hold bits representing whether chunks are free or
1162 in use.
1163
1164 An allocated chunk looks like this:
1165
1166
1167 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1168 | Size of previous chunk, if unallocated (P clear) |
1169 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1170 | Size of chunk, in bytes |A|M|P|
1171 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1172 | User data starts here... .
1173 . .
1174 . (malloc_usable_size() bytes) .
1175 . |
1176 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1177 | (size of chunk, but used for application data) |
1178 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1179 | Size of next chunk, in bytes |A|0|1|
1180 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1181
1182 Where "chunk" is the front of the chunk for the purpose of most of
1183 the malloc code, but "mem" is the pointer that is returned to the
1184 user. "Nextchunk" is the beginning of the next contiguous chunk.
1185
1186 Chunks always begin on even word boundaries, so the mem portion
1187 (which is returned to the user) is also on an even word boundary, and
1188 thus at least double-word aligned.
1189
1190 Free chunks are stored in circular doubly-linked lists, and look like this:
1191
1192 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1193 | Size of previous chunk, if unallocated (P clear) |
1194 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1195 `head:' | Size of chunk, in bytes |A|0|P|
1196 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1197 | Forward pointer to next chunk in list |
1198 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1199 | Back pointer to previous chunk in list |
1200 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1201 | Unused space (may be 0 bytes long) .
1202 . .
1203 . |
1204 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1205 `foot:' | Size of chunk, in bytes |
1206 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1207 | Size of next chunk, in bytes |A|0|0|
1208 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1209
1210 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1211 chunk size (which is always a multiple of two words), is an in-use
1212 bit for the *previous* chunk. If that bit is *clear*, then the
1213 word before the current chunk size contains the previous chunk
1214 size, and can be used to find the front of the previous chunk.
1215 The very first chunk allocated always has this bit set,
1216 preventing access to non-existent (or non-owned) memory. If
1217 prev_inuse is set for any given chunk, then you CANNOT determine
1218 the size of the previous chunk, and might even get a memory
1219 addressing fault when trying to do so.
1220
1221 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1222 main arena, described by the main_arena variable. When additional
1223 threads are spawned, each thread receives its own arena (up to a
1224 configurable limit, after which arenas are reused for multiple
1225 threads), and the chunks in these arenas have the A bit set. To
1226 find the arena for a chunk on such a non-main arena, heap_for_ptr
1227 performs a bit mask operation and indirection through the ar_ptr
1228 member of the per-heap header heap_info (see arena.c).
1229
1230 Note that the `foot' of the current chunk is actually represented
1231 as the prev_size of the NEXT chunk. This makes it easier to
1232 deal with alignments etc but can be very confusing when trying
1233 to extend or adapt this code.
1234
1235 The three exceptions to all this are:
1236
1237 1. The special chunk `top' doesn't bother using the
1238 trailing size field since there is no next contiguous chunk
1239 that would have to index off it. After initialization, `top'
1240 is forced to always exist. If it would become less than
1241 MINSIZE bytes long, it is replenished.
1242
1243 2. Chunks allocated via mmap, which have the second-lowest-order
1244 bit M (IS_MMAPPED) set in their size fields. Because they are
1245 allocated one-by-one, each must contain its own trailing size
1246 field. If the M bit is set, the other bits are ignored
1247 (because mmapped chunks are neither in an arena, nor adjacent
1248 to a freed chunk). The M bit is also used for chunks which
1249 originally came from a dumped heap via malloc_set_state in
1250 hooks.c.
1251
1252 3. Chunks in fastbins are treated as allocated chunks from the
1253 point of view of the chunk allocator. They are consolidated
1254 with their neighbors only in bulk, in malloc_consolidate.
1255 */
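
/* Illustrative sketch (not part of glibc's code paths, which live in
   _int_free below): how the boundary tags drive coalescing of a chunk P
   that is being freed, using the macros defined later in this file:

     INTERNAL_SIZE_T size = chunksize (p);
     if (!prev_inuse (p))                     // previous neighbour is free
       {
         size += prev_size (p);               // its size is in our foot word
         p = prev_chunk (p);                  // step back to its header
       }
     mchunkptr next = chunk_at_offset (p, size);
     if (!inuse_bit_at_offset (next, chunksize (next)))  // next is free too
       size += chunksize (next);
     set_head (p, size | PREV_INUSE);         // one merged free chunk
     set_foot (p, size);

   The real code additionally unlinks the merged neighbours from their
   bins and treats the case where NEXT is the top chunk specially.  */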
1256
1257 /*
1258 ---------- Size and alignment checks and conversions ----------
1259 */
1260
1261 /* Conversion from malloc headers to user pointers, and back. When
1262 using memory tagging the user data and the malloc data structure
1263 headers have distinct tags. Converting fully from one to the other
1264 involves extracting the tag at the other address and creating a
1265 suitable pointer using it. That can be quite expensive. There are
1266 cases when the pointers are not dereferenced (for example only used
1267 for alignment check) so the tags are not relevant, and there are
1268 cases when user data is not tagged distinctly from malloc headers
1269 (user data is untagged because tagging is done late in malloc and
1270 early in free). User memory tagging across internal interfaces:
1271
1272 sysmalloc: Returns untagged memory.
1273 _int_malloc: Returns untagged memory.
1274 _int_free: Takes untagged memory.
1275 _int_memalign: Returns untagged memory.
1277 _mid_memalign: Returns tagged memory.
1278 _int_realloc: Takes and returns tagged memory.
1279 */
1280
1281 /* The chunk header is two SIZE_SZ elements, but this is used widely, so
1282 we define it here for clarity later. */
1283 #define CHUNK_HDR_SZ (2 * SIZE_SZ)
1284
1285 /* Convert a chunk address to a user mem pointer without correcting
1286 the tag. */
1287 #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
1288
1289 /* Convert a chunk address to a user mem pointer and extract the right tag. */
1290 #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ))
1291
1292 /* Convert a user mem pointer to a chunk address and extract the right tag. */
1293 #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
1294
1295 /* The smallest possible chunk */
1296 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1297
1298 /* The smallest size we can malloc is an aligned minimal chunk */
1299
1300 #define MINSIZE \
1301 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1302
1303 /* Check if m has acceptable alignment */
1304
1305 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1306
1307 #define misaligned_chunk(p) \
1308 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
1309 & MALLOC_ALIGN_MASK)
1310
1311 /* pad request bytes into a usable size -- internal version */
1312 /* Note: This must be a macro that evaluates to a compile time constant
1313 if passed a literal constant. */
1314 #define request2size(req) \
1315 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1316 MINSIZE : \
1317 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1318
1319 /* Check if REQ overflows when padded and aligned and if the resulting
1320 value is less than PTRDIFF_MAX. Returns the requested size or
1321 MINSIZE in case the value is less than MINSIZE, or 0 if any of the
1322 previous checks fail. */
1323 static inline size_t
1324 checked_request2size (size_t req) __nonnull (1)
1325 {
1326 if (__glibc_unlikely (req > PTRDIFF_MAX))
1327 return 0;
1328
1329 /* When using tagged memory, we cannot share the end of the user
1330 block with the header for the next chunk, so ensure that we
1331 allocate blocks that are rounded up to the granule size. Take
1332 care not to overflow from close to MAX_SIZE_T to a small
1333 number. Ideally, this would be part of request2size(), but that
1334 must be a macro that produces a compile time constant if passed
1335 a constant literal. */
1336 if (__glibc_unlikely (mtag_enabled))
1337 {
1338 /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */
1339 asm ("");
1340
1341 req = (req + (__MTAG_GRANULE_SIZE - 1)) &
1342 ~(size_t)(__MTAG_GRANULE_SIZE - 1);
1343 }
1344
1345 return request2size (req);
1346 }
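
/* Worked example (assuming a typical 64-bit configuration: SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16, so MALLOC_ALIGN_MASK == 15 and MINSIZE == 32):

     request2size (0)   == 32    // below MINSIZE, so MINSIZE is used
     request2size (24)  == 32    // (24 + 8 + 15) & ~15
     request2size (25)  == 48    // (25 + 8 + 15) & ~15

   checked_request2size additionally returns 0 for requests larger than
   PTRDIFF_MAX, and rounds up to the memory-tagging granule when
   mtag_enabled is true.  */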
1347
1348 /*
1349 --------------- Physical chunk operations ---------------
1350 */
1351
1352
1353 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1354 #define PREV_INUSE 0x1
1355
1356 /* extract inuse bit of previous chunk */
1357 #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1358
1359
1360 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1361 #define IS_MMAPPED 0x2
1362
1363 /* check for mmap()'ed chunk */
1364 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1365
1366
1367 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1368 from a non-main arena. This is only set immediately before handing
1369 the chunk to the user, if necessary. */
1370 #define NON_MAIN_ARENA 0x4
1371
1372 /* Check for chunk from main arena. */
1373 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1374
1375 /* Mark a chunk as not being on the main arena. */
1376 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1377
1378
1379 /*
1380 Bits to mask off when extracting size
1381
1382 Note: IS_MMAPPED is intentionally not masked off from size field in
1383 macros for which mmapped chunks should never be seen. This should
1384 cause helpful core dumps to occur if it is tried by accident by
1385 people extending or adapting this malloc.
1386 */
1387 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1388
1389 /* Get size, ignoring use bits */
1390 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1391
1392 /* Like chunksize, but do not mask SIZE_BITS. */
1393 #define chunksize_nomask(p) ((p)->mchunk_size)
1394
1395 /* Ptr to next physical malloc_chunk. */
1396 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1397
1398 /* Size of the chunk below P. Only valid if !prev_inuse (P). */
1399 #define prev_size(p) ((p)->mchunk_prev_size)
1400
1401 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1402 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1403
1404 /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1405 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1406
1407 /* Treat space at ptr + offset as a chunk */
1408 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1409
1410 /* extract p's inuse bit */
1411 #define inuse(p) \
1412 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1413
1414 /* set/clear chunk as being inuse without otherwise disturbing */
1415 #define set_inuse(p) \
1416 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1417
1418 #define clear_inuse(p) \
1419 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1420
1421
1422 /* check/set/clear inuse bits in known places */
1423 #define inuse_bit_at_offset(p, s) \
1424 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1425
1426 #define set_inuse_bit_at_offset(p, s) \
1427 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1428
1429 #define clear_inuse_bit_at_offset(p, s) \
1430 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1431
1432
1433 /* Set size at head, without disturbing its use bit */
1434 #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1435
1436 /* Set size/use field */
1437 #define set_head(p, s) ((p)->mchunk_size = (s))
1438
1439 /* Set size at footer (only when chunk is not in use) */
1440 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1441
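/* Editor's note: an illustrative, non-compiled sketch (not part of glibc) of
   how the low bits of the size field are read via the macros above.  A
   chunk's own in-use state lives in the PREV_INUSE bit of the *next* chunk,
   which is why inuse(p) looks chunksize(p) bytes ahead, and the footer
   (prev_size) of P is only meaningful when !prev_inuse(p).  The function
   name example_inspect_chunk is hypothetical.  */
#if 0
static void
example_inspect_chunk (mchunkptr p)
{
  size_t sz = chunksize (p);            /* size with flag bits masked off */
  int p_in_use = inuse (p);             /* read from the following chunk  */
  int from_mmap = chunk_is_mmapped (p); /* IS_MMAPPED bit                 */
  int in_main = chunk_main_arena (p);   /* NON_MAIN_ARENA bit clear?      */

  if (!prev_inuse (p))
    {
      /* Only now is the previous chunk's footer valid.  */
      mchunkptr prev = prev_chunk (p);
      (void) prev;
    }
  (void) sz; (void) p_in_use; (void) from_mmap; (void) in_main;
}
#endif
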
1442 #pragma GCC poison mchunk_size
1443 #pragma GCC poison mchunk_prev_size
1444
1445 /* This is the size of the real usable data in the chunk. Not valid for
1446 dumped heap chunks. */
1447 #define memsize(p) \
1448 (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
1449 chunksize (p) - CHUNK_HDR_SZ : \
1450 chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1451
1452 /* If memory tagging is enabled the layout changes to accommodate the granule
1453 size; this is wasteful for small allocations, so it is not done by default.
1454 Both the chunk header and the user data have to be granule aligned.  */
1455 _Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
1456 "memory tagging is not supported with large granule.");
1457
1458 static __always_inline void *
1459 tag_new_usable (void *ptr)
1460 {
1461 if (__glibc_unlikely (mtag_enabled) && ptr)
1462 {
1463 mchunkptr cp = mem2chunk(ptr);
1464 ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
1465 }
1466 return ptr;
1467 }
1468
1469 /*
1470 -------------------- Internal data structures --------------------
1471
1472 All internal state is held in an instance of malloc_state defined
1473 below. There are no other static variables, except in two optional
1474 cases:
1475 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1476 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1477 for mmap.
1478
1479 Beware of lots of tricks that minimize the total bookkeeping space
1480 requirements. The result is a little over 1K bytes (for 4-byte
1481 pointers and size_t).
1482 */
1483
1484 /*
1485 Bins
1486
1487 An array of bin headers for free chunks. Each bin is doubly
1488 linked. The bins are approximately proportionally (log) spaced.
1489 There are a lot of these bins (128). This may look excessive, but
1490 works very well in practice. Most bins hold sizes that are
1491 unusual as malloc request sizes, but are more usual for fragments
1492 and consolidated sets of chunks, which is what these bins hold, so
1493 they can be found quickly. All procedures maintain the invariant
1494 that no consolidated chunk physically borders another one, so each
1495 chunk in a list is known to be preceded and followed by either
1496 inuse chunks or the ends of memory.
1497
1498 Chunks in bins are kept in size order, with ties going to the
1499 approximately least recently used chunk. Ordering isn't needed
1500 for the small bins, which all contain the same-sized chunks, but
1501 facilitates best-fit allocation for larger chunks. These lists
1502 are just sequential. Keeping them in order almost never requires
1503 enough traversal to warrant using fancier ordered data
1504 structures.
1505
1506 Chunks of the same size are linked with the most
1507 recently freed at the front, and allocations are taken from the
1508 back. This results in LRU (FIFO) allocation order, which tends
1509 to give each chunk an equal opportunity to be consolidated with
1510 adjacent freed chunks, resulting in larger free chunks and less
1511 fragmentation.
1512
1513 To simplify use in double-linked lists, each bin header acts
1514 as a malloc_chunk. This avoids special-casing for headers.
1515 But to conserve space and improve locality, we allocate
1516 only the fd/bk pointers of bins, and then use repositioning tricks
1517 to treat these as the fields of a malloc_chunk*.
1518 */
1519
1520 typedef struct malloc_chunk *mbinptr;
1521
1522 /* addressing -- note that bin_at(0) does not exist */
1523 #define bin_at(m, i) \
1524 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1525 - offsetof (struct malloc_chunk, fd))
1526
1527 /* analog of ++bin */
1528 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1529
1530 /* Reminders about list directionality within bins */
1531 #define first(b) ((b)->fd)
1532 #define last(b) ((b)->bk)
1533
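/* Editor's note: an illustrative, non-compiled sketch (not part of glibc) of
   walking one regular bin with the macros above.  Because each bin header is
   treated as a malloc_chunk, the list is circular and the header itself marks
   the end; new chunks are linked in at the front (first) and allocation scans
   from the back (last), giving the FIFO behaviour described above.  The
   function name example_count_bin is hypothetical.  */
#if 0
static size_t
example_count_bin (mstate av, int idx)
{
  mbinptr b = bin_at (av, idx);
  size_t n = 0;
  for (mchunkptr p = last (b); p != b; p = p->bk)
    ++n;
  return n;
}
#endif
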
1534 /*
1535 Indexing
1536
1537 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1538 8 bytes apart. Larger bins are approximately logarithmically spaced:
1539
1540 64 bins of size 8
1541 32 bins of size 64
1542 16 bins of size 512
1543 8 bins of size 4096
1544 4 bins of size 32768
1545 2 bins of size 262144
1546 1 bin of size what's left
1547
1548 There is actually a little bit of slop in the numbers in bin_index
1549 for the sake of speed. This makes no difference elsewhere.
1550
1551 The bins top out around 1MB because we expect to service large
1552 requests via mmap.
1553
1554 Bin 0 does not exist. Bin 1 is the unsorted list; if that would be
1555 a valid chunk size, the small bins are bumped up by one.
1556 */
1557
1558 #define NBINS 128
1559 #define NSMALLBINS 64
1560 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1561 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
1562 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1563
1564 #define in_smallbin_range(sz) \
1565 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1566
1567 #define smallbin_index(sz) \
1568 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1569 + SMALLBIN_CORRECTION)
1570
1571 #define largebin_index_32(sz) \
1572 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1573 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1574 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1575 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1576 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1577 126)
1578
1579 #define largebin_index_32_big(sz) \
1580 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1581 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1582 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1583 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1584 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1585 126)
1586
1587 // XXX It remains to be seen whether it is good to keep the widths of
1588 // XXX the buckets the same or whether it should be scaled by a factor
1589 // XXX of two as well.
1590 #define largebin_index_64(sz) \
1591 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1592 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1593 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1594 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1595 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1596 126)
1597
1598 #define largebin_index(sz) \
1599 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1600 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1601 : largebin_index_32 (sz))
1602
1603 #define bin_index(sz) \
1604 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
1605
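/* Editor's note: an illustrative, non-compiled sketch (not part of glibc) of
   the size-to-bin dispatch as used when inserting a sorted chunk.  The exact
   index for a given size depends on SIZE_SZ and MALLOC_ALIGNMENT, so no
   concrete values are assumed here: small sizes resolve through
   smallbin_index, everything else through the logarithmically spaced
   largebin_index tables.  The function name example_bin_lookup is
   hypothetical.  */
#if 0
static void
example_bin_lookup (mstate av, size_t chunk_size)
{
  unsigned int idx = bin_index (chunk_size); /* smallbin or largebin index */
  mbinptr bin = bin_at (av, idx);            /* header of that bin         */
  (void) bin;
}
#endif
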
1606 /* Take a chunk off a bin list. */
1607 static void
1608 unlink_chunk (mstate av, mchunkptr p)
1609 {
1610 if (chunksize (p) != prev_size (next_chunk (p)))
1611 malloc_printerr ("corrupted size vs. prev_size");
1612
1613 mchunkptr fd = p->fd;
1614 mchunkptr bk = p->bk;
1615
1616 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1617 malloc_printerr ("corrupted double-linked list");
1618
1619 fd->bk = bk;
1620 bk->fd = fd;
1621 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1622 {
1623 if (p->fd_nextsize->bk_nextsize != p
1624 || p->bk_nextsize->fd_nextsize != p)
1625 malloc_printerr ("corrupted double-linked list (not small)");
1626
1627 if (fd->fd_nextsize == NULL)
1628 {
1629 if (p->fd_nextsize == p)
1630 fd->fd_nextsize = fd->bk_nextsize = fd;
1631 else
1632 {
1633 fd->fd_nextsize = p->fd_nextsize;
1634 fd->bk_nextsize = p->bk_nextsize;
1635 p->fd_nextsize->bk_nextsize = fd;
1636 p->bk_nextsize->fd_nextsize = fd;
1637 }
1638 }
1639 else
1640 {
1641 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1642 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1643 }
1644 }
1645 }
1646
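/* Editor's note: an illustrative, non-compiled sketch (not part of glibc),
   showing a simplified version of the backward-coalescing step in free,
   which is one place unlink_chunk is used: if the neighbouring chunk below P
   is free, it is removed from whatever bin holds it and merged into P before
   P is binned.  The real _int_free performs additional sanity checks omitted
   here; the function name example_coalesce_backward is hypothetical.  */
#if 0
static mchunkptr
example_coalesce_backward (mstate av, mchunkptr p, size_t *size)
{
  if (!prev_inuse (p))
    {
      size_t prevsize = prev_size (p);
      mchunkptr prev = prev_chunk (p);
      unlink_chunk (av, prev);        /* take PREV off its free list */
      *size += prevsize;              /* grow the chunk downwards     */
      p = prev;
    }
  return p;
}
#endif
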
1647 /*
1648 Unsorted chunks
1649
1650 All remainders from chunk splits, as well as all returned chunks,
1651 are first placed in the "unsorted" bin. They are then placed
1652 in regular bins after malloc gives them ONE chance to be used before
1653 binning. So, basically, the unsorted_chunks list acts as a queue,
1654 with chunks being placed on it in free (and malloc_consolidate),
1655 and taken off (to be either used or placed in bins) in malloc.
1656
1657 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1658 does not have to be taken into account in size comparisons.
1659 */
1660
1661 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1662 #define unsorted_chunks(M) (bin_at (M, 1))
1663
1664 /*
1665 Top
1666
1667 The top-most available chunk (i.e., the one bordering the end of
1668 available memory) is treated specially. It is never included in
1669 any bin, is used only if no other chunk is available, and is
1670 released back to the system if it is very large (see
1671 M_TRIM_THRESHOLD). Because top initially
1672 points to its own bin with initial zero size, thus forcing
1673 extension on the first malloc request, we avoid having any special
1674 code in malloc to check whether it even exists yet. But we still
1675 need to do so when getting memory from system, so we make
1676 initial_top treat the bin as a legal but unusable chunk during the
1677 interval between initialization and the first call to
1678 sysmalloc. (This is somewhat delicate, since it relies on
1679 the 2 preceding words to be zero during this interval as well.)
1680 */
1681
1682 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1683 #define initial_top(M) (unsorted_chunks (M))
1684
1685 /*
1686 Binmap
1687
1688 To help compensate for the large number of bins, a one-level index
1689 structure is used for bin-by-bin searching. `binmap' is a
1690 bitvector recording whether bins are definitely empty so they can
1691 be skipped over during traversals. The bits are NOT always
1692 cleared as soon as bins are empty, but instead only
1693 when they are noticed to be empty during traversal in malloc.
1694 */
1695
1696 /* Conservatively use 32 bits per map word, even if on 64bit system */
1697 #define BINMAPSHIFT 5
1698 #define BITSPERMAP (1U << BINMAPSHIFT)
1699 #define BINMAPSIZE (NBINS / BITSPERMAP)
1700
1701 #define idx2block(i) ((i) >> BINMAPSHIFT)
1702 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1703
1704 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1705 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1706 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
1707
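/* Editor's note: an illustrative, non-compiled sketch (not part of glibc) of
   consulting the binmap.  A clear bit means the bin is definitely empty; a
   set bit only means it might be non-empty, so callers must still inspect
   the list and call unmark_bin when they find it empty.  The real scan in
   _int_malloc works a whole 32-bin block at a time; this simplified version
   checks one bin per iteration.  The function name is hypothetical.  */
#if 0
static int
example_next_possibly_nonempty_bin (mstate av, int start)
{
  for (int i = start; i < NBINS; ++i)
    if (get_binmap (av, i))
      return i;
  return -1;   /* every remaining bin is known to be empty */
}
#endif
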
1708 /*
1709 Fastbins
1710
1711 An array of lists holding recently freed small chunks. Fastbins
1712 are not doubly linked. It is faster to single-link them, and
1713 since chunks are never removed from the middles of these lists,
1714 double linking is not necessary. Also, unlike regular bins, they
1715 are not even processed in FIFO order (they use faster LIFO) since
1716 ordering doesn't much matter in the transient contexts in which
1717 fastbins are normally used.
1718
1719 Chunks in fastbins keep their inuse bit set, so they cannot
1720 be consolidated with other free chunks. malloc_consolidate
1721 releases all chunks in fastbins and consolidates them with
1722 other free chunks.
1723 */
1724
1725 typedef struct malloc_chunk *mfastbinptr;
1726 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1727
1728 /* offset 2 to use otherwise unindexable first 2 bins */
1729 #define fastbin_index(sz) \
1730 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1731
1732
1733 /* The maximum fastbin request size we support */
1734 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1735
1736 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
1737
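/* Editor's note: an illustrative, non-compiled sketch (not part of glibc) of
   the LIFO push/pop discipline of a fastbin.  The real code additionally
   mangles the fd pointers (PROTECT_PTR/REVEAL_PTR) and uses atomic
   compare-and-exchange on the bin head; both are omitted here for clarity,
   and the function names are hypothetical.  */
#if 0
static void
example_fastbin_push (mstate av, mchunkptr p)
{
  unsigned int idx = fastbin_index (chunksize (p));
  p->fd = fastbin (av, idx);      /* link in front of the old head */
  fastbin (av, idx) = p;
}

static mchunkptr
example_fastbin_pop (mstate av, size_t nb)
{
  unsigned int idx = fastbin_index (nb);
  mchunkptr p = fastbin (av, idx);
  if (p != NULL)
    fastbin (av, idx) = p->fd;    /* unlink the head; inuse bit stays set */
  return p;
}
#endif
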
1738 /*
1739 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1740 that triggers automatic consolidation of possibly-surrounding
1741 fastbin chunks. This is a heuristic, so the exact value should not
1742 matter too much. It is defined at half the default trim threshold as a
1743 compromise heuristic to only attempt consolidation if it is likely
1744 to lead to trimming. However, it is not dynamically tunable, since
1745 consolidation reduces fragmentation surrounding large chunks even
1746 if trimming is not used.
1747 */
1748
1749 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1750
1751 /*
1752 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1753 regions. Otherwise, contiguity is exploited in merging together,
1754 when possible, results from consecutive MORECORE calls.
1755
1756 The initial value comes from MORECORE_CONTIGUOUS, but is
1757 changed dynamically if mmap is ever used as an sbrk substitute.
1758 */
1759
1760 #define NONCONTIGUOUS_BIT (2U)
1761
1762 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1763 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1764 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1765 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1766
1767 /* Maximum size of memory handled in fastbins. */
1768 static uint8_t global_max_fast;
1769
1770 /*
1771 Set value of max_fast.
1772 Use impossibly small value if 0.
1773 Precondition: there are no existing fastbin chunks in the main arena.
1774 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1775 before changing max_fast. Note other arenas will leak their fast bin
1776 entries if max_fast is reduced.
1777 */
1778
1779 #define set_max_fast(s) \
1780 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
1781 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1782
1783 static inline INTERNAL_SIZE_T
1784 get_max_fast (void)
1785 {
1786 /* Tell the GCC optimizers that global_max_fast is never larger
1787 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1788 _int_malloc after constant propagation of the size parameter.
1789 (The code never executes because malloc preserves the
1790 global_max_fast invariant, but the optimizers may not recognize
1791 this.) */
1792 if (global_max_fast > MAX_FAST_SIZE)
1793 __builtin_unreachable ();
1794 return global_max_fast;
1795 }
1796
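/* Editor's note: an illustrative, non-compiled sketch (not part of glibc).
   The application-visible way to change max_fast is mallopt (M_MXFAST, n);
   internally that path consolidates the main arena's fastbins first (the
   precondition above) and then calls set_max_fast.  Passing 0 disables
   fastbins entirely.  */
#if 0
#include <malloc.h>
#include <stdlib.h>

int
main (void)
{
  mallopt (M_MXFAST, 0);          /* turn fastbin caching off             */
  void *p = malloc (32);          /* served without the fastbin fast path */
  free (p);
  return 0;
}
#endif
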
1797 /*
1798 ----------- Internal state representation and initialization -----------
1799 */
1800
1801 /*
1802 have_fastchunks indicates that there are probably some fastbin chunks.
1803 It is set true on entering a chunk into any fastbin, and cleared early in
1804 malloc_consolidate. The value is approximate since it may be set when there
1805 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1806 available. Given its sole purpose is to reduce the number of redundant calls to
1807 malloc_consolidate, it does not affect correctness. As a result we can safely
1808 use relaxed atomic accesses.
1809 */
1810
1811
1812 struct malloc_state
1813 {
1814 /* Serialize access. */
1815 __libc_lock_define (, mutex);
1816
1817 /* Flags (formerly in max_fast). */
1818 int flags;
1819
1820 /* Set if the fastbin chunks contain recently inserted free blocks. */
1821 /* Note this is a bool but not all targets support atomics on booleans. */
1822 int have_fastchunks;
1823
1824 /* Fastbins */
1825 mfastbinptr fastbinsY[NFASTBINS];
1826
1827 /* Base of the topmost chunk -- not otherwise kept in a bin */
1828 mchunkptr top;
1829
1830 /* The remainder from the most recent split of a small request */
1831 mchunkptr last_remainder;
1832
1833 /* Normal bins packed as described above */
1834 mchunkptr bins[NBINS * 2 - 2];
1835
1836 /* Bitmap of bins */
1837 unsigned int binmap[BINMAPSIZE];
1838
1839 /* Linked list */
1840 struct malloc_state *next;
1841
1842 /* Linked list for free arenas. Access to this field is serialized
1843 by free_list_lock in arena.c. */
1844 struct malloc_state *next_free;
1845
1846 /* Number of threads attached to this arena. 0 if the arena is on
1847 the free list. Access to this field is serialized by
1848 free_list_lock in arena.c. */
1849 INTERNAL_SIZE_T attached_threads;
1850
1851 /* Memory allocated from the system in this arena. */
1852 INTERNAL_SIZE_T system_mem;
1853 INTERNAL_SIZE_T max_system_mem;
1854 };
1855
1856 struct malloc_par
1857 {
1858 /* Tunable parameters */
1859 unsigned long trim_threshold;
1860 INTERNAL_SIZE_T top_pad;
1861 INTERNAL_SIZE_T mmap_threshold;
1862 INTERNAL_SIZE_T arena_test;
1863 INTERNAL_SIZE_T arena_max;
1864
1865 /* Transparent Large Page support. */
1866 INTERNAL_SIZE_T thp_pagesize;
1867 /* A value different from 0 means to align mmap allocations to hp_pagesize
1868 and to add hp_flags to the mmap flags. */
1869 INTERNAL_SIZE_T hp_pagesize;
1870 int hp_flags;
1871
1872 /* Memory map support */
1873 int n_mmaps;
1874 int n_mmaps_max;
1875 int max_n_mmaps;
1876 /* the mmap_threshold is dynamic, until the user sets
1877 it manually, at which point we need to disable any
1878 dynamic behavior. */
1879 int no_dyn_threshold;
1880
1881 /* Statistics */
1882 INTERNAL_SIZE_T mmapped_mem;
1883 INTERNAL_SIZE_T max_mmapped_mem;
1884
1885 /* First address handed out by MORECORE/sbrk. */
1886 char *sbrk_base;
1887
1888 #if USE_TCACHE
1889 /* Maximum number of buckets to use. */
1890 size_t tcache_bins;
1891 size_t tcache_max_bytes;
1892 /* Maximum number of chunks in each bucket. */
1893 size_t tcache_count;
1894 /* Maximum number of chunks to remove from the unsorted list, which
1895 aren't used to prefill the cache. */
1896 size_t tcache_unsorted_limit;
1897 #endif
1898 };
1899
1900 /* There are several instances of this struct ("arenas") in this
1901 malloc. If you are adapting this malloc in a way that does NOT use
1902 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1903 before using. This malloc relies on the property that malloc_state
1904 is initialized to all zeroes (as is true of C statics). */
1905
1906 static struct malloc_state main_arena =
1907 {
1908 .mutex = _LIBC_LOCK_INITIALIZER,
1909 .next = &main_arena,
1910 .attached_threads = 1
1911 };
1912
1913 /* There is only one instance of the malloc parameters. */
1914
1915 static struct malloc_par mp_ =
1916 {
1917 .top_pad = DEFAULT_TOP_PAD,
1918 .n_mmaps_max = DEFAULT_MMAP_MAX,
1919 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1920 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1921 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1922 .arena_test = NARENAS_FROM_NCORES (1)
1923 #if USE_TCACHE
1924 ,
1925 .tcache_count = TCACHE_FILL_COUNT,
1926 .tcache_bins = TCACHE_MAX_BINS,
1927 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1928 .tcache_unsorted_limit = 0 /* No limit. */
1929 #endif
1930 };
1931
1932 /*
1933 Initialize a malloc_state struct.
1934
1935 This is called from ptmalloc_init () or from _int_new_arena ()
1936 when creating a new arena.
1937 */
1938
1939 static void
1940 malloc_init_state (mstate av)
1941 {
1942 int i;
1943 mbinptr bin;
1944
1945 /* Establish circular links for normal bins */
1946 for (i = 1; i < NBINS; ++i)
1947 {
1948 bin = bin_at (av, i);
1949 bin->fd = bin->bk = bin;
1950 }
1951
1952 #if MORECORE_CONTIGUOUS
1953 if (av != &main_arena)
1954 #endif
1955 set_noncontiguous (av);
1956 if (av == &main_arena)
1957 set_max_fast (DEFAULT_MXFAST);
1958 atomic_store_relaxed (&av->have_fastchunks, false);
1959
1960 av->top = initial_top (av);
1961 }
1962
1963 /*
1964 Other internal utilities operating on mstates
1965 */
1966
1967 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1968 static int systrim (size_t, mstate);
1969 static void malloc_consolidate (mstate);
1970
1971
1972 /* -------------- Early definitions for debugging hooks ---------------- */
1973
1974 /* This function is called from the arena shutdown hook, to free the
1975 thread cache (if it exists). */
1976 static void tcache_thread_shutdown (void);
1977
1978 /* ------------------ Testing support ----------------------------------*/
1979
1980 static int perturb_byte;
1981
1982 static void
1983 alloc_perturb (char *p, size_t n)
1984 {
1985 if (__glibc_unlikely (perturb_byte))
1986 memset (p, perturb_byte ^ 0xff, n);
1987 }
1988
1989 static void
1990 free_perturb (char *p, size_t n)
1991 {
1992 if (__glibc_unlikely (perturb_byte))
1993 memset (p, perturb_byte, n);
1994 }
1995
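/* Editor's note: an illustrative, non-compiled sketch (not part of glibc).
   The perturb byte is normally enabled with mallopt (M_PERTURB, byte) or
   the glibc.malloc.perturb tunable.  Newly allocated memory is then filled
   with byte ^ 0xff and freed memory with byte, which helps expose reads of
   uninitialized or freed data.  */
#if 0
#include <malloc.h>
#include <stdlib.h>

int
main (void)
{
  mallopt (M_PERTURB, 0xAA);
  unsigned char *p = malloc (16);   /* contents are now 0x55 (0xAA ^ 0xff) */
  free (p);                         /* contents are overwritten with 0xAA  */
  return 0;
}
#endif
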
1996
1997
1998 #include <stap-probe.h>
1999
2000 /* ----------- Routines dealing with transparent huge pages ----------- */
2001
2002 static inline void
2003 madvise_thp (void *p, INTERNAL_SIZE_T size)
2004 {
2005 #ifdef MADV_HUGEPAGE
2006 /* Do not consider areas smaller than a huge page or if the tunable is
2007 not active. */
2008 if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
2009 return;
2010
2011 /* Linux requires the input address to be page-aligned, and unaligned
2012 inputs happen only for the initial data segment. */
2013 if (__glibc_unlikely (!PTR_IS_ALIGNED (p, GLRO (dl_pagesize))))
2014 {
2015 void *q = PTR_ALIGN_DOWN (p, GLRO (dl_pagesize));
2016 size += PTR_DIFF (p, q);
2017 p = q;
2018 }
2019
2020 __madvise (p, size, MADV_HUGEPAGE);
2021 #endif
2022 }
2023
2024 /* ------------------- Support for multiple arenas -------------------- */
2025 #include "arena.c"
2026
2027 /*
2028 Debugging support
2029
2030 These routines make a number of assertions about the states
2031 of data structures that should be true at all times. If any
2032 are not true, it's very likely that a user program has somehow
2033 trashed memory. (It's also possible that there is a coding error
2034 in malloc, in which case please report it!)
2035 */
2036
2037 #if !MALLOC_DEBUG
2038
2039 # define check_chunk(A, P)
2040 # define check_free_chunk(A, P)
2041 # define check_inuse_chunk(A, P)
2042 # define check_remalloced_chunk(A, P, N)
2043 # define check_malloced_chunk(A, P, N)
2044 # define check_malloc_state(A)
2045
2046 #else
2047
2048 # define check_chunk(A, P) do_check_chunk (A, P)
2049 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
2050 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2051 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2052 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2053 # define check_malloc_state(A) do_check_malloc_state (A)
2054
2055 /*
2056 Properties of all chunks
2057 */
2058
2059 static void
2060 do_check_chunk (mstate av, mchunkptr p)
2061 {
2062 unsigned long sz = chunksize (p);
2063 /* min and max possible addresses assuming contiguous allocation */
2064 char *max_address = (char *) (av->top) + chunksize (av->top);
2065 char *min_address = max_address - av->system_mem;
2066
2067 if (!chunk_is_mmapped (p))
2068 {
2069 /* Has legal address ... */
2070 if (p != av->top)
2071 {
2072 if (contiguous (av))
2073 {
2074 assert (((char *) p) >= min_address);
2075 assert (((char *) p + sz) <= ((char *) (av->top)));
2076 }
2077 }
2078 else
2079 {
2080 /* top size is always at least MINSIZE */
2081 assert ((unsigned long) (sz) >= MINSIZE);
2082 /* top predecessor always marked inuse */
2083 assert (prev_inuse (p));
2084 }
2085 }
2086 else
2087 {
2088 /* address is outside main heap */
2089 if (contiguous (av) && av->top != initial_top (av))
2090 {
2091 assert (((char *) p) < min_address || ((char *) p) >= max_address);
2092 }
2093 /* chunk is page-aligned */
2094 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
2095 /* mem is aligned */
2096 assert (aligned_OK (chunk2mem (p)));
2097 }
2098 }
2099
2100 /*
2101 Properties of free chunks
2102 */
2103
2104 static void
2105 do_check_free_chunk (mstate av, mchunkptr p)
2106 {
2107 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2108 mchunkptr next = chunk_at_offset (p, sz);
2109
2110 do_check_chunk (av, p);
2111
2112 /* Chunk must claim to be free ... */
2113 assert (!inuse (p));
2114 assert (!chunk_is_mmapped (p));
2115
2116 /* Unless a special marker, must have OK fields */
2117 if ((unsigned long) (sz) >= MINSIZE)
2118 {
2119 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2120 assert (aligned_OK (chunk2mem (p)));
2121 /* ... matching footer field */
2122 assert (prev_size (next_chunk (p)) == sz);
2123 /* ... and is fully consolidated */
2124 assert (prev_inuse (p));
2125 assert (next == av->top || inuse (next));
2126
2127 /* ... and has minimally sane links */
2128 assert (p->fd->bk == p);
2129 assert (p->bk->fd == p);
2130 }
2131 else /* markers are always of size SIZE_SZ */
2132 assert (sz == SIZE_SZ);
2133 }
2134
2135 /*
2136 Properties of inuse chunks
2137 */
2138
2139 static void
2140 do_check_inuse_chunk (mstate av, mchunkptr p)
2141 {
2142 mchunkptr next;
2143
2144 do_check_chunk (av, p);
2145
2146 if (chunk_is_mmapped (p))
2147 return; /* mmapped chunks have no next/prev */
2148
2149 /* Check whether it claims to be in use ... */
2150 assert (inuse (p));
2151
2152 next = next_chunk (p);
2153
2154 /* ... and is surrounded by OK chunks.
2155 Since more things can be checked with free chunks than inuse ones,
2156 if an inuse chunk borders them and debug is on, it's worth doing them.
2157 */
2158 if (!prev_inuse (p))
2159 {
2160 /* Note that we cannot even look at prev unless it is not inuse */
2161 mchunkptr prv = prev_chunk (p);
2162 assert (next_chunk (prv) == p);
2163 do_check_free_chunk (av, prv);
2164 }
2165
2166 if (next == av->top)
2167 {
2168 assert (prev_inuse (next));
2169 assert (chunksize (next) >= MINSIZE);
2170 }
2171 else if (!inuse (next))
2172 do_check_free_chunk (av, next);
2173 }
2174
2175 /*
2176 Properties of chunks recycled from fastbins
2177 */
2178
2179 static void
2180 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2181 {
2182 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2183
2184 if (!chunk_is_mmapped (p))
2185 {
2186 assert (av == arena_for_chunk (p));
2187 if (chunk_main_arena (p))
2188 assert (av == &main_arena);
2189 else
2190 assert (av != &main_arena);
2191 }
2192
2193 do_check_inuse_chunk (av, p);
2194
2195 /* Legal size ... */
2196 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2197 assert ((unsigned long) (sz) >= MINSIZE);
2198 /* ... and alignment */
2199 assert (aligned_OK (chunk2mem (p)));
2200 /* chunk is less than MINSIZE more than request */
2201 assert ((long) (sz) - (long) (s) >= 0);
2202 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2203 }
2204
2205 /*
2206 Properties of nonrecycled chunks at the point they are malloced
2207 */
2208
2209 static void
2210 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2211 {
2212 /* same as recycled case ... */
2213 do_check_remalloced_chunk (av, p, s);
2214
2215 /*
2216 ... plus, must obey implementation invariant that prev_inuse is
2217 always true of any allocated chunk; i.e., that each allocated
2218 chunk borders either a previously allocated and still in-use
2219 chunk, or the base of its memory arena. This is ensured
2220 by making all allocations from the `lowest' part of any found
2221 chunk. This does not necessarily hold however for chunks
2222 recycled via fastbins.
2223 */
2224
2225 assert (prev_inuse (p));
2226 }
2227
2228
2229 /*
2230 Properties of malloc_state.
2231
2232 This may be useful for debugging malloc, as well as detecting user
2233 programmer errors that somehow write into malloc_state.
2234
2235 If you are extending or experimenting with this malloc, you can
2236 probably figure out how to hack this routine to print out or
2237 display chunk addresses, sizes, bins, and other instrumentation.
2238 */
2239
2240 static void
2241 do_check_malloc_state (mstate av)
2242 {
2243 int i;
2244 mchunkptr p;
2245 mchunkptr q;
2246 mbinptr b;
2247 unsigned int idx;
2248 INTERNAL_SIZE_T size;
2249 unsigned long total = 0;
2250 int max_fast_bin;
2251
2252 /* internal size_t must be no wider than pointer type */
2253 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2254
2255 /* alignment is a power of 2 */
2256 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2257
2258 /* Check the arena is initialized. */
2259 assert (av->top != 0);
2260
2261 /* No memory has been allocated yet, so doing more tests is not possible. */
2262 if (av->top == initial_top (av))
2263 return;
2264
2265 /* pagesize is a power of 2 */
2266 assert (powerof2(GLRO (dl_pagesize)));
2267
2268 /* A contiguous main_arena is consistent with sbrk_base. */
2269 if (av == &main_arena && contiguous (av))
2270 assert ((char *) mp_.sbrk_base + av->system_mem ==
2271 (char *) av->top + chunksize (av->top));
2272
2273 /* properties of fastbins */
2274
2275 /* max_fast is in allowed range */
2276 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2277
2278 max_fast_bin = fastbin_index (get_max_fast ());
2279
2280 for (i = 0; i < NFASTBINS; ++i)
2281 {
2282 p = fastbin (av, i);
2283
2284 /* The following test can only be performed for the main arena.
2285 While mallopt calls malloc_consolidate to get rid of all fast
2286 bins (especially those larger than the new maximum) this does
2287 only happen for the main arena. Trying to do this for any
2288 other arena would mean those arenas have to be locked and
2289 malloc_consolidate be called for them. This is excessive. And
2290 even if this is acceptable to somebody it still cannot solve
2291 the problem completely since if the arena is locked a
2292 concurrent malloc call might create a new arena which then
2293 could use the newly invalid fast bins. */
2294
2295 /* all bins past max_fast are empty */
2296 if (av == &main_arena && i > max_fast_bin)
2297 assert (p == 0);
2298
2299 while (p != 0)
2300 {
2301 if (__glibc_unlikely (misaligned_chunk (p)))
2302 malloc_printerr ("do_check_malloc_state(): "
2303 "unaligned fastbin chunk detected");
2304 /* each chunk claims to be inuse */
2305 do_check_inuse_chunk (av, p);
2306 total += chunksize (p);
2307 /* chunk belongs in this bin */
2308 assert (fastbin_index (chunksize (p)) == i);
2309 p = REVEAL_PTR (p->fd);
2310 }
2311 }
2312
2313 /* check normal bins */
2314 for (i = 1; i < NBINS; ++i)
2315 {
2316 b = bin_at (av, i);
2317
2318 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2319 if (i >= 2)
2320 {
2321 unsigned int binbit = get_binmap (av, i);
2322 int empty = last (b) == b;
2323 if (!binbit)
2324 assert (empty);
2325 else if (!empty)
2326 assert (binbit);
2327 }
2328
2329 for (p = last (b); p != b; p = p->bk)
2330 {
2331 /* each chunk claims to be free */
2332 do_check_free_chunk (av, p);
2333 size = chunksize (p);
2334 total += size;
2335 if (i >= 2)
2336 {
2337 /* chunk belongs in bin */
2338 idx = bin_index (size);
2339 assert (idx == i);
2340 /* lists are sorted */
2341 assert (p->bk == b ||
2342 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2343
2344 if (!in_smallbin_range (size))
2345 {
2346 if (p->fd_nextsize != NULL)
2347 {
2348 if (p->fd_nextsize == p)
2349 assert (p->bk_nextsize == p);
2350 else
2351 {
2352 if (p->fd_nextsize == first (b))
2353 assert (chunksize (p) < chunksize (p->fd_nextsize));
2354 else
2355 assert (chunksize (p) > chunksize (p->fd_nextsize));
2356
2357 if (p == first (b))
2358 assert (chunksize (p) > chunksize (p->bk_nextsize));
2359 else
2360 assert (chunksize (p) < chunksize (p->bk_nextsize));
2361 }
2362 }
2363 else
2364 assert (p->bk_nextsize == NULL);
2365 }
2366 }
2367 else if (!in_smallbin_range (size))
2368 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2369 /* chunk is followed by a legal chain of inuse chunks */
2370 for (q = next_chunk (p);
2371 (q != av->top && inuse (q) &&
2372 (unsigned long) (chunksize (q)) >= MINSIZE);
2373 q = next_chunk (q))
2374 do_check_inuse_chunk (av, q);
2375 }
2376 }
2377
2378 /* top chunk is OK */
2379 check_chunk (av, av->top);
2380 }
2381 #endif
2382
2383
2384 /* ----------------- Support for debugging hooks -------------------- */
2385 #if IS_IN (libc)
2386 #include "hooks.c"
2387 #endif
2388
2389
2390 /* ----------- Routines dealing with system allocation -------------- */
2391
2392 /*
2393 sysmalloc handles malloc cases requiring more memory from the system.
2394 On entry, it is assumed that av->top does not have enough
2395 space to service request for nb bytes, thus requiring that av->top
2396 be extended or replaced.
2397 */
2398
2399 static void *
2400 sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
2401 {
2402 long int size;
2403
2404 /*
2405 Round up size to nearest page. For mmapped chunks, the overhead is one
2406 SIZE_SZ unit larger than for normal chunks, because there is no
2407 following chunk whose prev_size field could be used.
2408
2409 See the front_misalign handling below; for glibc there is no need for
2410 further alignments unless we have high alignment requirements.
2411 */
2412 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2413 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2414 else
2415 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2416
2417 /* Don't try if size wraps around 0. */
2418 if ((unsigned long) (size) <= (unsigned long) (nb))
2419 return MAP_FAILED;
2420
2421 char *mm = (char *) MMAP (0, size,
2422 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2423 extra_flags);
2424 if (mm == MAP_FAILED)
2425 return mm;
2426
2427 #ifdef MAP_HUGETLB
2428 if (!(extra_flags & MAP_HUGETLB))
2429 madvise_thp (mm, size);
2430 #endif
2431
2432 __set_vma_name (mm, size, " glibc: malloc");
2433
2434 /*
2435 The offset to the start of the mmapped region is stored in the prev_size
2436 field of the chunk. This allows us to adjust returned start address to
2437 meet alignment requirements here and in memalign(), and still be able to
2438 compute proper address argument for later munmap in free() and realloc().
2439 */
2440
2441 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2442
2443 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2444 {
2445 /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
2446 MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
2447 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2448 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2449 front_misalign = 0;
2450 }
2451 else
2452 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2453
2454 mchunkptr p; /* the allocated/returned chunk */
2455
2456 if (front_misalign > 0)
2457 {
2458 ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
2459 p = (mchunkptr) (mm + correction);
2460 set_prev_size (p, correction);
2461 set_head (p, (size - correction) | IS_MMAPPED);
2462 }
2463 else
2464 {
2465 p = (mchunkptr) mm;
2466 set_prev_size (p, 0);
2467 set_head (p, size | IS_MMAPPED);
2468 }
2469
2470 /* update statistics */
2471 int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
2472 atomic_max (&mp_.max_n_mmaps, new);
2473
2474 unsigned long sum;
2475 sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
2476 atomic_max (&mp_.max_mmapped_mem, sum);
2477
2478 check_chunk (av, p);
2479
2480 return chunk2mem (p);
2481 }
2482
2483 /*
2484 Allocate memory using mmap() based on the requested sizes S and NB, aligning to
2485 PAGESIZE if required. EXTRA_FLAGS is passed to the mmap() call. If the call
2486 succeeds, S is updated with the allocated size. This is used as a fallback
2487 if MORECORE fails.
2488 */
2489 static void *
2490 sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
2491 INTERNAL_SIZE_T old_size, size_t minsize,
2492 size_t pagesize, int extra_flags, mstate av)
2493 {
2494 long int size = *s;
2495
2496 /* Cannot merge with old top, so add its size back in */
2497 if (contiguous (av))
2498 size = ALIGN_UP (size + old_size, pagesize);
2499
2500 /* If we are relying on mmap as backup, then use larger units */
2501 if ((unsigned long) (size) < minsize)
2502 size = minsize;
2503
2504 /* Don't try if size wraps around 0 */
2505 if ((unsigned long) (size) <= (unsigned long) (nb))
2506 return MORECORE_FAILURE;
2507
2508 char *mbrk = (char *) (MMAP (0, size,
2509 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2510 extra_flags));
2511 if (mbrk == MAP_FAILED)
2512 return MAP_FAILED;
2513
2514 #ifdef MAP_HUGETLB
2515 if (!(extra_flags & MAP_HUGETLB))
2516 madvise_thp (mbrk, size);
2517 #endif
2518
2519 __set_vma_name (mbrk, size, " glibc: malloc");
2520
2521 /* Record that we no longer have a contiguous sbrk region. After the first
2522 time mmap is used as backup, we do not ever rely on contiguous space
2523 since this could incorrectly bridge regions. */
2524 set_noncontiguous (av);
2525
2526 *s = size;
2527 return mbrk;
2528 }
2529
2530 static void *
2531 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2532 {
2533 mchunkptr old_top; /* incoming value of av->top */
2534 INTERNAL_SIZE_T old_size; /* its size */
2535 char *old_end; /* its end address */
2536
2537 long size; /* arg to first MORECORE or mmap call */
2538 char *brk; /* return value from MORECORE */
2539
2540 long correction; /* arg to 2nd MORECORE call */
2541 char *snd_brk; /* 2nd return val */
2542
2543 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2544 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2545 char *aligned_brk; /* aligned offset into brk */
2546
2547 mchunkptr p; /* the allocated/returned chunk */
2548 mchunkptr remainder; /* remainder from allocation */
2549 unsigned long remainder_size; /* its size */
2550
2551
2552 size_t pagesize = GLRO (dl_pagesize);
2553 bool tried_mmap = false;
2554
2555
2556 /*
2557 If have mmap, and the request size meets the mmap threshold, and
2558 the system supports mmap, and there are few enough currently
2559 allocated mmapped regions, try to directly map this request
2560 rather than expanding top.
2561 */
2562
2563 if (av == NULL
2564 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2565 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2566 {
2567 char *mm;
2568 if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
2569 {
2570 /* There is no need to issue the THP madvise call if Huge Pages are
2571 used directly. */
2572 mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
2573 if (mm != MAP_FAILED)
2574 return mm;
2575 }
2576 mm = sysmalloc_mmap (nb, pagesize, 0, av);
2577 if (mm != MAP_FAILED)
2578 return mm;
2579 tried_mmap = true;
2580 }
2581
2582 /* There are no usable arenas and mmap also failed. */
2583 if (av == NULL)
2584 return 0;
2585
2586 /* Record incoming configuration of top */
2587
2588 old_top = av->top;
2589 old_size = chunksize (old_top);
2590 old_end = (char *) (chunk_at_offset (old_top, old_size));
2591
2592 brk = snd_brk = (char *) (MORECORE_FAILURE);
2593
2594 /*
2595 If not the first time through, we require old_size to be
2596 at least MINSIZE and to have prev_inuse set.
2597 */
2598
2599 assert ((old_top == initial_top (av) && old_size == 0) ||
2600 ((unsigned long) (old_size) >= MINSIZE &&
2601 prev_inuse (old_top) &&
2602 ((unsigned long) old_end & (pagesize - 1)) == 0));
2603
2604 /* Precondition: not enough current space to satisfy nb request */
2605 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2606
2607
2608 if (av != &main_arena)
2609 {
2610 heap_info *old_heap, *heap;
2611 size_t old_heap_size;
2612
2613 /* First try to extend the current heap. */
2614 old_heap = heap_for_ptr (old_top);
2615 old_heap_size = old_heap->size;
2616 if ((long) (MINSIZE + nb - old_size) > 0
2617 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2618 {
2619 av->system_mem += old_heap->size - old_heap_size;
2620 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2621 | PREV_INUSE);
2622 }
2623 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2624 {
2625 /* Use a newly allocated heap. */
2626 heap->ar_ptr = av;
2627 heap->prev = old_heap;
2628 av->system_mem += heap->size;
2629 /* Set up the new top. */
2630 top (av) = chunk_at_offset (heap, sizeof (*heap));
2631 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2632
2633 /* Setup fencepost and free the old top chunk with a multiple of
2634 MALLOC_ALIGNMENT in size. */
2635 /* The fencepost takes at least MINSIZE bytes, because it might
2636 become the top chunk again later. Note that a footer is set
2637 up, too, although the chunk is marked in use. */
2638 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2639 set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
2640 0 | PREV_INUSE);
2641 if (old_size >= MINSIZE)
2642 {
2643 set_head (chunk_at_offset (old_top, old_size),
2644 CHUNK_HDR_SZ | PREV_INUSE);
2645 set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
2646 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2647 _int_free (av, old_top, 1);
2648 }
2649 else
2650 {
2651 set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
2652 set_foot (old_top, (old_size + CHUNK_HDR_SZ));
2653 }
2654 }
2655 else if (!tried_mmap)
2656 {
2657 /* We can at least try to use mmap to get memory. If new_heap fails
2658 it is unlikely that trying to allocate huge pages will
2659 succeed. */
2660 char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
2661 if (mm != MAP_FAILED)
2662 return mm;
2663 }
2664 }
2665 else /* av == main_arena */
2666
2667
2668 { /* Request enough space for nb + pad + overhead */
2669 size = nb + mp_.top_pad + MINSIZE;
2670
2671 /*
2672 If contiguous, we can subtract out existing space that we hope to
2673 combine with new space. We add it back later only if
2674 we don't actually get contiguous space.
2675 */
2676
2677 if (contiguous (av))
2678 size -= old_size;
2679
2680 /*
2681 Round to a multiple of page size or huge page size.
2682 If MORECORE is not contiguous, this ensures that we only call it
2683 with whole-page arguments. And if MORECORE is contiguous and
2684 this is not first time through, this preserves page-alignment of
2685 previous calls. Otherwise, we correct to page-align below.
2686 */
2687
2688 #ifdef MADV_HUGEPAGE
2689 /* Defined in brk.c. */
2690 extern void *__curbrk;
2691 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2692 {
2693 uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
2694 mp_.thp_pagesize);
2695 size = top - (uintptr_t) __curbrk;
2696 }
2697 else
2698 #endif
2699 size = ALIGN_UP (size, GLRO(dl_pagesize));
2700
2701 /*
2702 Don't try to call MORECORE if argument is so big as to appear
2703 negative. Note that since mmap takes size_t arg, it may succeed
2704 below even if we cannot call MORECORE.
2705 */
2706
2707 if (size > 0)
2708 {
2709 brk = (char *) (MORECORE (size));
2710 if (brk != (char *) (MORECORE_FAILURE))
2711 madvise_thp (brk, size);
2712 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2713 }
2714
2715 if (brk == (char *) (MORECORE_FAILURE))
2716 {
2717 /*
2718 If have mmap, try using it as a backup when MORECORE fails or
2719 cannot be used. This is worth doing on systems that have "holes" in
2720 address space, so sbrk cannot extend to give contiguous space, but
2721 space is available elsewhere. Note that we ignore mmap max count
2722 and threshold limits, since the space will not be used as a
2723 segregated mmap region.
2724 */
2725
2726 char *mbrk = MAP_FAILED;
2727 if (mp_.hp_pagesize > 0)
2728 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
2729 mp_.hp_pagesize, mp_.hp_pagesize,
2730 mp_.hp_flags, av);
2731 if (mbrk == MAP_FAILED)
2732 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, MMAP_AS_MORECORE_SIZE,
2733 pagesize, 0, av);
2734 if (mbrk != MAP_FAILED)
2735 {
2736 /* We do not need, and cannot use, another sbrk call to find end */
2737 brk = mbrk;
2738 snd_brk = brk + size;
2739 }
2740 }
2741
2742 if (brk != (char *) (MORECORE_FAILURE))
2743 {
2744 if (mp_.sbrk_base == 0)
2745 mp_.sbrk_base = brk;
2746 av->system_mem += size;
2747
2748 /*
2749 If MORECORE extends previous space, we can likewise extend top size.
2750 */
2751
2752 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2753 set_head (old_top, (size + old_size) | PREV_INUSE);
2754
2755 else if (contiguous (av) && old_size && brk < old_end)
2756 /* Oops! Someone else killed our space. Can't touch anything. */
2757 malloc_printerr ("break adjusted to free malloc space");
2758
2759 /*
2760 Otherwise, make adjustments:
2761
2762 * If the first time through or noncontiguous, we need to call sbrk
2763 just to find out where the end of memory lies.
2764
2765 * We need to ensure that all returned chunks from malloc will meet
2766 MALLOC_ALIGNMENT
2767
2768 * If there was an intervening foreign sbrk, we need to adjust sbrk
2769 request size to account for fact that we will not be able to
2770 combine new space with existing space in old_top.
2771
2772 * Almost all systems internally allocate whole pages at a time, in
2773 which case we might as well use the whole last page of request.
2774 So we allocate enough more memory to hit a page boundary now,
2775 which in turn causes future contiguous calls to page-align.
2776 */
2777
2778 else
2779 {
2780 front_misalign = 0;
2781 end_misalign = 0;
2782 correction = 0;
2783 aligned_brk = brk;
2784
2785 /* handle contiguous cases */
2786 if (contiguous (av))
2787 {
2788 /* Count foreign sbrk as system_mem. */
2789 if (old_size)
2790 av->system_mem += brk - old_end;
2791
2792 /* Guarantee alignment of first new chunk made from this space */
2793
2794 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2795 if (front_misalign > 0)
2796 {
2797 /*
2798 Skip over some bytes to arrive at an aligned position.
2799 We don't need to specially mark these wasted front bytes.
2800 They will never be accessed anyway because
2801 prev_inuse of av->top (and any chunk created from its start)
2802 is always true after initialization.
2803 */
2804
2805 correction = MALLOC_ALIGNMENT - front_misalign;
2806 aligned_brk += correction;
2807 }
2808
2809 /*
2810 If this isn't adjacent to existing space, then we will not
2811 be able to merge with old_top space, so must add to 2nd request.
2812 */
2813
2814 correction += old_size;
2815
2816 /* Extend the end address to hit a page boundary */
2817 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2818 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2819
2820 assert (correction >= 0);
2821 snd_brk = (char *) (MORECORE (correction));
2822
2823 /*
2824 If can't allocate correction, try to at least find out current
2825 brk. It might be enough to proceed without failing.
2826
2827 Note that if second sbrk did NOT fail, we assume that space
2828 is contiguous with first sbrk. This is a safe assumption unless
2829 program is multithreaded but doesn't use locks and a foreign sbrk
2830 occurred between our first and second calls.
2831 */
2832
2833 if (snd_brk == (char *) (MORECORE_FAILURE))
2834 {
2835 correction = 0;
2836 snd_brk = (char *) (MORECORE (0));
2837 }
2838 else
2839 madvise_thp (snd_brk, correction);
2840 }
2841
2842 /* handle non-contiguous cases */
2843 else
2844 {
2845 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2846 /* MORECORE/mmap must correctly align */
2847 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2848 else
2849 {
2850 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2851 if (front_misalign > 0)
2852 {
2853 /*
2854 Skip over some bytes to arrive at an aligned position.
2855 We don't need to specially mark these wasted front bytes.
2856 They will never be accessed anyway because
2857 prev_inuse of av->top (and any chunk created from its start)
2858 is always true after initialization.
2859 */
2860
2861 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2862 }
2863 }
2864
2865 /* Find out current end of memory */
2866 if (snd_brk == (char *) (MORECORE_FAILURE))
2867 {
2868 snd_brk = (char *) (MORECORE (0));
2869 }
2870 }
2871
2872 /* Adjust top based on results of second sbrk */
2873 if (snd_brk != (char *) (MORECORE_FAILURE))
2874 {
2875 av->top = (mchunkptr) aligned_brk;
2876 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2877 av->system_mem += correction;
2878
2879 /*
2880 If not the first time through, we either have a
2881 gap due to foreign sbrk or a non-contiguous region. Insert a
2882 double fencepost at old_top to prevent consolidation with space
2883 we don't own. These fenceposts are artificial chunks that are
2884 marked as inuse and are in any case too small to use. We need
2885 two to make sizes and alignments work out.
2886 */
2887
2888 if (old_size != 0)
2889 {
2890 /*
2891 Shrink old_top to insert fenceposts, keeping size a
2892 multiple of MALLOC_ALIGNMENT. We know there is at least
2893 enough space in old_top to do this.
2894 */
2895 old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
2896 set_head (old_top, old_size | PREV_INUSE);
2897
2898 /*
2899 Note that the following assignments completely overwrite
2900 old_top when old_size was previously MINSIZE. This is
2901 intentional. We need the fencepost, even if old_top otherwise gets
2902 lost.
2903 */
2904 set_head (chunk_at_offset (old_top, old_size),
2905 CHUNK_HDR_SZ | PREV_INUSE);
2906 set_head (chunk_at_offset (old_top,
2907 old_size + CHUNK_HDR_SZ),
2908 CHUNK_HDR_SZ | PREV_INUSE);
2909
2910 /* If possible, release the rest. */
2911 if (old_size >= MINSIZE)
2912 {
2913 _int_free (av, old_top, 1);
2914 }
2915 }
2916 }
2917 }
2918 }
2919 } /* if (av != &main_arena) */
2920
2921 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2922 av->max_system_mem = av->system_mem;
2923 check_malloc_state (av);
2924
2925 /* finally, do the allocation */
2926 p = av->top;
2927 size = chunksize (p);
2928
2929 /* check that one of the above allocation paths succeeded */
2930 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2931 {
2932 remainder_size = size - nb;
2933 remainder = chunk_at_offset (p, nb);
2934 av->top = remainder;
2935 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2936 set_head (remainder, remainder_size | PREV_INUSE);
2937 check_malloced_chunk (av, p, nb);
2938 return chunk2mem (p);
2939 }
2940
2941 /* catch all failure paths */
2942 __set_errno (ENOMEM);
2943 return 0;
2944 }
2945
2946
2947 /*
2948 systrim is an inverse of sorts to sysmalloc. It gives memory back
2949 to the system (via negative arguments to sbrk) if there is unused
2950 memory at the `high' end of the malloc pool. It is called
2951 automatically by free() when top space exceeds the trim
2952 threshold. It is also called by the public malloc_trim routine. It
2953 returns 1 if it actually released any memory, else 0.
2954 */
2955
2956 static int
2957 systrim (size_t pad, mstate av)
2958 {
2959 long top_size; /* Amount of top-most memory */
2960 long extra; /* Amount to release */
2961 long released; /* Amount actually released */
2962 char *current_brk; /* address returned by pre-check sbrk call */
2963 char *new_brk; /* address returned by post-check sbrk call */
2964 long top_area;
2965
2966 top_size = chunksize (av->top);
2967
2968 top_area = top_size - MINSIZE - 1;
2969 if (top_area <= pad)
2970 return 0;
2971
2972 /* Release in pagesize units and round down to the nearest page. */
2973 #ifdef MADV_HUGEPAGE
2974 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2975 extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize);
2976 else
2977 #endif
2978 extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize));
2979
2980 if (extra == 0)
2981 return 0;
2982
2983 /*
2984 Only proceed if end of memory is where we last set it.
2985 This avoids problems if there were foreign sbrk calls.
2986 */
2987 current_brk = (char *) (MORECORE (0));
2988 if (current_brk == (char *) (av->top) + top_size)
2989 {
2990 /*
2991 Attempt to release memory. We ignore MORECORE return value,
2992 and instead call again to find out where new end of memory is.
2993 This avoids problems if the first call releases less than we asked,
2994 or if failure somehow altered the brk value. (We could still
2995 encounter problems if it altered brk in some very bad way,
2996 but the only thing we can do is adjust anyway, which will cause
2997 some downstream failure.)
2998 */
2999
3000 MORECORE (-extra);
3001 new_brk = (char *) (MORECORE (0));
3002
3003 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
3004
3005 if (new_brk != (char *) MORECORE_FAILURE)
3006 {
3007 released = (long) (current_brk - new_brk);
3008
3009 if (released != 0)
3010 {
3011 /* Success. Adjust top. */
3012 av->system_mem -= released;
3013 set_head (av->top, (top_size - released) | PREV_INUSE);
3014 check_malloc_state (av);
3015 return 1;
3016 }
3017 }
3018 }
3019 return 0;
3020 }
3021
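/* Editor's note: an illustrative, non-compiled sketch (not part of glibc).
   systrim runs automatically from free () when the top chunk grows beyond
   the trim threshold, and it can also be requested explicitly through the
   public malloc_trim interface.  */
#if 0
#include <malloc.h>
#include <stdlib.h>

int
main (void)
{
  void *p = malloc (1 << 20);
  free (p);            /* may leave a large top chunk behind            */
  malloc_trim (0);     /* ask for any releasable top memory to be freed */
  return 0;
}
#endif
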
3022 static void
3023 munmap_chunk (mchunkptr p)
3024 {
3025 size_t pagesize = GLRO (dl_pagesize);
3026 INTERNAL_SIZE_T size = chunksize (p);
3027
3028 assert (chunk_is_mmapped (p));
3029
3030 uintptr_t mem = (uintptr_t) chunk2mem (p);
3031 uintptr_t block = (uintptr_t) p - prev_size (p);
3032 size_t total_size = prev_size (p) + size;
3033 /* Unfortunately we have to do the compiler's job by hand here. Normally
3034 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3035 page size. But gcc does not recognize the optimization possibility
3036 (at the moment, at least) so we combine the two values into one before
3037 the bit test. */
3038 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3039 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3040 malloc_printerr ("munmap_chunk(): invalid pointer");
3041
3042 atomic_fetch_add_relaxed (&mp_.n_mmaps, -1);
3043 atomic_fetch_add_relaxed (&mp_.mmapped_mem, -total_size);
3044
3045 /* If munmap fails, the process's virtual memory address space is in a
3046 bad state. Just leave the block hanging around; the process will
3047 terminate shortly anyway since not much can be done. */
3048 __munmap ((char *) block, total_size);
3049 }
3050
3051 #if HAVE_MREMAP
3052
3053 static mchunkptr
3054 mremap_chunk (mchunkptr p, size_t new_size)
3055 {
3056 size_t pagesize = GLRO (dl_pagesize);
3057 INTERNAL_SIZE_T offset = prev_size (p);
3058 INTERNAL_SIZE_T size = chunksize (p);
3059 char *cp;
3060
3061 assert (chunk_is_mmapped (p));
3062
3063 uintptr_t block = (uintptr_t) p - offset;
3064 uintptr_t mem = (uintptr_t) chunk2mem(p);
3065 size_t total_size = offset + size;
3066 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3067 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3068 malloc_printerr("mremap_chunk(): invalid pointer");
3069
3070 /* Note the extra SIZE_SZ overhead as in sysmalloc_mmap(). */
3071 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
3072
3073 /* No need to remap if the number of pages does not change. */
3074 if (total_size == new_size)
3075 return p;
3076
3077 cp = (char *) __mremap ((char *) block, total_size, new_size,
3078 MREMAP_MAYMOVE);
3079
3080 if (cp == MAP_FAILED)
3081 return 0;
3082
3083 madvise_thp (cp, new_size);
3084
3085 p = (mchunkptr) (cp + offset);
3086
3087 assert (aligned_OK (chunk2mem (p)));
3088
3089 assert (prev_size (p) == offset);
3090 set_head (p, (new_size - offset) | IS_MMAPPED);
3091
3092 INTERNAL_SIZE_T new;
3093 new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset)
3094 + new_size - size - offset;
3095 atomic_max (&mp_.max_mmapped_mem, new);
3096 return p;
3097 }
3098 #endif /* HAVE_MREMAP */
3099
3100 /*------------------------ Public wrappers. --------------------------------*/
3101
3102 #if USE_TCACHE
3103
3104 /* We overlay this structure on the user-data portion of a chunk when
3105 the chunk is stored in the per-thread cache. */
3106 typedef struct tcache_entry
3107 {
3108 struct tcache_entry *next;
3109 /* This field exists to detect double frees. */
3110 uintptr_t key;
3111 } tcache_entry;
3112
3113 /* There is one of these for each thread, which contains the
3114 per-thread cache (hence "tcache_perthread_struct"). Keeping
3115 overall size low is mildly important. Note that COUNTS and ENTRIES
3116 are redundant (we could have just counted the linked list each
3117 time); keeping the counts is purely a performance optimization. */
3118 typedef struct tcache_perthread_struct
3119 {
3120 uint16_t counts[TCACHE_MAX_BINS];
3121 tcache_entry *entries[TCACHE_MAX_BINS];
3122 } tcache_perthread_struct;
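/* Editor's sizing sketch (illustrative; assumes the default
   TCACHE_MAX_BINS of 64 and a 64-bit configuration):

       counts:  64 * sizeof (uint16_t)       = 128 bytes
       entries: 64 * sizeof (tcache_entry *) = 512 bytes
       sizeof (tcache_perthread_struct)     ~= 640 bytes

   so the per-thread cache header is well under a page and is obtained
   from the arena like any other small request (see tcache_init
   below).  */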
3123
3124 static __thread bool tcache_shutting_down = false;
3125 static __thread tcache_perthread_struct *tcache = NULL;
3126
3127 /* Process-wide key to try to catch a double-free in the same thread. */
3128 static uintptr_t tcache_key;
3129
3130 /* The value of tcache_key does not really have to be a cryptographically
3131 secure random number. It only needs to be arbitrary enough so that it does
3132 not collide with values present in applications. If a collision does happen
3133 consistently enough, it could cause a degradation in performance since the
3134 entire list is walked to verify that the block has indeed been freed a
3135 second time. The odds of this happening are exceedingly low though, about 1
3136 in 2^wordsize. There is probably a higher chance of the performance
3137 degradation being due to a double free where the first free happened in a
3138 different thread; that's a case this check does not cover. */
3139 static void
3140 tcache_key_initialize (void)
3141 {
3142 if (__getrandom_nocancel (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
3143 != sizeof (tcache_key))
3144 {
3145 tcache_key = random_bits ();
3146 #if __WORDSIZE == 64
3147 tcache_key = (tcache_key << 32) | random_bits ();
3148 #endif
3149 }
3150 }
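/* Editor's sketch of how the key is consumed (the real check is in
   _int_free further down in this file): on free, a chunk whose payload
   already carries e->key == tcache_key is only *suspected* of being a
   double free; the suspicion is confirmed by walking the bin:

       tcache_entry *e = (tcache_entry *) chunk2mem (p);
       if (e->key == tcache_key)      // cheap first-level filter
         for (tmp = tcache->entries[tc_idx]; tmp != NULL;
              tmp = REVEAL_PTR (tmp->next))
           if (tmp == e)
             malloc_printerr ("free(): double free detected in tcache 2");

   A live chunk whose payload merely happens to equal tcache_key costs
   one list walk and is not reported.  */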
3151
3152 /* Caller must ensure that tc_idx is valid and that there is room
3153 for more chunks. */
3154 static __always_inline void
3155 tcache_put (mchunkptr chunk, size_t tc_idx)
3156 {
3157 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
3158
3159 /* Mark this chunk as "in the tcache" so the test in _int_free will
3160 detect a double free. */
3161 e->key = tcache_key;
3162
3163 e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
3164 tcache->entries[tc_idx] = e;
3165 ++(tcache->counts[tc_idx]);
3166 }
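/* Editor's note on PROTECT_PTR/REVEAL_PTR (defined earlier in this
   file; shown here only to illustrate the "safe-linking" scheme): the
   stored forward pointer is XOR-mangled with the address of the slot
   holding it, shifted down by the page bits, approximately

       PROTECT_PTR (pos, ptr) == (__typeof (ptr)) (((size_t) pos >> 12)
                                                   ^ (size_t) ptr)
       REVEAL_PTR (ptr)       == PROTECT_PTR (&ptr, ptr)

   so overwriting e->next without knowing a heap address cannot aim the
   list at an arbitrary target, and an unmangled pointer stored there
   will usually fail the aligned_OK check in tcache_get_n.  */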
3167
3168 /* Caller must ensure that tc_idx is valid and that there are
3169 chunks available to remove. Removes a chunk from the middle of the
3170 list. */
3171 static __always_inline void *
3172 tcache_get_n (size_t tc_idx, tcache_entry **ep)
3173 {
3174 tcache_entry *e;
3175 if (ep == &(tcache->entries[tc_idx]))
3176 e = *ep;
3177 else
3178 e = REVEAL_PTR (*ep);
3179
3180 if (__glibc_unlikely (!aligned_OK (e)))
3181 malloc_printerr ("malloc(): unaligned tcache chunk detected");
3182
3183 if (ep == &(tcache->entries[tc_idx]))
3184 *ep = REVEAL_PTR (e->next);
3185 else
3186 *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next));
3187
3188 --(tcache->counts[tc_idx]);
3189 e->key = 0;
3190 return (void *) e;
3191 }
3192
3193 /* Like the above, but removes from the head of the list. */
3194 static __always_inline void *
3195 tcache_get (size_t tc_idx)
3196 {
3197 return tcache_get_n (tc_idx, & tcache->entries[tc_idx]);
3198 }
3199
3200 /* Iterates through the tcache linked list. */
3201 static __always_inline tcache_entry *
3202 tcache_next (tcache_entry *e)
3203 {
3204 return (tcache_entry *) REVEAL_PTR (e->next);
3205 }
3206
3207 static void
3208 tcache_thread_shutdown (void)
3209 {
3210 int i;
3211 tcache_perthread_struct *tcache_tmp = tcache;
3212
3213 tcache_shutting_down = true;
3214
3215 if (!tcache)
3216 return;
3217
3218 /* Disable the tcache and prevent it from being reinitialized. */
3219 tcache = NULL;
3220
3221 /* Free all of the entries and the tcache itself back to the arena
3222 heap for coalescing. */
3223 for (i = 0; i < TCACHE_MAX_BINS; ++i)
3224 {
3225 while (tcache_tmp->entries[i])
3226 {
3227 tcache_entry *e = tcache_tmp->entries[i];
3228 if (__glibc_unlikely (!aligned_OK (e)))
3229 malloc_printerr ("tcache_thread_shutdown(): "
3230 "unaligned tcache chunk detected");
3231 tcache_tmp->entries[i] = REVEAL_PTR (e->next);
3232 __libc_free (e);
3233 }
3234 }
3235
3236 __libc_free (tcache_tmp);
3237 }
3238
3239 static void
3240 tcache_init(void)
3241 {
3242 mstate ar_ptr;
3243 void *victim = 0;
3244 const size_t bytes = sizeof (tcache_perthread_struct);
3245
3246 if (tcache_shutting_down)
3247 return;
3248
3249 arena_get (ar_ptr, bytes);
3250 victim = _int_malloc (ar_ptr, bytes);
3251 if (!victim && ar_ptr != NULL)
3252 {
3253 ar_ptr = arena_get_retry (ar_ptr, bytes);
3254 victim = _int_malloc (ar_ptr, bytes);
3255 }
3256
3257
3258 if (ar_ptr != NULL)
3259 __libc_lock_unlock (ar_ptr->mutex);
3260
3261 /* In a low memory situation, we may not be able to allocate memory
3262 - in which case, we just keep trying later. However, we
3263 typically do this very early, so either there is sufficient
3264 memory, or there isn't enough memory to do non-trivial
3265 allocations anyway. */
3266 if (victim)
3267 {
3268 tcache = (tcache_perthread_struct *) victim;
3269 memset (tcache, 0, sizeof (tcache_perthread_struct));
3270 }
3271
3272 }
3273
3274 # define MAYBE_INIT_TCACHE() \
3275 if (__glibc_unlikely (tcache == NULL)) \
3276 tcache_init();
3277
3278 #else /* !USE_TCACHE */
3279 # define MAYBE_INIT_TCACHE()
3280
3281 static void
3282 tcache_thread_shutdown (void)
3283 {
3284 /* Nothing to do if there is no thread cache. */
3285 }
3286
3287 #endif /* !USE_TCACHE */
3288
3289 #if IS_IN (libc)
3290 void *
3291 __libc_malloc (size_t bytes)
3292 {
3293 mstate ar_ptr;
3294 void *victim;
3295
3296 _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
3297 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3298
3299 if (!__malloc_initialized)
3300 ptmalloc_init ();
3301 #if USE_TCACHE
3302 /* _int_free also calls request2size, so be careful not to pad twice. */
3303 size_t tbytes = checked_request2size (bytes);
3304 if (tbytes == 0)
3305 {
3306 __set_errno (ENOMEM);
3307 return NULL;
3308 }
3309 size_t tc_idx = csize2tidx (tbytes);
3310
3311 MAYBE_INIT_TCACHE ();
3312
3313 DIAG_PUSH_NEEDS_COMMENT;
3314 if (tc_idx < mp_.tcache_bins
3315 && tcache != NULL
3316 && tcache->counts[tc_idx] > 0)
3317 {
3318 victim = tcache_get (tc_idx);
3319 return tag_new_usable (victim);
3320 }
3321 DIAG_POP_NEEDS_COMMENT;
3322 #endif
3323
3324 if (SINGLE_THREAD_P)
3325 {
3326 victim = tag_new_usable (_int_malloc (&main_arena, bytes));
3327 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3328 &main_arena == arena_for_chunk (mem2chunk (victim)));
3329 return victim;
3330 }
3331
3332 arena_get (ar_ptr, bytes);
3333
3334 victim = _int_malloc (ar_ptr, bytes);
3335 /* Retry with another arena only if we were able to find a usable arena
3336 before. */
3337 if (!victim && ar_ptr != NULL)
3338 {
3339 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3340 ar_ptr = arena_get_retry (ar_ptr, bytes);
3341 victim = _int_malloc (ar_ptr, bytes);
3342 }
3343
3344 if (ar_ptr != NULL)
3345 __libc_lock_unlock (ar_ptr->mutex);
3346
3347 victim = tag_new_usable (victim);
3348
3349 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3350 ar_ptr == arena_for_chunk (mem2chunk (victim)));
3351 return victim;
3352 }
3353 libc_hidden_def (__libc_malloc)
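/* Editor's worked example for the tcache fast path above (assuming a
   64-bit build with SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and
   MINSIZE == 32):

       malloc (24):  checked_request2size (24)  -> 32   (24 + 8, aligned up)
                     csize2tidx (32)            -> 0    (first tcache bin)
       malloc (100): checked_request2size (100) -> 112
                     csize2tidx (112)           -> 5

   When tcache->counts[tc_idx] is non-zero the chunk is popped without
   taking any arena lock; otherwise control falls through to
   _int_malloc.  */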
3354
3355 void
3356 __libc_free (void *mem)
3357 {
3358 mstate ar_ptr;
3359 mchunkptr p; /* chunk corresponding to mem */
3360
3361 if (mem == 0) /* free(0) has no effect */
3362 return;
3363
3364 /* Quickly check that the freed pointer matches the tag for the memory.
3365 This gives a useful double-free detection. */
3366 if (__glibc_unlikely (mtag_enabled))
3367 *(volatile char *)mem;
3368
3369 int err = errno;
3370
3371 p = mem2chunk (mem);
3372
3373 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3374 {
3375 /* See if the dynamic brk/mmap threshold needs adjusting.
3376 Dumped fake mmapped chunks do not affect the threshold. */
3377 if (!mp_.no_dyn_threshold
3378 && chunksize_nomask (p) > mp_.mmap_threshold
3379 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
3380 {
3381 mp_.mmap_threshold = chunksize (p);
3382 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3383 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3384 mp_.mmap_threshold, mp_.trim_threshold);
3385 }
3386 munmap_chunk (p);
3387 }
3388 else
3389 {
3390 MAYBE_INIT_TCACHE ();
3391
3392 /* Mark the chunk as belonging to the library again. */
3393 (void)tag_region (chunk2mem (p), memsize (p));
3394
3395 ar_ptr = arena_for_chunk (p);
3396 _int_free (ar_ptr, p, 0);
3397 }
3398
3399 __set_errno (err);
3400 }
3401 libc_hidden_def (__libc_free)
3402
3403 void *
3404 __libc_realloc (void *oldmem, size_t bytes)
3405 {
3406 mstate ar_ptr;
3407 INTERNAL_SIZE_T nb; /* padded request size */
3408
3409 void *newp; /* chunk to return */
3410
3411 if (!__malloc_initialized)
3412 ptmalloc_init ();
3413
3414 #if REALLOC_ZERO_BYTES_FREES
3415 if (bytes == 0 && oldmem != NULL)
3416 {
3417 __libc_free (oldmem); return 0;
3418 }
3419 #endif
3420
3421 /* realloc of null is supposed to be same as malloc */
3422 if (oldmem == 0)
3423 return __libc_malloc (bytes);
3424
3425 /* Perform a quick check to ensure that the pointer's tag matches the
3426 memory's tag. */
3427 if (__glibc_unlikely (mtag_enabled))
3428 *(volatile char*) oldmem;
3429
3430 /* chunk corresponding to oldmem */
3431 const mchunkptr oldp = mem2chunk (oldmem);
3432
3433 /* Return the chunk as is if the request grows within usable bytes, typically
3434 into the alignment padding. We want to avoid reusing the block for
3435 shrinkages because it ends up unnecessarily fragmenting the address space.
3436 This is also why the heuristic misses alignment padding for THP for
3437 now. */
3438 size_t usable = musable (oldmem);
3439 if (bytes <= usable)
3440 {
3441 size_t difference = usable - bytes;
3442 if ((unsigned long) difference < 2 * sizeof (INTERNAL_SIZE_T)
3443 || (chunk_is_mmapped (oldp) && difference <= GLRO (dl_pagesize)))
3444 return oldmem;
3445 }
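/* Editor's worked example for the in-place heuristic above (64-bit,
   non-mmapped chunk): a block with musable (oldmem) == 40 (a 48-byte
   chunk) satisfies realloc (p, 28) in place, because 40 - 28 == 12 is
   less than 2 * sizeof (INTERNAL_SIZE_T) == 16; realloc (p, 20) leaves
   a difference of 20 and falls through to the normal shrink/copy path
   below. */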
3446
3447 /* its size */
3448 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
3449
3450 if (chunk_is_mmapped (oldp))
3451 ar_ptr = NULL;
3452 else
3453 {
3454 MAYBE_INIT_TCACHE ();
3455 ar_ptr = arena_for_chunk (oldp);
3456 }
3457
3458 /* Little security check which won't hurt performance: the allocator
3459 never wraps around at the end of the address space. Therefore
3460 we can exclude some size values which might appear here by
3461 accident or by "design" from some intruder. */
3462 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3463 || __builtin_expect (misaligned_chunk (oldp), 0)))
3464 malloc_printerr ("realloc(): invalid pointer");
3465
3466 nb = checked_request2size (bytes);
3467 if (nb == 0)
3468 {
3469 __set_errno (ENOMEM);
3470 return NULL;
3471 }
3472
3473 if (chunk_is_mmapped (oldp))
3474 {
3475 void *newmem;
3476
3477 #if HAVE_MREMAP
3478 newp = mremap_chunk (oldp, nb);
3479 if (newp)
3480 {
3481 void *newmem = chunk2mem_tag (newp);
3482 /* Give the new block a different tag. This helps to ensure
3483 that stale handles to the previous mapping are not
3484 reused. There's a performance hit for both us and the
3485 caller for doing this, so we might want to
3486 reconsider. */
3487 return tag_new_usable (newmem);
3488 }
3489 #endif
3490 /* Note the extra SIZE_SZ overhead. */
3491 if (oldsize - SIZE_SZ >= nb)
3492 return oldmem; /* do nothing */
3493
3494 /* Must alloc, copy, free. */
3495 newmem = __libc_malloc (bytes);
3496 if (newmem == 0)
3497 return 0; /* propagate failure */
3498
3499 memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
3500 munmap_chunk (oldp);
3501 return newmem;
3502 }
3503
3504 if (SINGLE_THREAD_P)
3505 {
3506 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3507 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3508 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3509
3510 return newp;
3511 }
3512
3513 __libc_lock_lock (ar_ptr->mutex);
3514
3515 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3516
3517 __libc_lock_unlock (ar_ptr->mutex);
3518 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3519 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3520
3521 if (newp == NULL)
3522 {
3523 /* Try harder to allocate memory in other arenas. */
3524 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3525 newp = __libc_malloc (bytes);
3526 if (newp != NULL)
3527 {
3528 size_t sz = memsize (oldp);
3529 memcpy (newp, oldmem, sz);
3530 (void) tag_region (chunk2mem (oldp), sz);
3531 _int_free (ar_ptr, oldp, 0);
3532 }
3533 }
3534
3535 return newp;
3536 }
3537 libc_hidden_def (__libc_realloc)
3538
3539 void *
3540 __libc_memalign (size_t alignment, size_t bytes)
3541 {
3542 if (!__malloc_initialized)
3543 ptmalloc_init ();
3544
3545 void *address = RETURN_ADDRESS (0);
3546 return _mid_memalign (alignment, bytes, address);
3547 }
3548 libc_hidden_def (__libc_memalign)
3549
3550 /* For ISO C17. */
3551 void *
3552 weak_function
3553 aligned_alloc (size_t alignment, size_t bytes)
3554 {
3555 if (!__malloc_initialized)
3556 ptmalloc_init ();
3557
3558 /* Similar to memalign, but starting with ISO C17 the standard
3559 requires an error for alignments that are not supported by the
3560 implementation. Valid alignments for the current implementation
3561 are non-negative powers of two. */
3562 if (!powerof2 (alignment) || alignment == 0)
3563 {
3564 __set_errno (EINVAL);
3565 return 0;
3566 }
3567
3568 void *address = RETURN_ADDRESS (0);
3569 return _mid_memalign (alignment, bytes, address);
3570 }
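/* Editor's illustration of the C17 requirement above:
   aligned_alloc (24, 100) fails with EINVAL because 24 is not a power
   of two, whereas memalign (24, 100) goes straight to _mid_memalign,
   which quietly raises the alignment to a usable power of two (32 on a
   typical 64-bit build). */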
3571
3572 static void *
3573 _mid_memalign (size_t alignment, size_t bytes, void *address)
3574 {
3575 mstate ar_ptr;
3576 void *p;
3577
3578 /* If we need less alignment than we give anyway, just relay to malloc. */
3579 if (alignment <= MALLOC_ALIGNMENT)
3580 return __libc_malloc (bytes);
3581
3582 /* Otherwise, ensure that it is at least a minimum chunk size */
3583 if (alignment < MINSIZE)
3584 alignment = MINSIZE;
3585
3586 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3587 power of 2 and will cause overflow in the check below. */
3588 if (alignment > SIZE_MAX / 2 + 1)
3589 {
3590 __set_errno (EINVAL);
3591 return 0;
3592 }
3593
3594
3595 /* Make sure alignment is a power of 2. */
3596 if (!powerof2 (alignment))
3597 {
3598 size_t a = MALLOC_ALIGNMENT * 2;
3599 while (a < alignment)
3600 a <<= 1;
3601 alignment = a;
3602 }
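/* Editor's worked example for the round-up above (assuming a 64-bit
   build where MALLOC_ALIGNMENT is 16): a requested alignment of 48
   starts the scan at 32 and doubles until it reaches 64; an alignment
   of 3000 ends up as 4096.  The loop cannot overflow because
   alignments above SIZE_MAX / 2 + 1 were already rejected. */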
3603
3604 #if USE_TCACHE
3605 {
3606 size_t tbytes;
3607 tbytes = checked_request2size (bytes);
3608 if (tbytes == 0)
3609 {
3610 __set_errno (ENOMEM);
3611 return NULL;
3612 }
3613 size_t tc_idx = csize2tidx (tbytes);
3614
3615 if (tc_idx < mp_.tcache_bins
3616 && tcache != NULL
3617 && tcache->counts[tc_idx] > 0)
3618 {
3619 /* The tcache itself isn't encoded, but the chain is. */
3620 tcache_entry **tep = & tcache->entries[tc_idx];
3621 tcache_entry *te = *tep;
3622 while (te != NULL && !PTR_IS_ALIGNED (te, alignment))
3623 {
3624 tep = & (te->next);
3625 te = tcache_next (te);
3626 }
3627 if (te != NULL)
3628 {
3629 void *victim = tcache_get_n (tc_idx, tep);
3630 return tag_new_usable (victim);
3631 }
3632 }
3633 }
3634 #endif
3635
3636 if (SINGLE_THREAD_P)
3637 {
3638 p = _int_memalign (&main_arena, alignment, bytes);
3639 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3640 &main_arena == arena_for_chunk (mem2chunk (p)));
3641 return tag_new_usable (p);
3642 }
3643
3644 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3645
3646 p = _int_memalign (ar_ptr, alignment, bytes);
3647 if (!p && ar_ptr != NULL)
3648 {
3649 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3650 ar_ptr = arena_get_retry (ar_ptr, bytes);
3651 p = _int_memalign (ar_ptr, alignment, bytes);
3652 }
3653
3654 if (ar_ptr != NULL)
3655 __libc_lock_unlock (ar_ptr->mutex);
3656
3657 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3658 ar_ptr == arena_for_chunk (mem2chunk (p)));
3659 return tag_new_usable (p);
3660 }
3661
3662 void *
3663 __libc_valloc (size_t bytes)
3664 {
3665 if (!__malloc_initialized)
3666 ptmalloc_init ();
3667
3668 void *address = RETURN_ADDRESS (0);
3669 size_t pagesize = GLRO (dl_pagesize);
3670 return _mid_memalign (pagesize, bytes, address);
3671 }
3672
3673 void *
3674 __libc_pvalloc (size_t bytes)
3675 {
3676 if (!__malloc_initialized)
3677 ptmalloc_init ();
3678
3679 void *address = RETURN_ADDRESS (0);
3680 size_t pagesize = GLRO (dl_pagesize);
3681 size_t rounded_bytes;
3682 /* ALIGN_UP with overflow check. */
3683 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3684 pagesize - 1,
3685 &rounded_bytes)))
3686 {
3687 __set_errno (ENOMEM);
3688 return 0;
3689 }
3690 rounded_bytes = rounded_bytes & ~(pagesize - 1);
3691
3692 return _mid_memalign (pagesize, rounded_bytes, address);
3693 }
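/* Editor's worked example for the overflow-checked ALIGN_UP above
   (assuming a 4096-byte page): pvalloc (1) rounds the request up to
   4096 and pvalloc (4097) to 8192, while a request within
   pagesize - 1 of SIZE_MAX makes __builtin_add_overflow report
   failure, so the call returns NULL with errno set to ENOMEM. */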
3694
3695 void *
3696 __libc_calloc (size_t n, size_t elem_size)
3697 {
3698 mstate av;
3699 mchunkptr oldtop;
3700 INTERNAL_SIZE_T sz, oldtopsize;
3701 void *mem;
3702 unsigned long clearsize;
3703 unsigned long nclears;
3704 INTERNAL_SIZE_T *d;
3705 ptrdiff_t bytes;
3706
3707 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
3708 {
3709 __set_errno (ENOMEM);
3710 return NULL;
3711 }
3712
3713 sz = bytes;
3714
3715 if (!__malloc_initialized)
3716 ptmalloc_init ();
3717
3718 MAYBE_INIT_TCACHE ();
3719
3720 if (SINGLE_THREAD_P)
3721 av = &main_arena;
3722 else
3723 arena_get (av, sz);
3724
3725 if (av)
3726 {
3727 /* Check if we hand out the top chunk, in which case there may be no
3728 need to clear. */
3729 #if MORECORE_CLEARS
3730 oldtop = top (av);
3731 oldtopsize = chunksize (top (av));
3732 # if MORECORE_CLEARS < 2
3733 /* Only newly allocated memory is guaranteed to be cleared. */
3734 if (av == &main_arena &&
3735 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3736 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3737 # endif
3738 if (av != &main_arena)
3739 {
3740 heap_info *heap = heap_for_ptr (oldtop);
3741 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3742 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3743 }
3744 #endif
3745 }
3746 else
3747 {
3748 /* No usable arenas. */
3749 oldtop = 0;
3750 oldtopsize = 0;
3751 }
3752 mem = _int_malloc (av, sz);
3753
3754 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3755 av == arena_for_chunk (mem2chunk (mem)));
3756
3757 if (!SINGLE_THREAD_P)
3758 {
3759 if (mem == 0 && av != NULL)
3760 {
3761 LIBC_PROBE (memory_calloc_retry, 1, sz);
3762 av = arena_get_retry (av, sz);
3763 mem = _int_malloc (av, sz);
3764 }
3765
3766 if (av != NULL)
3767 __libc_lock_unlock (av->mutex);
3768 }
3769
3770 /* Allocation failed even after a retry. */
3771 if (mem == 0)
3772 return 0;
3773
3774 mchunkptr p = mem2chunk (mem);
3775
3776 /* If we are using memory tagging, then we need to set the tags
3777 regardless of MORECORE_CLEARS, so we zero the whole block while
3778 doing so. */
3779 if (__glibc_unlikely (mtag_enabled))
3780 return tag_new_zero_region (mem, memsize (p));
3781
3782 INTERNAL_SIZE_T csz = chunksize (p);
3783
3784 /* Two optional cases in which clearing not necessary */
3785 if (chunk_is_mmapped (p))
3786 {
3787 if (__builtin_expect (perturb_byte, 0))
3788 return memset (mem, 0, sz);
3789
3790 return mem;
3791 }
3792
3793 #if MORECORE_CLEARS
3794 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3795 {
3796 /* clear only the bytes from non-freshly-sbrked memory */
3797 csz = oldtopsize;
3798 }
3799 #endif
3800
3801 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3802 contents have an odd number of INTERNAL_SIZE_T-sized words;
3803 minimally 3. */
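/* Editor's worked example (64-bit, 8-byte INTERNAL_SIZE_T): the
   smallest chunk has csz == 32, so clearsize == 24 and nclears == 3,
   which is why the unrolled branch below zeroes three words
   unconditionally; anything above 9 words (72 bytes) is handed to
   memset. */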
3804 d = (INTERNAL_SIZE_T *) mem;
3805 clearsize = csz - SIZE_SZ;
3806 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3807 assert (nclears >= 3);
3808
3809 if (nclears > 9)
3810 return memset (d, 0, clearsize);
3811
3812 else
3813 {
3814 *(d + 0) = 0;
3815 *(d + 1) = 0;
3816 *(d + 2) = 0;
3817 if (nclears > 4)
3818 {
3819 *(d + 3) = 0;
3820 *(d + 4) = 0;
3821 if (nclears > 6)
3822 {
3823 *(d + 5) = 0;
3824 *(d + 6) = 0;
3825 if (nclears > 8)
3826 {
3827 *(d + 7) = 0;
3828 *(d + 8) = 0;
3829 }
3830 }
3831 }
3832 }
3833
3834 return mem;
3835 }
3836 #endif /* IS_IN (libc) */
3837
3838 /*
3839 ------------------------------ malloc ------------------------------
3840 */
3841
3842 static void *
3843 _int_malloc (mstate av, size_t bytes)
3844 {
3845 INTERNAL_SIZE_T nb; /* normalized request size */
3846 unsigned int idx; /* associated bin index */
3847 mbinptr bin; /* associated bin */
3848
3849 mchunkptr victim; /* inspected/selected chunk */
3850 INTERNAL_SIZE_T size; /* its size */
3851 int victim_index; /* its bin index */
3852
3853 mchunkptr remainder; /* remainder from a split */
3854 unsigned long remainder_size; /* its size */
3855
3856 unsigned int block; /* bit map traverser */
3857 unsigned int bit; /* bit map traverser */
3858 unsigned int map; /* current word of binmap */
3859
3860 mchunkptr fwd; /* misc temp for linking */
3861 mchunkptr bck; /* misc temp for linking */
3862
3863 #if USE_TCACHE
3864 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3865 #endif
3866
3867 /*
3868 Convert request size to internal form by adding SIZE_SZ bytes
3869 overhead plus possibly more to obtain necessary alignment and/or
3870 to obtain a size of at least MINSIZE, the smallest allocatable
3871 size. Also, checked_request2size returns false for request sizes
3872 that are so large that they wrap around zero when padded and
3873 aligned.
3874 */
3875
3876 nb = checked_request2size (bytes);
3877 if (nb == 0)
3878 {
3879 __set_errno (ENOMEM);
3880 return NULL;
3881 }
3882
3883 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3884 mmap. */
3885 if (__glibc_unlikely (av == NULL))
3886 {
3887 void *p = sysmalloc (nb, av);
3888 if (p != NULL)
3889 alloc_perturb (p, bytes);
3890 return p;
3891 }
3892
3893 /*
3894 If the size qualifies as a fastbin, first check corresponding bin.
3895 This code is safe to execute even if av is not yet initialized, so we
3896 can try it without checking, which saves some time on this fast path.
3897 */
3898
3899 #define REMOVE_FB(fb, victim, pp) \
3900 do \
3901 { \
3902 victim = pp; \
3903 if (victim == NULL) \
3904 break; \
3905 pp = REVEAL_PTR (victim->fd); \
3906 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
3907 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
3908 } \
3909 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
3910 != victim); \
3911
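/* Editor's note -- what REMOVE_FB amounts to (illustrative pseudo-steps
   only): a lock-free pop of the fastbin head using compare-and-swap:

       do
         victim = pp;                      // current head (may be NULL)
         pp     = REVEAL_PTR (victim->fd); // proposed new head
       while (!CAS (fb, expected victim, new pp));

   If another thread pushed or popped a chunk in between, the CAS fails,
   the loop re-reads the head and retries, so this path needs no arena
   lock. */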
3912 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3913 {
3914 idx = fastbin_index (nb);
3915 mfastbinptr *fb = &fastbin (av, idx);
3916 mchunkptr pp;
3917 victim = *fb;
3918
3919 if (victim != NULL)
3920 {
3921 if (__glibc_unlikely (misaligned_chunk (victim)))
3922 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3923
3924 if (SINGLE_THREAD_P)
3925 *fb = REVEAL_PTR (victim->fd);
3926 else
3927 REMOVE_FB (fb, pp, victim);
3928 if (__glibc_likely (victim != NULL))
3929 {
3930 size_t victim_idx = fastbin_index (chunksize (victim));
3931 if (__builtin_expect (victim_idx != idx, 0))
3932 malloc_printerr ("malloc(): memory corruption (fast)");
3933 check_remalloced_chunk (av, victim, nb);
3934 #if USE_TCACHE
3935 /* While we're here, if we see other chunks of the same size,
3936 stash them in the tcache. */
3937 size_t tc_idx = csize2tidx (nb);
3938 if (tcache != NULL && tc_idx < mp_.tcache_bins)
3939 {
3940 mchunkptr tc_victim;
3941
3942 /* While bin not empty and tcache not full, copy chunks. */
3943 while (tcache->counts[tc_idx] < mp_.tcache_count
3944 && (tc_victim = *fb) != NULL)
3945 {
3946 if (__glibc_unlikely (misaligned_chunk (tc_victim)))
3947 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
3948 if (SINGLE_THREAD_P)
3949 *fb = REVEAL_PTR (tc_victim->fd);
3950 else
3951 {
3952 REMOVE_FB (fb, pp, tc_victim);
3953 if (__glibc_unlikely (tc_victim == NULL))
3954 break;
3955 }
3956 tcache_put (tc_victim, tc_idx);
3957 }
3958 }
3959 #endif
3960 void *p = chunk2mem (victim);
3961 alloc_perturb (p, bytes);
3962 return p;
3963 }
3964 }
3965 }
3966
3967 /*
3968 If a small request, check regular bin. Since these "smallbins"
3969 hold one size each, no searching within bins is necessary.
3970 (For a large request, we need to wait until unsorted chunks are
3971 processed to find best fit. But for small ones, fits are exact
3972 anyway, so we can check now, which is faster.)
3973 */
3974
3975 if (in_smallbin_range (nb))
3976 {
3977 idx = smallbin_index (nb);
3978 bin = bin_at (av, idx);
3979
3980 if ((victim = last (bin)) != bin)
3981 {
3982 bck = victim->bk;
3983 if (__glibc_unlikely (bck->fd != victim))
3984 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3985 set_inuse_bit_at_offset (victim, nb);
3986 bin->bk = bck;
3987 bck->fd = bin;
3988
3989 if (av != &main_arena)
3990 set_non_main_arena (victim);
3991 check_malloced_chunk (av, victim, nb);
3992 #if USE_TCACHE
3993 /* While we're here, if we see other chunks of the same size,
3994 stash them in the tcache. */
3995 size_t tc_idx = csize2tidx (nb);
3996 if (tcache != NULL && tc_idx < mp_.tcache_bins)
3997 {
3998 mchunkptr tc_victim;
3999
4000 /* While bin not empty and tcache not full, copy chunks over. */
4001 while (tcache->counts[tc_idx] < mp_.tcache_count
4002 && (tc_victim = last (bin)) != bin)
4003 {
4004 if (tc_victim != 0)
4005 {
4006 bck = tc_victim->bk;
4007 set_inuse_bit_at_offset (tc_victim, nb);
4008 if (av != &main_arena)
4009 set_non_main_arena (tc_victim);
4010 bin->bk = bck;
4011 bck->fd = bin;
4012
4013 tcache_put (tc_victim, tc_idx);
4014 }
4015 }
4016 }
4017 #endif
4018 void *p = chunk2mem (victim);
4019 alloc_perturb (p, bytes);
4020 return p;
4021 }
4022 }
4023
4024 /*
4025 If this is a large request, consolidate fastbins before continuing.
4026 While it might look excessive to kill all fastbins before
4027 even seeing if there is space available, this avoids
4028 fragmentation problems normally associated with fastbins.
4029 Also, in practice, programs tend to have runs of either small or
4030 large requests, but less often mixtures, so consolidation is not
4031 invoked all that often in most programs. And the programs in which
4032 it is called frequently tend to fragment otherwise.
4033 */
4034
4035 else
4036 {
4037 idx = largebin_index (nb);
4038 if (atomic_load_relaxed (&av->have_fastchunks))
4039 malloc_consolidate (av);
4040 }
4041
4042 /*
4043 Process recently freed or remaindered chunks, taking one only if
4044 it is an exact fit, or, if this is a small request, if the chunk is the
4045 remainder from the most recent non-exact fit. Place other traversed chunks in
4046 bins. Note that this step is the only place in any routine where
4047 chunks are placed in bins.
4048
4049 The outer loop here is needed because we might not realize until
4050 near the end of malloc that we should have consolidated, so must
4051 do so and retry. This happens at most once, and only when we would
4052 otherwise need to expand memory to service a "small" request.
4053 */
4054
4055 #if USE_TCACHE
4056 INTERNAL_SIZE_T tcache_nb = 0;
4057 size_t tc_idx = csize2tidx (nb);
4058 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4059 tcache_nb = nb;
4060 int return_cached = 0;
4061
4062 tcache_unsorted_count = 0;
4063 #endif
4064
4065 for (;; )
4066 {
4067 int iters = 0;
4068 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
4069 {
4070 bck = victim->bk;
4071 size = chunksize (victim);
4072 mchunkptr next = chunk_at_offset (victim, size);
4073
4074 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
4075 || __glibc_unlikely (size > av->system_mem))
4076 malloc_printerr ("malloc(): invalid size (unsorted)");
4077 if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
4078 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
4079 malloc_printerr ("malloc(): invalid next size (unsorted)");
4080 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
4081 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
4082 if (__glibc_unlikely (bck->fd != victim)
4083 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
4084 malloc_printerr ("malloc(): unsorted double linked list corrupted");
4085 if (__glibc_unlikely (prev_inuse (next)))
4086 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
4087
4088 /*
4089 If a small request, try to use last remainder if it is the
4090 only chunk in unsorted bin. This helps promote locality for
4091 runs of consecutive small requests. This is the only
4092 exception to best-fit, and applies only when there is
4093 no exact fit for a small chunk.
4094 */
4095
4096 if (in_smallbin_range (nb) &&
4097 bck == unsorted_chunks (av) &&
4098 victim == av->last_remainder &&
4099 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4100 {
4101 /* split and reattach remainder */
4102 remainder_size = size - nb;
4103 remainder = chunk_at_offset (victim, nb);
4104 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
4105 av->last_remainder = remainder;
4106 remainder->bk = remainder->fd = unsorted_chunks (av);
4107 if (!in_smallbin_range (remainder_size))
4108 {
4109 remainder->fd_nextsize = NULL;
4110 remainder->bk_nextsize = NULL;
4111 }
4112
4113 set_head (victim, nb | PREV_INUSE |
4114 (av != &main_arena ? NON_MAIN_ARENA : 0));
4115 set_head (remainder, remainder_size | PREV_INUSE);
4116 set_foot (remainder, remainder_size);
4117
4118 check_malloced_chunk (av, victim, nb);
4119 void *p = chunk2mem (victim);
4120 alloc_perturb (p, bytes);
4121 return p;
4122 }
4123
4124 /* remove from unsorted list */
4125 unsorted_chunks (av)->bk = bck;
4126 bck->fd = unsorted_chunks (av);
4127
4128 /* Take now instead of binning if exact fit */
4129
4130 if (size == nb)
4131 {
4132 set_inuse_bit_at_offset (victim, size);
4133 if (av != &main_arena)
4134 set_non_main_arena (victim);
4135 #if USE_TCACHE
4136 /* Fill cache first, return to user only if cache fills.
4137 We may return one of these chunks later. */
4138 if (tcache_nb > 0
4139 && tcache->counts[tc_idx] < mp_.tcache_count)
4140 {
4141 tcache_put (victim, tc_idx);
4142 return_cached = 1;
4143 continue;
4144 }
4145 else
4146 {
4147 #endif
4148 check_malloced_chunk (av, victim, nb);
4149 void *p = chunk2mem (victim);
4150 alloc_perturb (p, bytes);
4151 return p;
4152 #if USE_TCACHE
4153 }
4154 #endif
4155 }
4156
4157 /* place chunk in bin */
4158
4159 if (in_smallbin_range (size))
4160 {
4161 victim_index = smallbin_index (size);
4162 bck = bin_at (av, victim_index);
4163 fwd = bck->fd;
4164 }
4165 else
4166 {
4167 victim_index = largebin_index (size);
4168 bck = bin_at (av, victim_index);
4169 fwd = bck->fd;
4170
4171 /* maintain large bins in sorted order */
4172 if (fwd != bck)
4173 {
4174 /* Or with inuse bit to speed comparisons */
4175 size |= PREV_INUSE;
4176 /* if smaller than smallest, bypass loop below */
4177 assert (chunk_main_arena (bck->bk));
4178 if ((unsigned long) (size)
4179 < (unsigned long) chunksize_nomask (bck->bk))
4180 {
4181 fwd = bck;
4182 bck = bck->bk;
4183
4184 victim->fd_nextsize = fwd->fd;
4185 victim->bk_nextsize = fwd->fd->bk_nextsize;
4186 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4187 }
4188 else
4189 {
4190 assert (chunk_main_arena (fwd));
4191 while ((unsigned long) size < chunksize_nomask (fwd))
4192 {
4193 fwd = fwd->fd_nextsize;
4194 assert (chunk_main_arena (fwd));
4195 }
4196
4197 if ((unsigned long) size
4198 == (unsigned long) chunksize_nomask (fwd))
4199 /* Always insert in the second position. */
4200 fwd = fwd->fd;
4201 else
4202 {
4203 victim->fd_nextsize = fwd;
4204 victim->bk_nextsize = fwd->bk_nextsize;
4205 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
4206 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4207 fwd->bk_nextsize = victim;
4208 victim->bk_nextsize->fd_nextsize = victim;
4209 }
4210 bck = fwd->bk;
4211 if (bck->fd != fwd)
4212 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4213 }
4214 }
4215 else
4216 victim->fd_nextsize = victim->bk_nextsize = victim;
4217 }
4218
4219 mark_bin (av, victim_index);
4220 victim->bk = bck;
4221 victim->fd = fwd;
4222 fwd->bk = victim;
4223 bck->fd = victim;
4224
4225 #if USE_TCACHE
4226 /* If we've processed as many chunks as we're allowed while
4227 filling the cache, return one of the cached ones. */
4228 ++tcache_unsorted_count;
4229 if (return_cached
4230 && mp_.tcache_unsorted_limit > 0
4231 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
4232 {
4233 return tcache_get (tc_idx);
4234 }
4235 #endif
4236
4237 #define MAX_ITERS 10000
4238 if (++iters >= MAX_ITERS)
4239 break;
4240 }
4241
4242 #if USE_TCACHE
4243 /* If all the small chunks we found ended up cached, return one now. */
4244 if (return_cached)
4245 {
4246 return tcache_get (tc_idx);
4247 }
4248 #endif
4249
4250 /*
4251 If a large request, scan through the chunks of current bin in
4252 sorted order to find smallest that fits. Use the skip list for this.
4253 */
4254
4255 if (!in_smallbin_range (nb))
4256 {
4257 bin = bin_at (av, idx);
4258
4259 /* skip scan if empty or largest chunk is too small */
4260 if ((victim = first (bin)) != bin
4261 && (unsigned long) chunksize_nomask (victim)
4262 >= (unsigned long) (nb))
4263 {
4264 victim = victim->bk_nextsize;
4265 while (((unsigned long) (size = chunksize (victim)) <
4266 (unsigned long) (nb)))
4267 victim = victim->bk_nextsize;
4268
4269 /* Avoid removing the first entry for a size so that the skip
4270 list does not have to be rerouted. */
4271 if (victim != last (bin)
4272 && chunksize_nomask (victim)
4273 == chunksize_nomask (victim->fd))
4274 victim = victim->fd;
4275
4276 remainder_size = size - nb;
4277 unlink_chunk (av, victim);
4278
4279 /* Exhaust */
4280 if (remainder_size < MINSIZE)
4281 {
4282 set_inuse_bit_at_offset (victim, size);
4283 if (av != &main_arena)
4284 set_non_main_arena (victim);
4285 }
4286 /* Split */
4287 else
4288 {
4289 remainder = chunk_at_offset (victim, nb);
4290 /* We cannot assume the unsorted list is empty and therefore
4291 have to perform a complete insert here. */
4292 bck = unsorted_chunks (av);
4293 fwd = bck->fd;
4294 if (__glibc_unlikely (fwd->bk != bck))
4295 malloc_printerr ("malloc(): corrupted unsorted chunks");
4296 remainder->bk = bck;
4297 remainder->fd = fwd;
4298 bck->fd = remainder;
4299 fwd->bk = remainder;
4300 if (!in_smallbin_range (remainder_size))
4301 {
4302 remainder->fd_nextsize = NULL;
4303 remainder->bk_nextsize = NULL;
4304 }
4305 set_head (victim, nb | PREV_INUSE |
4306 (av != &main_arena ? NON_MAIN_ARENA : 0));
4307 set_head (remainder, remainder_size | PREV_INUSE);
4308 set_foot (remainder, remainder_size);
4309 }
4310 check_malloced_chunk (av, victim, nb);
4311 void *p = chunk2mem (victim);
4312 alloc_perturb (p, bytes);
4313 return p;
4314 }
4315 }
4316
4317 /*
4318 Search for a chunk by scanning bins, starting with next largest
4319 bin. This search is strictly by best-fit; i.e., the smallest
4320 (with ties going to approximately the least recently used) chunk
4321 that fits is selected.
4322
4323 The bitmap avoids needing to check that most blocks are nonempty.
4324 The particular case of skipping all bins during warm-up phases
4325 when no chunks have been returned yet is faster than it might look.
4326 */
4327
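/* Editor's sketch of the binmap layout (the macros are defined earlier
   in this file; BINMAPSHIFT is 5 there, so each binmap word covers 32
   bins):

       idx2block (idx) == idx >> 5          // which binmap word
       idx2bit (idx)   == 1 << (idx & 31)   // which bit inside it

   For example, bin 70 lives in block 2, bit 1 << 6.  A set bit only
   means the bin *may* be non-empty; the "false alarm" branch below
   clears stale bits lazily. */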
4328 ++idx;
4329 bin = bin_at (av, idx);
4330 block = idx2block (idx);
4331 map = av->binmap[block];
4332 bit = idx2bit (idx);
4333
4334 for (;; )
4335 {
4336 /* Skip rest of block if there are no more set bits in this block. */
4337 if (bit > map || bit == 0)
4338 {
4339 do
4340 {
4341 if (++block >= BINMAPSIZE) /* out of bins */
4342 goto use_top;
4343 }
4344 while ((map = av->binmap[block]) == 0);
4345
4346 bin = bin_at (av, (block << BINMAPSHIFT));
4347 bit = 1;
4348 }
4349
4350 /* Advance to bin with set bit. There must be one. */
4351 while ((bit & map) == 0)
4352 {
4353 bin = next_bin (bin);
4354 bit <<= 1;
4355 assert (bit != 0);
4356 }
4357
4358 /* Inspect the bin. It is likely to be non-empty */
4359 victim = last (bin);
4360
4361 /* If a false alarm (empty bin), clear the bit. */
4362 if (victim == bin)
4363 {
4364 av->binmap[block] = map &= ~bit; /* Write through */
4365 bin = next_bin (bin);
4366 bit <<= 1;
4367 }
4368
4369 else
4370 {
4371 size = chunksize (victim);
4372
4373 /* We know the first chunk in this bin is big enough to use. */
4374 assert ((unsigned long) (size) >= (unsigned long) (nb));
4375
4376 remainder_size = size - nb;
4377
4378 /* unlink */
4379 unlink_chunk (av, victim);
4380
4381 /* Exhaust */
4382 if (remainder_size < MINSIZE)
4383 {
4384 set_inuse_bit_at_offset (victim, size);
4385 if (av != &main_arena)
4386 set_non_main_arena (victim);
4387 }
4388
4389 /* Split */
4390 else
4391 {
4392 remainder = chunk_at_offset (victim, nb);
4393
4394 /* We cannot assume the unsorted list is empty and therefore
4395 have to perform a complete insert here. */
4396 bck = unsorted_chunks (av);
4397 fwd = bck->fd;
4398 if (__glibc_unlikely (fwd->bk != bck))
4399 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4400 remainder->bk = bck;
4401 remainder->fd = fwd;
4402 bck->fd = remainder;
4403 fwd->bk = remainder;
4404
4405 /* advertise as last remainder */
4406 if (in_smallbin_range (nb))
4407 av->last_remainder = remainder;
4408 if (!in_smallbin_range (remainder_size))
4409 {
4410 remainder->fd_nextsize = NULL;
4411 remainder->bk_nextsize = NULL;
4412 }
4413 set_head (victim, nb | PREV_INUSE |
4414 (av != &main_arena ? NON_MAIN_ARENA : 0));
4415 set_head (remainder, remainder_size | PREV_INUSE);
4416 set_foot (remainder, remainder_size);
4417 }
4418 check_malloced_chunk (av, victim, nb);
4419 void *p = chunk2mem (victim);
4420 alloc_perturb (p, bytes);
4421 return p;
4422 }
4423 }
4424
4425 use_top:
4426 /*
4427 If large enough, split off the chunk bordering the end of memory
4428 (held in av->top). Note that this is in accord with the best-fit
4429 search rule. In effect, av->top is treated as larger (and thus
4430 less well fitting) than any other available chunk since it can
4431 be extended to be as large as necessary (up to system
4432 limitations).
4433
4434 We require that av->top always exists (i.e., has size >=
4435 MINSIZE) after initialization, so if it would otherwise be
4436 exhausted by current request, it is replenished. (The main
4437 reason for ensuring it exists is that we may need MINSIZE space
4438 to put in fenceposts in sysmalloc.)
4439 */
4440
4441 victim = av->top;
4442 size = chunksize (victim);
4443
4444 if (__glibc_unlikely (size > av->system_mem))
4445 malloc_printerr ("malloc(): corrupted top size");
4446
4447 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4448 {
4449 remainder_size = size - nb;
4450 remainder = chunk_at_offset (victim, nb);
4451 av->top = remainder;
4452 set_head (victim, nb | PREV_INUSE |
4453 (av != &main_arena ? NON_MAIN_ARENA : 0));
4454 set_head (remainder, remainder_size | PREV_INUSE);
4455
4456 check_malloced_chunk (av, victim, nb);
4457 void *p = chunk2mem (victim);
4458 alloc_perturb (p, bytes);
4459 return p;
4460 }
4461
4462 /* When we are using atomic ops to free fast chunks we can get
4463 here for all block sizes. */
4464 else if (atomic_load_relaxed (&av->have_fastchunks))
4465 {
4466 malloc_consolidate (av);
4467 /* restore original bin index */
4468 if (in_smallbin_range (nb))
4469 idx = smallbin_index (nb);
4470 else
4471 idx = largebin_index (nb);
4472 }
4473
4474 /*
4475 Otherwise, relay to handle system-dependent cases
4476 */
4477 else
4478 {
4479 void *p = sysmalloc (nb, av);
4480 if (p != NULL)
4481 alloc_perturb (p, bytes);
4482 return p;
4483 }
4484 }
4485 }
4486
4487 /*
4488 ------------------------------ free ------------------------------
4489 */
4490
4491 static void
4492 _int_free (mstate av, mchunkptr p, int have_lock)
4493 {
4494 INTERNAL_SIZE_T size; /* its size */
4495 mfastbinptr *fb; /* associated fastbin */
4496
4497 size = chunksize (p);
4498
4499 /* Little security check which won't hurt performance: the
4500 allocator never wraps around at the end of the address space.
4501 Therefore we can exclude some size values which might appear
4502 here by accident or by "design" from some intruder. */
4503 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4504 || __builtin_expect (misaligned_chunk (p), 0))
4505 malloc_printerr ("free(): invalid pointer");
4506 /* We know that each chunk is at least MINSIZE bytes in size and a
4507 multiple of MALLOC_ALIGNMENT. */
4508 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4509 malloc_printerr ("free(): invalid size");
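/* Editor's note on the wraparound check above: (uintptr_t) -size is
   2^wordsize - size, so "p > -size" is exactly the condition under
   which p + size would wrap past the end of the address space.  A
   forged size such as 0xfffffffffffffff0 therefore fails for any
   realistic chunk address, at the cost of a single comparison on this
   hot path. */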
4510
4511 check_inuse_chunk(av, p);
4512
4513 #if USE_TCACHE
4514 {
4515 size_t tc_idx = csize2tidx (size);
4516 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4517 {
4518 /* Check to see if it's already in the tcache. */
4519 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4520
4521 /* This test succeeds on double free. However, we don't 100%
4522 trust it (it also matches random payload data at a 1 in
4523 2^<size_t> chance), so verify it's not an unlikely
4524 coincidence before aborting. */
4525 if (__glibc_unlikely (e->key == tcache_key))
4526 {
4527 tcache_entry *tmp;
4528 size_t cnt = 0;
4529 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4530 for (tmp = tcache->entries[tc_idx];
4531 tmp;
4532 tmp = REVEAL_PTR (tmp->next), ++cnt)
4533 {
4534 if (cnt >= mp_.tcache_count)
4535 malloc_printerr ("free(): too many chunks detected in tcache");
4536 if (__glibc_unlikely (!aligned_OK (tmp)))
4537 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4538 if (tmp == e)
4539 malloc_printerr ("free(): double free detected in tcache 2");
4540 /* If we get here, it was a coincidence. We've wasted a
4541 few cycles, but don't abort. */
4542 }
4543 }
4544
4545 if (tcache->counts[tc_idx] < mp_.tcache_count)
4546 {
4547 tcache_put (p, tc_idx);
4548 return;
4549 }
4550 }
4551 }
4552 #endif
4553
4554 /*
4555 If eligible, place chunk on a fastbin so it can be found
4556 and used quickly in malloc.
4557 */
4558
4559 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4560
4561 #if TRIM_FASTBINS
4562 /*
4563 If TRIM_FASTBINS set, don't place chunks
4564 bordering top into fastbins
4565 */
4566 && (chunk_at_offset(p, size) != av->top)
4567 #endif
4568 ) {
4569
4570 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4571 <= CHUNK_HDR_SZ, 0)
4572 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4573 >= av->system_mem, 0))
4574 {
4575 bool fail = true;
4576 /* We might not have a lock at this point and concurrent modifications
4577 of system_mem might result in a false positive. Redo the test after
4578 getting the lock. */
4579 if (!have_lock)
4580 {
4581 __libc_lock_lock (av->mutex);
4582 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
4583 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4584 __libc_lock_unlock (av->mutex);
4585 }
4586
4587 if (fail)
4588 malloc_printerr ("free(): invalid next size (fast)");
4589 }
4590
4591 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4592
4593 atomic_store_relaxed (&av->have_fastchunks, true);
4594 unsigned int idx = fastbin_index(size);
4595 fb = &fastbin (av, idx);
4596
4597 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4598 mchunkptr old = *fb, old2;
4599
4600 if (SINGLE_THREAD_P)
4601 {
4602 /* Check that the top of the bin is not the record we are going to
4603 add (i.e., double free). */
4604 if (__builtin_expect (old == p, 0))
4605 malloc_printerr ("double free or corruption (fasttop)");
4606 p->fd = PROTECT_PTR (&p->fd, old);
4607 *fb = p;
4608 }
4609 else
4610 do
4611 {
4612 /* Check that the top of the bin is not the record we are going to
4613 add (i.e., double free). */
4614 if (__builtin_expect (old == p, 0))
4615 malloc_printerr ("double free or corruption (fasttop)");
4616 old2 = old;
4617 p->fd = PROTECT_PTR (&p->fd, old);
4618 }
4619 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4620 != old2);
4621
4622 /* Check that size of fastbin chunk at the top is the same as
4623 size of the chunk that we are adding. We can dereference OLD
4624 only if we have the lock, otherwise it might have already been
4625 allocated again. */
4626 if (have_lock && old != NULL
4627 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
4628 malloc_printerr ("invalid fastbin entry (free)");
4629 }
4630
4631 /*
4632 Consolidate other non-mmapped chunks as they arrive.
4633 */
4634
4635 else if (!chunk_is_mmapped(p)) {
4636
4637 /* If we're single-threaded, don't lock the arena. */
4638 if (SINGLE_THREAD_P)
4639 have_lock = true;
4640
4641 if (!have_lock)
4642 __libc_lock_lock (av->mutex);
4643
4644 _int_free_merge_chunk (av, p, size);
4645
4646 if (!have_lock)
4647 __libc_lock_unlock (av->mutex);
4648 }
4649 /*
4650 If the chunk was allocated via mmap, release via munmap().
4651 */
4652
4653 else {
4654 munmap_chunk (p);
4655 }
4656 }
4657
4658 /* Try to merge chunk P of SIZE bytes with its neighbors. Put the
4659 resulting chunk on the appropriate bin list. P must not be on a
4660 bin list yet, and it can be in use. */
4661 static void
4662 _int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
4663 {
4664 mchunkptr nextchunk = chunk_at_offset(p, size);
4665
4666 /* Lightweight tests: check whether the block is already the
4667 top block. */
4668 if (__glibc_unlikely (p == av->top))
4669 malloc_printerr ("double free or corruption (top)");
4670 /* Or whether the next chunk is beyond the boundaries of the arena. */
4671 if (__builtin_expect (contiguous (av)
4672 && (char *) nextchunk
4673 >= ((char *) av->top + chunksize(av->top)), 0))
4674 malloc_printerr ("double free or corruption (out)");
4675 /* Or whether the block is actually not marked used. */
4676 if (__glibc_unlikely (!prev_inuse(nextchunk)))
4677 malloc_printerr ("double free or corruption (!prev)");
4678
4679 INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
4680 if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
4681 || __builtin_expect (nextsize >= av->system_mem, 0))
4682 malloc_printerr ("free(): invalid next size (normal)");
4683
4684 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4685
4686 /* Consolidate backward. */
4687 if (!prev_inuse(p))
4688 {
4689 INTERNAL_SIZE_T prevsize = prev_size (p);
4690 size += prevsize;
4691 p = chunk_at_offset(p, -((long) prevsize));
4692 if (__glibc_unlikely (chunksize(p) != prevsize))
4693 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4694 unlink_chunk (av, p);
4695 }
4696
4697 /* Write the chunk header, maybe after merging with the following chunk. */
4698 size = _int_free_create_chunk (av, p, size, nextchunk, nextsize);
4699 _int_free_maybe_consolidate (av, size);
4700 }
4701
4702 /* Create a chunk at P of SIZE bytes, with SIZE potentially increased
4703 to cover the immediately following chunk NEXTCHUNK of NEXTSIZE
4704 bytes (if NEXTCHUNK is unused). The chunk at P is not actually
4705 read and does not have to be initialized. After creation, it is
4706 placed on the appropriate bin list. The function returns the size
4707 of the new chunk. */
4708 static INTERNAL_SIZE_T
4709 _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
4710 mchunkptr nextchunk, INTERNAL_SIZE_T nextsize)
4711 {
4712 if (nextchunk != av->top)
4713 {
4714 /* get and clear inuse bit */
4715 bool nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
4716
4717 /* consolidate forward */
4718 if (!nextinuse) {
4719 unlink_chunk (av, nextchunk);
4720 size += nextsize;
4721 } else
4722 clear_inuse_bit_at_offset(nextchunk, 0);
4723
4724 /*
4725 Place the chunk in unsorted chunk list. Chunks are
4726 not placed into regular bins until after they have
4727 been given one chance to be used in malloc.
4728 */
4729
4730 mchunkptr bck = unsorted_chunks (av);
4731 mchunkptr fwd = bck->fd;
4732 if (__glibc_unlikely (fwd->bk != bck))
4733 malloc_printerr ("free(): corrupted unsorted chunks");
4734 p->fd = fwd;
4735 p->bk = bck;
4736 if (!in_smallbin_range(size))
4737 {
4738 p->fd_nextsize = NULL;
4739 p->bk_nextsize = NULL;
4740 }
4741 bck->fd = p;
4742 fwd->bk = p;
4743
4744 set_head(p, size | PREV_INUSE);
4745 set_foot(p, size);
4746
4747 check_free_chunk(av, p);
4748 }
4749
4750 else
4751 {
4752 /* If the chunk borders the current high end of memory,
4753 consolidate into top. */
4754 size += nextsize;
4755 set_head(p, size | PREV_INUSE);
4756 av->top = p;
4757 check_chunk(av, p);
4758 }
4759
4760 return size;
4761 }
4762
4763 /* If freeing a large space, consolidate possibly-surrounding
4764 chunks. Then, if the total unused topmost memory exceeds trim
4765 threshold, ask malloc_trim to reduce top. */
4766 static void
4767 _int_free_maybe_consolidate (mstate av, INTERNAL_SIZE_T size)
4768 {
4769 /* Unless max_fast is 0, we don't know if there are fastbins
4770 bordering top, so we cannot tell for sure whether threshold has
4771 been reached unless fastbins are consolidated. But we don't want
4772 to consolidate on each free. As a compromise, consolidation is
4773 performed if FASTBIN_CONSOLIDATION_THRESHOLD is reached. */
4774 if (size >= FASTBIN_CONSOLIDATION_THRESHOLD)
4775 {
4776 if (atomic_load_relaxed (&av->have_fastchunks))
4777 malloc_consolidate(av);
4778
4779 if (av == &main_arena)
4780 {
4781 #ifndef MORECORE_CANNOT_TRIM
4782 if (chunksize (av->top) >= mp_.trim_threshold)
4783 systrim (mp_.top_pad, av);
4784 #endif
4785 }
4786 else
4787 {
4788 /* Always try heap_trim, even if the top chunk is not large,
4789 because the corresponding heap might go away. */
4790 heap_info *heap = heap_for_ptr (top (av));
4791
4792 assert (heap->ar_ptr == av);
4793 heap_trim (heap, mp_.top_pad);
4794 }
4795 }
4796 }
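/* Editor's note: FASTBIN_CONSOLIDATION_THRESHOLD (defined earlier in
   this file; 64 KiB in this version) means that only frees producing a
   fairly large coalesced chunk pay for a full fastbin sweep and a
   possible systrim/heap_trim; small frees stay on the cheap
   fastbin/tcache paths. */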
4797
4798 /*
4799 ------------------------- malloc_consolidate -------------------------
4800
4801 malloc_consolidate is a specialized version of free() that tears
4802 down chunks held in fastbins. Free itself cannot be used for this
4803 purpose since, among other things, it might place chunks back onto
4804 fastbins. So, instead, we need to use a minor variant of the same
4805 code.
4806 */
4807
4808 static void malloc_consolidate(mstate av)
4809 {
4810 mfastbinptr* fb; /* current fastbin being consolidated */
4811 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4812 mchunkptr p; /* current chunk being consolidated */
4813 mchunkptr nextp; /* next chunk to consolidate */
4814 mchunkptr unsorted_bin; /* bin header */
4815 mchunkptr first_unsorted; /* chunk to link to */
4816
4817 /* These have same use as in free() */
4818 mchunkptr nextchunk;
4819 INTERNAL_SIZE_T size;
4820 INTERNAL_SIZE_T nextsize;
4821 INTERNAL_SIZE_T prevsize;
4822 int nextinuse;
4823
4824 atomic_store_relaxed (&av->have_fastchunks, false);
4825
4826 unsorted_bin = unsorted_chunks(av);
4827
4828 /*
4829 Remove each chunk from fast bin and consolidate it, placing it
4830 then in unsorted bin. Among other reasons for doing this,
4831 placing in unsorted bin avoids needing to calculate actual bins
4832 until malloc is sure that chunks aren't immediately going to be
4833 reused anyway.
4834 */
4835
4836 maxfb = &fastbin (av, NFASTBINS - 1);
4837 fb = &fastbin (av, 0);
4838 do {
4839 p = atomic_exchange_acquire (fb, NULL);
4840 if (p != 0) {
4841 do {
4842 {
4843 if (__glibc_unlikely (misaligned_chunk (p)))
4844 malloc_printerr ("malloc_consolidate(): "
4845 "unaligned fastbin chunk detected");
4846
4847 unsigned int idx = fastbin_index (chunksize (p));
4848 if ((&fastbin (av, idx)) != fb)
4849 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4850 }
4851
4852 check_inuse_chunk(av, p);
4853 nextp = REVEAL_PTR (p->fd);
4854
4855 /* Slightly streamlined version of consolidation code in free() */
4856 size = chunksize (p);
4857 nextchunk = chunk_at_offset(p, size);
4858 nextsize = chunksize(nextchunk);
4859
4860 if (!prev_inuse(p)) {
4861 prevsize = prev_size (p);
4862 size += prevsize;
4863 p = chunk_at_offset(p, -((long) prevsize));
4864 if (__glibc_unlikely (chunksize(p) != prevsize))
4865 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4866 unlink_chunk (av, p);
4867 }
4868
4869 if (nextchunk != av->top) {
4870 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4871
4872 if (!nextinuse) {
4873 size += nextsize;
4874 unlink_chunk (av, nextchunk);
4875 } else
4876 clear_inuse_bit_at_offset(nextchunk, 0);
4877
4878 first_unsorted = unsorted_bin->fd;
4879 unsorted_bin->fd = p;
4880 first_unsorted->bk = p;
4881
4882 if (!in_smallbin_range (size)) {
4883 p->fd_nextsize = NULL;
4884 p->bk_nextsize = NULL;
4885 }
4886
4887 set_head(p, size | PREV_INUSE);
4888 p->bk = unsorted_bin;
4889 p->fd = first_unsorted;
4890 set_foot(p, size);
4891 }
4892
4893 else {
4894 size += nextsize;
4895 set_head(p, size | PREV_INUSE);
4896 av->top = p;
4897 }
4898
4899 } while ( (p = nextp) != 0);
4900
4901 }
4902 } while (fb++ != maxfb);
4903 }
4904
4905 /*
4906 ------------------------------ realloc ------------------------------
4907 */
4908
4909 static void *
4910 _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4911 INTERNAL_SIZE_T nb)
4912 {
4913 mchunkptr newp; /* chunk to return */
4914 INTERNAL_SIZE_T newsize; /* its size */
4915 void* newmem; /* corresponding user mem */
4916
4917 mchunkptr next; /* next contiguous chunk after oldp */
4918
4919 mchunkptr remainder; /* extra space at end of newp */
4920 unsigned long remainder_size; /* its size */
4921
4922 /* oldmem size */
4923 if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
4924 || __builtin_expect (oldsize >= av->system_mem, 0)
4925 || __builtin_expect (oldsize != chunksize (oldp), 0))
4926 malloc_printerr ("realloc(): invalid old size");
4927
4928 check_inuse_chunk (av, oldp);
4929
4930 /* All callers already filter out mmap'ed chunks. */
4931 assert (!chunk_is_mmapped (oldp));
4932
4933 next = chunk_at_offset (oldp, oldsize);
4934 INTERNAL_SIZE_T nextsize = chunksize (next);
4935 if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
4936 || __builtin_expect (nextsize >= av->system_mem, 0))
4937 malloc_printerr ("realloc(): invalid next size");
4938
4939 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4940 {
4941 /* already big enough; split below */
4942 newp = oldp;
4943 newsize = oldsize;
4944 }
4945
4946 else
4947 {
4948 /* Try to expand forward into top */
4949 if (next == av->top &&
4950 (unsigned long) (newsize = oldsize + nextsize) >=
4951 (unsigned long) (nb + MINSIZE))
4952 {
4953 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4954 av->top = chunk_at_offset (oldp, nb);
4955 set_head (av->top, (newsize - nb) | PREV_INUSE);
4956 check_inuse_chunk (av, oldp);
4957 return tag_new_usable (chunk2mem (oldp));
4958 }
4959
4960 /* Try to expand forward into next chunk; split off remainder below */
4961 else if (next != av->top &&
4962 !inuse (next) &&
4963 (unsigned long) (newsize = oldsize + nextsize) >=
4964 (unsigned long) (nb))
4965 {
4966 newp = oldp;
4967 unlink_chunk (av, next);
4968 }
4969
4970 /* allocate, copy, free */
4971 else
4972 {
4973 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4974 if (newmem == 0)
4975 return 0; /* propagate failure */
4976
4977 newp = mem2chunk (newmem);
4978 newsize = chunksize (newp);
4979
4980 /*
4981 Avoid copy if newp is next chunk after oldp.
4982 */
4983 if (newp == next)
4984 {
4985 newsize += oldsize;
4986 newp = oldp;
4987 }
4988 else
4989 {
4990 void *oldmem = chunk2mem (oldp);
4991 size_t sz = memsize (oldp);
4992 (void) tag_region (oldmem, sz);
4993 newmem = tag_new_usable (newmem);
4994 memcpy (newmem, oldmem, sz);
4995 _int_free (av, oldp, 1);
4996 check_inuse_chunk (av, newp);
4997 return newmem;
4998 }
4999 }
5000 }
5001
5002 /* If possible, free extra space in old or extended chunk */
5003
5004 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
5005
5006 remainder_size = newsize - nb;
5007
5008 if (remainder_size < MINSIZE) /* not enough extra to split off */
5009 {
5010 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5011 set_inuse_bit_at_offset (newp, newsize);
5012 }
5013 else /* split remainder */
5014 {
5015 remainder = chunk_at_offset (newp, nb);
5016 /* Clear any user-space tags before writing the header. */
5017 remainder = tag_region (remainder, remainder_size);
5018 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
5019 set_head (remainder, remainder_size | PREV_INUSE |
5020 (av != &main_arena ? NON_MAIN_ARENA : 0));
5021 /* Mark remainder as inuse so free() won't complain */
5022 set_inuse_bit_at_offset (remainder, remainder_size);
5023 _int_free (av, remainder, 1);
5024 }
5025
5026 check_inuse_chunk (av, newp);
5027 return tag_new_usable (chunk2mem (newp));
5028 }
5029
5030 /*
5031 ------------------------------ memalign ------------------------------
5032 */
5033
5034 /* BYTES is user requested bytes, not requested chunksize bytes. */
5035 static void *
5036 _int_memalign (mstate av, size_t alignment, size_t bytes)
5037 {
5038 INTERNAL_SIZE_T nb; /* padded request size */
5039 char *m; /* memory returned by malloc call */
5040 mchunkptr p; /* corresponding chunk */
5041 char *brk; /* alignment point within p */
5042 mchunkptr newp; /* chunk to return */
5043 INTERNAL_SIZE_T newsize; /* its size */
5044 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5045 mchunkptr remainder; /* spare room at end to split off */
5046 unsigned long remainder_size; /* its size */
5047 INTERNAL_SIZE_T size;
5048
5049 nb = checked_request2size (bytes);
5050 if (nb == 0)
5051 {
5052 __set_errno (ENOMEM);
5053 return NULL;
5054 }
5055
5056 /* We can't check the tcache here because we hold the arena lock, which
5057 the tcache code does not expect. We assume the caller has already
5058 checked it. */
5059
5060 /* Strategy: search the bins looking for an existing block that
5061 meets our needs. We scan a range of bins from "exact size" to
5062 "just under 2x", spanning the small/large barrier if needed. If
5063 we don't find anything in those bins, the common malloc code will
5064 scan starting at 2x. */
5065
5066 /* Call malloc with worst case padding to hit alignment. */
5067 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
5068
5069 if (m == 0)
5070 return 0; /* propagate failure */
5071
5072 p = mem2chunk (m);
5073
5074 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
5075 {
5076 /* Find an aligned spot inside chunk. Since we need to give back
5077 leading space in a chunk of at least MINSIZE, if the first
5078 calculation places us at a spot with less than MINSIZE leader,
5079 we can move to the next aligned spot -- we've allocated enough
5080 total room so that this is always possible. */
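      /* Illustrative example (assuming a 64-bit layout with CHUNK_HDR_SZ 16
         and MINSIZE 32): for alignment 64, if malloc returns user memory at
         0x1070 (chunk header at 0x1060), the first aligned user address is
         0x1080, i.e. brk 0x1070, leaving only a 16-byte leader.  Advancing
         brk by another 64 bytes to 0x10b0 yields aligned user memory at
         0x10c0 and an 80-byte leader, which is large enough to give back
         as a separate chunk.  */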
5081 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
5082 - ((signed long) alignment));
5083 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
5084 brk += alignment;
5085
5086 newp = (mchunkptr) brk;
5087 leadsize = brk - (char *) (p);
5088 newsize = chunksize (p) - leadsize;
5089
5090 /* For mmapped chunks, just adjust offset */
5091 if (chunk_is_mmapped (p))
5092 {
5093 set_prev_size (newp, prev_size (p) + leadsize);
5094 set_head (newp, newsize | IS_MMAPPED);
5095 return chunk2mem (newp);
5096 }
5097
5098 /* Otherwise, give back leader, use the rest */
5099 set_head (newp, newsize | PREV_INUSE |
5100 (av != &main_arena ? NON_MAIN_ARENA : 0));
5101 set_inuse_bit_at_offset (newp, newsize);
5102 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5103 _int_free_merge_chunk (av, p, leadsize);
5104 p = newp;
5105
5106 assert (newsize >= nb &&
5107 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
5108 }
5109
5110 /* Also give back spare room at the end */
5111 if (!chunk_is_mmapped (p))
5112 {
5113 size = chunksize (p);
5114 mchunkptr nextchunk = chunk_at_offset(p, size);
5115 INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
5116 if (size > nb)
5117 {
5118 remainder_size = size - nb;
5119 if (remainder_size >= MINSIZE
5120 || nextchunk == av->top
5121 || !inuse_bit_at_offset (nextchunk, nextsize))
5122 {
5123 /* We can only give back the tail if it is at least MINSIZE
5124 bytes, or if the following chunk is unused (top chunk or an
5125 unused in-heap chunk). Otherwise we would create a chunk
5126 that is smaller than MINSIZE. */
5127 remainder = chunk_at_offset (p, nb);
5128 set_head_size (p, nb);
5129 remainder_size = _int_free_create_chunk (av, remainder,
5130 remainder_size,
5131 nextchunk, nextsize);
5132 _int_free_maybe_consolidate (av, remainder_size);
5133 }
5134 }
5135 }
5136
5137 check_inuse_chunk (av, p);
5138 return chunk2mem (p);
5139 }
5140
5141
5142 /*
5143 ------------------------------ malloc_trim ------------------------------
5144 */
5145
5146 static int
5147 mtrim (mstate av, size_t pad)
5148 {
5149 /* Ensure all blocks are consolidated. */
5150 malloc_consolidate (av);
5151
5152 const size_t ps = GLRO (dl_pagesize);
5153 int psindex = bin_index (ps);
5154 const size_t psm1 = ps - 1;
5155
5156 int result = 0;
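  /* Scan the unsorted bin (index 1) and every bin whose chunks are at
     least a page; chunks in the smaller bins cannot contain a whole
     unused page.  */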
5157 for (int i = 1; i < NBINS; ++i)
5158 if (i == 1 || i >= psindex)
5159 {
5160 mbinptr bin = bin_at (av, i);
5161
5162 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5163 {
5164 INTERNAL_SIZE_T size = chunksize (p);
5165
5166 if (size > psm1 + sizeof (struct malloc_chunk))
5167 {
5168 /* See whether the chunk contains at least one unused page. */
5169 char *paligned_mem = (char *) (((uintptr_t) p
5170 + sizeof (struct malloc_chunk)
5171 + psm1) & ~psm1);
5172
5173 assert ((char *) chunk2mem (p) + 2 * CHUNK_HDR_SZ
5174 <= paligned_mem);
5175 assert ((char *) p + size > paligned_mem);
5176
5177 /* This is the size we could potentially free. */
5178 size -= paligned_mem - (char *) p;
5179
5180 if (size > psm1)
5181 {
5182 #if MALLOC_DEBUG
5183 /* When debugging we simulate destroying the memory
5184 content. */
5185 memset (paligned_mem, 0x89, size & ~psm1);
5186 #endif
5187 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5188
5189 result = 1;
5190 }
5191 }
5192 }
5193 }
5194
5195 #ifndef MORECORE_CANNOT_TRIM
5196 return result | (av == &main_arena ? systrim (pad, av) : 0);
5197
5198 #else
5199 return result;
5200 #endif
5201 }
5202
5203
5204 int
5205 __malloc_trim (size_t s)
5206 {
5207 int result = 0;
5208
5209 if (!__malloc_initialized)
5210 ptmalloc_init ();
5211
5212 mstate ar_ptr = &main_arena;
5213 do
5214 {
5215 __libc_lock_lock (ar_ptr->mutex);
5216 result |= mtrim (ar_ptr, s);
5217 __libc_lock_unlock (ar_ptr->mutex);
5218
5219 ar_ptr = ar_ptr->next;
5220 }
5221 while (ar_ptr != &main_arena);
5222
5223 return result;
5224 }
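/*
   Illustrative usage (a sketch, not part of this file; the helper name
   release_cache and the 1 MiB pad are arbitrary): after freeing a large
   working set, an application can ask for unused pages to be returned
   to the system.

   #include <malloc.h>
   #include <stdlib.h>

   void release_cache (void **blocks, size_t n)
   {
     for (size_t i = 0; i < n; i++)
       free (blocks[i]);
     // Keep at most 1 MiB of slack at the top of the heap; trim the rest.
     (void) malloc_trim (1024 * 1024);
   }
*/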
5225
5226
5227 /*
5228 ------------------------- malloc_usable_size -------------------------
5229 */
5230
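/* Return the number of usable bytes in the block of memory MEM points
   to, or 0 if MEM does not point to an in-use chunk.  */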
5231 static size_t
5232 musable (void *mem)
5233 {
5234 mchunkptr p = mem2chunk (mem);
5235
5236 if (chunk_is_mmapped (p))
5237 return chunksize (p) - CHUNK_HDR_SZ;
5238 else if (inuse (p))
5239 return memsize (p);
5240
5241 return 0;
5242 }
5243
5244 #if IS_IN (libc)
5245 size_t
5246 __malloc_usable_size (void *m)
5247 {
5248 if (m == NULL)
5249 return 0;
5250 return musable (m);
5251 }
5252 #endif
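/*
   Illustrative usage (a sketch, not part of this file): the value
   returned may exceed the size originally requested, because the
   underlying chunk is rounded up.

   #include <malloc.h>
   #include <stdio.h>
   #include <stdlib.h>

   int main (void)
   {
     char *p = malloc (100);
     if (p == NULL)
       return 1;
     printf ("requested 100, usable %zu\n", malloc_usable_size (p));
     free (p);
     return 0;
   }
*/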
5253
5254 /*
5255 ------------------------------ mallinfo ------------------------------
5256 Accumulate malloc statistics for arena AV into M.
5257 */
5258 static void
5259 int_mallinfo (mstate av, struct mallinfo2 *m)
5260 {
5261 size_t i;
5262 mbinptr b;
5263 mchunkptr p;
5264 INTERNAL_SIZE_T avail;
5265 INTERNAL_SIZE_T fastavail;
5266 int nblocks;
5267 int nfastblocks;
5268
5269 check_malloc_state (av);
5270
5271 /* Account for top */
5272 avail = chunksize (av->top);
5273 nblocks = 1; /* top always exists */
5274
5275 /* traverse fastbins */
5276 nfastblocks = 0;
5277 fastavail = 0;
5278
5279 for (i = 0; i < NFASTBINS; ++i)
5280 {
5281 for (p = fastbin (av, i);
5282 p != 0;
5283 p = REVEAL_PTR (p->fd))
5284 {
5285 if (__glibc_unlikely (misaligned_chunk (p)))
5286 malloc_printerr ("int_mallinfo(): "
5287 "unaligned fastbin chunk detected");
5288 ++nfastblocks;
5289 fastavail += chunksize (p);
5290 }
5291 }
5292
5293 avail += fastavail;
5294
5295 /* traverse regular bins */
5296 for (i = 1; i < NBINS; ++i)
5297 {
5298 b = bin_at (av, i);
5299 for (p = last (b); p != b; p = p->bk)
5300 {
5301 ++nblocks;
5302 avail += chunksize (p);
5303 }
5304 }
5305
5306 m->smblks += nfastblocks;
5307 m->ordblks += nblocks;
5308 m->fordblks += avail;
5309 m->uordblks += av->system_mem - avail;
5310 m->arena += av->system_mem;
5311 m->fsmblks += fastavail;
5312 if (av == &main_arena)
5313 {
5314 m->hblks = mp_.n_mmaps;
5315 m->hblkhd = mp_.mmapped_mem;
5316 m->usmblks = 0;
5317 m->keepcost = chunksize (av->top);
5318 }
5319 }
5320
5321
5322 struct mallinfo2
5323 __libc_mallinfo2 (void)
5324 {
5325 struct mallinfo2 m;
5326 mstate ar_ptr;
5327
5328 if (!__malloc_initialized)
5329 ptmalloc_init ();
5330
5331 memset (&m, 0, sizeof (m));
5332 ar_ptr = &main_arena;
5333 do
5334 {
5335 __libc_lock_lock (ar_ptr->mutex);
5336 int_mallinfo (ar_ptr, &m);
5337 __libc_lock_unlock (ar_ptr->mutex);
5338
5339 ar_ptr = ar_ptr->next;
5340 }
5341 while (ar_ptr != &main_arena);
5342
5343 return m;
5344 }
5345 libc_hidden_def (__libc_mallinfo2)
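/*
   Illustrative usage (a sketch, not part of this file; the helper name
   report is arbitrary): mallinfo2 reports byte counts in size_t fields,
   avoiding the int truncation of the historical mallinfo interface.

   #include <malloc.h>
   #include <stdio.h>

   void report (void)
   {
     struct mallinfo2 mi = mallinfo2 ();
     printf ("arena: %zu bytes, in use: %zu bytes, free: %zu bytes\n",
             mi.arena, mi.uordblks, mi.fordblks);
   }
*/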
5346
5347 struct mallinfo
5348 __libc_mallinfo (void)
5349 {
5350 struct mallinfo m;
5351 struct mallinfo2 m2 = __libc_mallinfo2 ();
5352
5353 m.arena = m2.arena;
5354 m.ordblks = m2.ordblks;
5355 m.smblks = m2.smblks;
5356 m.hblks = m2.hblks;
5357 m.hblkhd = m2.hblkhd;
5358 m.usmblks = m2.usmblks;
5359 m.fsmblks = m2.fsmblks;
5360 m.uordblks = m2.uordblks;
5361 m.fordblks = m2.fordblks;
5362 m.keepcost = m2.keepcost;
5363
5364 return m;
5365 }
5366
5367
5368 /*
5369 ------------------------------ malloc_stats ------------------------------
5370 */
5371
5372 void
5373 __malloc_stats (void)
5374 {
5375 int i;
5376 mstate ar_ptr;
5377 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5378
5379 if (!__malloc_initialized)
5380 ptmalloc_init ();
5381 _IO_flockfile (stderr);
5382 int old_flags2 = stderr->_flags2;
5383 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5384 for (i = 0, ar_ptr = &main_arena;; i++)
5385 {
5386 struct mallinfo2 mi;
5387
5388 memset (&mi, 0, sizeof (mi));
5389 __libc_lock_lock (ar_ptr->mutex);
5390 int_mallinfo (ar_ptr, &mi);
5391 fprintf (stderr, "Arena %d:\n", i);
5392 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5393 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5394 #if MALLOC_DEBUG > 1
5395 if (i > 0)
5396 dump_heap (heap_for_ptr (top (ar_ptr)));
5397 #endif
5398 system_b += mi.arena;
5399 in_use_b += mi.uordblks;
5400 __libc_lock_unlock (ar_ptr->mutex);
5401 ar_ptr = ar_ptr->next;
5402 if (ar_ptr == &main_arena)
5403 break;
5404 }
5405 fprintf (stderr, "Total (incl. mmap):\n");
5406 fprintf (stderr, "system bytes = %10u\n", system_b);
5407 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5408 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5409 fprintf (stderr, "max mmap bytes = %10lu\n",
5410 (unsigned long) mp_.max_mmapped_mem);
5411 stderr->_flags2 = old_flags2;
5412 _IO_funlockfile (stderr);
5413 }
5414
5415
5416 /*
5417 ------------------------------ mallopt ------------------------------
5418 */
5419 static __always_inline int
5420 do_set_trim_threshold (size_t value)
5421 {
5422 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5423 mp_.no_dyn_threshold);
5424 mp_.trim_threshold = value;
5425 mp_.no_dyn_threshold = 1;
5426 return 1;
5427 }
5428
5429 static __always_inline int
5430 do_set_top_pad (size_t value)
5431 {
5432 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5433 mp_.no_dyn_threshold);
5434 mp_.top_pad = value;
5435 mp_.no_dyn_threshold = 1;
5436 return 1;
5437 }
5438
5439 static __always_inline int
5440 do_set_mmap_threshold (size_t value)
5441 {
5442 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5443 mp_.no_dyn_threshold);
5444 mp_.mmap_threshold = value;
5445 mp_.no_dyn_threshold = 1;
5446 return 1;
5447 }
5448
5449 static __always_inline int
5450 do_set_mmaps_max (int32_t value)
5451 {
5452 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5453 mp_.no_dyn_threshold);
5454 mp_.n_mmaps_max = value;
5455 mp_.no_dyn_threshold = 1;
5456 return 1;
5457 }
5458
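/* M_CHECK_ACTION is accepted for backward compatibility but is a no-op
   here.  */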
5459 static __always_inline int
5460 do_set_mallopt_check (int32_t value)
5461 {
5462 return 1;
5463 }
5464
5465 static __always_inline int
5466 do_set_perturb_byte (int32_t value)
5467 {
5468 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5469 perturb_byte = value;
5470 return 1;
5471 }
5472
5473 static __always_inline int
5474 do_set_arena_test (size_t value)
5475 {
5476 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5477 mp_.arena_test = value;
5478 return 1;
5479 }
5480
5481 static __always_inline int
5482 do_set_arena_max (size_t value)
5483 {
5484 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5485 mp_.arena_max = value;
5486 return 1;
5487 }
5488
5489 #if USE_TCACHE
5490 static __always_inline int
5491 do_set_tcache_max (size_t value)
5492 {
5493 if (value <= MAX_TCACHE_SIZE)
5494 {
5495 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5496 mp_.tcache_max_bytes = value;
5497 mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
5498 return 1;
5499 }
5500 return 0;
5501 }
5502
5503 static __always_inline int
5504 do_set_tcache_count (size_t value)
5505 {
5506 if (value <= MAX_TCACHE_COUNT)
5507 {
5508 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5509 mp_.tcache_count = value;
5510 return 1;
5511 }
5512 return 0;
5513 }
5514
5515 static __always_inline int
5516 do_set_tcache_unsorted_limit (size_t value)
5517 {
5518 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5519 mp_.tcache_unsorted_limit = value;
5520 return 1;
5521 }
5522 #endif
5523
5524 static __always_inline int
5525 do_set_mxfast (size_t value)
5526 {
5527 if (value <= MAX_FAST_SIZE)
5528 {
5529 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5530 set_max_fast (value);
5531 return 1;
5532 }
5533 return 0;
5534 }
5535
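/* A VALUE of 1 requests transparent huge pages via madvise; a VALUE of 2
   requests explicit huge pages of the default size; larger values select
   that specific huge page size in bytes.  */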
5536 static __always_inline int
5537 do_set_hugetlb (size_t value)
5538 {
5539 if (value == 1)
5540 {
5541 enum malloc_thp_mode_t thp_mode = __malloc_thp_mode ();
5542 /*
5543 Only enable THP madvise usage if the system supports it and is in
5544 'madvise' mode. Otherwise the madvise() call is wasteful.
5545 */
5546 if (thp_mode == malloc_thp_mode_madvise)
5547 mp_.thp_pagesize = __malloc_default_thp_pagesize ();
5548 }
5549 else if (value >= 2)
5550 __malloc_hugepage_config (value == 2 ? 0 : value, &mp_.hp_pagesize,
5551 &mp_.hp_flags);
5552 return 0;
5553 }
5554
5555 int
5556 __libc_mallopt (int param_number, int value)
5557 {
5558 mstate av = &main_arena;
5559 int res = 1;
5560
5561 if (!__malloc_initialized)
5562 ptmalloc_init ();
5563 __libc_lock_lock (av->mutex);
5564
5565 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5566
5567 /* We must consolidate main arena before changing max_fast
5568 (see definition of set_max_fast). */
5569 malloc_consolidate (av);
5570
5571 /* Many of these helper functions take a size_t. We do not worry
5572 about overflow here, because negative int values will wrap to
5573 very large size_t values and the helpers have sufficient range
5574 checking for such conversions. Many of these helpers are also
5575 used by the tunables macros in arena.c. */
5576
5577 switch (param_number)
5578 {
5579 case M_MXFAST:
5580 res = do_set_mxfast (value);
5581 break;
5582
5583 case M_TRIM_THRESHOLD:
5584 res = do_set_trim_threshold (value);
5585 break;
5586
5587 case M_TOP_PAD:
5588 res = do_set_top_pad (value);
5589 break;
5590
5591 case M_MMAP_THRESHOLD:
5592 res = do_set_mmap_threshold (value);
5593 break;
5594
5595 case M_MMAP_MAX:
5596 res = do_set_mmaps_max (value);
5597 break;
5598
5599 case M_CHECK_ACTION:
5600 res = do_set_mallopt_check (value);
5601 break;
5602
5603 case M_PERTURB:
5604 res = do_set_perturb_byte (value);
5605 break;
5606
5607 case M_ARENA_TEST:
5608 if (value > 0)
5609 res = do_set_arena_test (value);
5610 break;
5611
5612 case M_ARENA_MAX:
5613 if (value > 0)
5614 res = do_set_arena_max (value);
5615 break;
5616 }
5617 __libc_lock_unlock (av->mutex);
5618 return res;
5619 }
5620 libc_hidden_def (__libc_mallopt)
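/*
   Illustrative usage (a sketch, not part of this file; the helper name
   tune_allocator is arbitrary): disable heap trimming and forbid
   mmap-backed allocations for the whole process.

   #include <malloc.h>

   void tune_allocator (void)
   {
     mallopt (M_TRIM_THRESHOLD, -1);   // never return heap top to the kernel
     mallopt (M_MMAP_MAX, 0);          // never satisfy requests with mmap
   }
*/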
5621
5622
5623 /*
5624 -------------------- Alternative MORECORE functions --------------------
5625 */
5626
5627
5628 /*
5629 General Requirements for MORECORE.
5630
5631 The MORECORE function must have the following properties:
5632
5633 If MORECORE_CONTIGUOUS is false:
5634
5635 * MORECORE must allocate in multiples of pagesize. It will
5636 only be called with arguments that are multiples of pagesize.
5637
5638 * MORECORE(0) must return an address that is at least
5639 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5640
5641 else (i.e. if MORECORE_CONTIGUOUS is true):
5642
5643 * Consecutive calls to MORECORE with positive arguments
5644 return increasing addresses, indicating that space has been
5645 contiguously extended.
5646
5647 * MORECORE need not allocate in multiples of pagesize.
5648 Calls to MORECORE need not have args of multiples of pagesize.
5649
5650 * MORECORE need not page-align.
5651
5652 In either case:
5653
5654 * MORECORE may allocate more memory than requested. (Or even less,
5655 but this will generally result in a malloc failure.)
5656
5657 * MORECORE must not allocate memory when given argument zero, but
5658 instead return the address one past the end of the memory obtained
5659 from the previous nonzero call. This malloc does NOT call MORECORE(0)
5660 until at least one call with positive arguments is made, so
5661 the initial value returned is not important.
5662
5663 * Even though consecutive calls to MORECORE need not return contiguous
5664 addresses, it must be OK for malloc'ed chunks to span multiple
5665 regions in those cases where they do happen to be contiguous.
5666
5667 * MORECORE need not handle negative arguments -- it may instead
5668 just return MORECORE_FAILURE when given negative arguments.
5669 Negative arguments are always multiples of pagesize. MORECORE
5670 must not misinterpret negative args as large positive unsigned
5671 args. You can suppress all such calls from even occurring by defining
5672 MORECORE_CANNOT_TRIM.
5673
5674 There is some variation across systems about the type of the
5675 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5676 actually be size_t, because sbrk supports negative args, so it is
5677 normally the signed type of the same width as size_t (sometimes
5678 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5679 matter though. Internally, we use "long" as arguments, which should
5680 work across all reasonable possibilities.
5681
5682 Additionally, if MORECORE ever returns failure for a positive
5683 request, then mmap is used as a noncontiguous system allocator. This
5684 is a useful backup strategy for systems with holes in address spaces
5685 -- in this case sbrk cannot contiguously expand the heap, but mmap
5686 may be able to map noncontiguous space.
5687
5688 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5689 a function that always returns MORECORE_FAILURE.
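   For instance (an illustrative sketch; the name fail_morecore is
   hypothetical, and <stddef.h> is assumed for ptrdiff_t):

   static void *
   fail_morecore (ptrdiff_t increment)
   {
     return (void *) MORECORE_FAILURE;
   }
   *#define MORECORE fail_morecore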
5690
5691 If you are using this malloc with something other than sbrk (or its
5692 emulation) to supply memory regions, you probably want to set
5693 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5694 allocator kindly contributed for pre-OSX macOS. It uses virtually
5695 but not necessarily physically contiguous non-paged memory (locked
5696 in, present and won't get swapped out). You can use it by
5697 uncommenting this section, adding some #includes, and setting up the
5698 appropriate defines above:
5699
5700 *#define MORECORE osMoreCore
5701 *#define MORECORE_CONTIGUOUS 0
5702
5703 There is also a shutdown routine that should somehow be called for
5704 cleanup upon program exit.
5705
5706 *#define MAX_POOL_ENTRIES 100
5707 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5708 static int next_os_pool;
5709 void *our_os_pools[MAX_POOL_ENTRIES];
5710
5711 void *osMoreCore(int size)
5712 {
5713 void *ptr = 0;
5714 static void *sbrk_top = 0;
5715
5716 if (size > 0)
5717 {
5718 if (size < MINIMUM_MORECORE_SIZE)
5719 size = MINIMUM_MORECORE_SIZE;
5720 if (CurrentExecutionLevel() == kTaskLevel)
5721 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5722 if (ptr == 0)
5723 {
5724 return (void *) MORECORE_FAILURE;
5725 }
5726 // save ptrs so they can be freed during cleanup
5727 our_os_pools[next_os_pool] = ptr;
5728 next_os_pool++;
5729 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5730 sbrk_top = (char *) ptr + size;
5731 return ptr;
5732 }
5733 else if (size < 0)
5734 {
5735 // we don't currently support shrink behavior
5736 return (void *) MORECORE_FAILURE;
5737 }
5738 else
5739 {
5740 return sbrk_top;
5741 }
5742 }
5743
5744 // cleanup any allocated memory pools
5745 // called as last thing before shutting down driver
5746
5747 void osCleanupMem(void)
5748 {
5749 void **ptr;
5750
5751 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5752 if (*ptr)
5753 {
5754 PoolDeallocate(*ptr);
5755 * ptr = 0;
5756 }
5757 }
5758
5759 */
5760
5761
5762 /* Helper code. */
5763
5764 extern char **__libc_argv attribute_hidden;
5765
5766 static void
5767 malloc_printerr (const char *str)
5768 {
5769 #if IS_IN (libc)
5770 __libc_message ("%s\n", str);
5771 #else
5772 __libc_fatal (str);
5773 #endif
5774 __builtin_unreachable ();
5775 }
5776
5777 #if IS_IN (libc)
5778 /* We need a wrapper function for posix_memalign, one of the POSIX additions. */
5779 int
5780 __posix_memalign (void **memptr, size_t alignment, size_t size)
5781 {
5782 void *mem;
5783
5784 if (!__malloc_initialized)
5785 ptmalloc_init ();
5786
5787 /* Test whether the ALIGNMENT argument is valid. It must be a power of
5788 two multiple of sizeof (void *). */
5789 if (alignment % sizeof (void *) != 0
5790 || !powerof2 (alignment / sizeof (void *))
5791 || alignment == 0)
5792 return EINVAL;
5793
5794
5795 void *address = RETURN_ADDRESS (0);
5796 mem = _mid_memalign (alignment, size, address);
5797
5798 if (mem != NULL)
5799 {
5800 *memptr = mem;
5801 return 0;
5802 }
5803
5804 return ENOMEM;
5805 }
5806 weak_alias (__posix_memalign, posix_memalign)
5807 #endif
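/*
   Illustrative usage (a sketch, not part of this file; the helper name
   alloc_aligned_buffer is arbitrary): request 1 KiB aligned to a 64-byte
   boundary.  posix_memalign returns 0 on success or an error number
   without setting errno.

   #include <stdlib.h>

   void *alloc_aligned_buffer (void)
   {
     void *buf = NULL;
     if (posix_memalign (&buf, 64, 1024) != 0)
       return NULL;
     return buf;   // release with free()
   }
*/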
5808
5809
5810 int
5811 __malloc_info (int options, FILE *fp)
5812 {
5813 /* For now, at least. */
5814 if (options != 0)
5815 return EINVAL;
5816
5817 int n = 0;
5818 size_t total_nblocks = 0;
5819 size_t total_nfastblocks = 0;
5820 size_t total_avail = 0;
5821 size_t total_fastavail = 0;
5822 size_t total_system = 0;
5823 size_t total_max_system = 0;
5824 size_t total_aspace = 0;
5825 size_t total_aspace_mprotect = 0;
5826
5827
5828
5829 if (!__malloc_initialized)
5830 ptmalloc_init ();
5831
5832 fputs ("<malloc version=\"1\">\n", fp);
5833
5834 /* Iterate over all arenas currently in use. */
5835 mstate ar_ptr = &main_arena;
5836 do
5837 {
5838 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5839
5840 size_t nblocks = 0;
5841 size_t nfastblocks = 0;
5842 size_t avail = 0;
5843 size_t fastavail = 0;
5844 struct
5845 {
5846 size_t from;
5847 size_t to;
5848 size_t total;
5849 size_t count;
5850 } sizes[NFASTBINS + NBINS - 1];
5851 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5852
5853 __libc_lock_lock (ar_ptr->mutex);
5854
5855 /* Account for top chunk. The top-most available chunk is
5856 treated specially and is never in any bin. See "initial_top"
5857 comments. */
5858 avail = chunksize (ar_ptr->top);
5859 nblocks = 1; /* Top always exists. */
5860
5861 for (size_t i = 0; i < NFASTBINS; ++i)
5862 {
5863 mchunkptr p = fastbin (ar_ptr, i);
5864 if (p != NULL)
5865 {
5866 size_t nthissize = 0;
5867 size_t thissize = chunksize (p);
5868
5869 while (p != NULL)
5870 {
5871 if (__glibc_unlikely (misaligned_chunk (p)))
5872 malloc_printerr ("__malloc_info(): "
5873 "unaligned fastbin chunk detected");
5874 ++nthissize;
5875 p = REVEAL_PTR (p->fd);
5876 }
5877
5878 fastavail += nthissize * thissize;
5879 nfastblocks += nthissize;
5880 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5881 sizes[i].to = thissize;
5882 sizes[i].count = nthissize;
5883 }
5884 else
5885 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5886
5887 sizes[i].total = sizes[i].count * sizes[i].to;
5888 }
5889
5890
5891 mbinptr bin;
5892 struct malloc_chunk *r;
5893
5894 for (size_t i = 1; i < NBINS; ++i)
5895 {
5896 bin = bin_at (ar_ptr, i);
5897 r = bin->fd;
5898 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5899 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5900 = sizes[NFASTBINS - 1 + i].count = 0;
5901
5902 if (r != NULL)
5903 while (r != bin)
5904 {
5905 size_t r_size = chunksize_nomask (r);
5906 ++sizes[NFASTBINS - 1 + i].count;
5907 sizes[NFASTBINS - 1 + i].total += r_size;
5908 sizes[NFASTBINS - 1 + i].from
5909 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
5910 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5911 r_size);
5912
5913 r = r->fd;
5914 }
5915
5916 if (sizes[NFASTBINS - 1 + i].count == 0)
5917 sizes[NFASTBINS - 1 + i].from = 0;
5918 nblocks += sizes[NFASTBINS - 1 + i].count;
5919 avail += sizes[NFASTBINS - 1 + i].total;
5920 }
5921
5922 size_t heap_size = 0;
5923 size_t heap_mprotect_size = 0;
5924 size_t heap_count = 0;
5925 if (ar_ptr != &main_arena)
5926 {
5927 /* Iterate over the arena heaps from back to front. */
5928 heap_info *heap = heap_for_ptr (top (ar_ptr));
5929 do
5930 {
5931 heap_size += heap->size;
5932 heap_mprotect_size += heap->mprotect_size;
5933 heap = heap->prev;
5934 ++heap_count;
5935 }
5936 while (heap != NULL);
5937 }
5938
5939 __libc_lock_unlock (ar_ptr->mutex);
5940
5941 total_nfastblocks += nfastblocks;
5942 total_fastavail += fastavail;
5943
5944 total_nblocks += nblocks;
5945 total_avail += avail;
5946
5947 for (size_t i = 0; i < nsizes; ++i)
5948 if (sizes[i].count != 0 && i != NFASTBINS)
5949 fprintf (fp, "\
5950 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5951 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5952
5953 if (sizes[NFASTBINS].count != 0)
5954 fprintf (fp, "\
5955 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5956 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5957 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5958
5959 total_system += ar_ptr->system_mem;
5960 total_max_system += ar_ptr->max_system_mem;
5961
5962 fprintf (fp,
5963 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5964 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5965 "<system type=\"current\" size=\"%zu\"/>\n"
5966 "<system type=\"max\" size=\"%zu\"/>\n",
5967 nfastblocks, fastavail, nblocks, avail,
5968 ar_ptr->system_mem, ar_ptr->max_system_mem);
5969
5970 if (ar_ptr != &main_arena)
5971 {
5972 fprintf (fp,
5973 "<aspace type=\"total\" size=\"%zu\"/>\n"
5974 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5975 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5976 heap_size, heap_mprotect_size, heap_count);
5977 total_aspace += heap_size;
5978 total_aspace_mprotect += heap_mprotect_size;
5979 }
5980 else
5981 {
5982 fprintf (fp,
5983 "<aspace type=\"total\" size=\"%zu\"/>\n"
5984 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5985 ar_ptr->system_mem, ar_ptr->system_mem);
5986 total_aspace += ar_ptr->system_mem;
5987 total_aspace_mprotect += ar_ptr->system_mem;
5988 }
5989
5990 fputs ("</heap>\n", fp);
5991 ar_ptr = ar_ptr->next;
5992 }
5993 while (ar_ptr != &main_arena);
5994
5995 fprintf (fp,
5996 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5997 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5998 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5999 "<system type=\"current\" size=\"%zu\"/>\n"
6000 "<system type=\"max\" size=\"%zu\"/>\n"
6001 "<aspace type=\"total\" size=\"%zu\"/>\n"
6002 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
6003 "</malloc>\n",
6004 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
6005 mp_.n_mmaps, mp_.mmapped_mem,
6006 total_system, total_max_system,
6007 total_aspace, total_aspace_mprotect);
6008
6009 return 0;
6010 }
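/*
   Illustrative usage (a sketch, not part of this file; the helper name
   dump_heap_state is arbitrary): write the XML report for all arenas to
   stderr.  OPTIONS must currently be 0.

   #include <malloc.h>
   #include <stdio.h>

   void dump_heap_state (void)
   {
     malloc_info (0, stderr);
   }
*/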
6011 #if IS_IN (libc)
6012 weak_alias (__malloc_info, malloc_info)
6013
6014 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
6015 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
6016 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
6017 strong_alias (__libc_memalign, __memalign)
6018 weak_alias (__libc_memalign, memalign)
6019 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
6020 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
6021 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
6022 strong_alias (__libc_mallinfo, __mallinfo)
6023 weak_alias (__libc_mallinfo, mallinfo)
6024 strong_alias (__libc_mallinfo2, __mallinfo2)
6025 weak_alias (__libc_mallinfo2, mallinfo2)
6026 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
6027
6028 weak_alias (__malloc_stats, malloc_stats)
6029 weak_alias (__malloc_usable_size, malloc_usable_size)
6030 weak_alias (__malloc_trim, malloc_trim)
6031 #endif
6032
6033 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
6034 compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
6035 #endif
6036
6037 /* ------------------------------------------------------------
6038 History:
6039
6040 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
6041
6042 */
6043 /*
6044 * Local variables:
6045 * c-basic-offset: 2
6046 * End:
6047 */