1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2020 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <https://www.gnu.org/licenses/>. */
20
21 /*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
29 * Version ptmalloc2-20011215
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32
33 * Quickstart
34
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
43
44 * Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71 * Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
76 free(void* p);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
86 pvalloc(size_t n);
87 malloc_trim(size_t pad);
88 malloc_usable_size(void* p);
89 malloc_stats();
90
91 * Vital statistics:
92
93 Supported pointer representation: 4 or 8 bytes
94 Supported size_t representation: 4 or 8 bytes
95 Note that size_t is allowed to be 4 bytes even if pointers are 8.
96 You can adjust this by defining INTERNAL_SIZE_T
97
98 Alignment: 2 * sizeof(size_t) (default)
99 (i.e., 8 byte alignment with 4byte size_t). This suffices for
100 nearly all current machines and C compilers. However, you can
101 define MALLOC_ALIGNMENT to be wider than this if necessary.
102
103 Minimum overhead per allocated chunk: 4 or 8 bytes
104 Each malloced chunk has a hidden word of overhead holding size
105 and status information.
106
107 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
108 			       8-byte ptrs:  24/32 bytes (including 4/8 overhead)
109
110 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
111 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
112 needed; 4 (8) for a trailing size field and 8 (16) bytes for
113 free list pointers. Thus, the minimum allocatable size is
114 16/24/32 bytes.
115
116 Even a request for zero bytes (i.e., malloc(0)) returns a
117 pointer to something of the minimum allocatable size.
118
119 The maximum overhead wastage (i.e., number of extra bytes
120 allocated than were requested in malloc) is less than or equal
121 to the minimum size, except for requests >= mmap_threshold that
122 are serviced via mmap(), where the worst case wastage is 2 *
123 sizeof(size_t) bytes plus the remainder from a system page (the
124 minimal mmap unit); typically 4096 or 8192 bytes.
125
126 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
127 8-byte size_t: 2^64 minus about two pages
128
129 It is assumed that (possibly signed) size_t values suffice to
130 represent chunk sizes. `Possibly signed' is due to the fact
131 that `size_t' may be defined on a system as either a signed or
132 an unsigned type. The ISO C standard says that it must be
133 unsigned, but a few systems are known not to adhere to this.
134 Additionally, even when size_t is unsigned, sbrk (which is by
135 default used to obtain memory from system) accepts signed
136 arguments, and may not be able to handle size_t-wide arguments
137 with negative sign bit. Generally, values that would
138 appear as negative after accounting for overhead and alignment
139 are supported only via mmap(), which does not have this
140 limitation.
141
142 Requests for sizes outside the allowed range will perform an optional
143   failure action and then return null. (Requests may also
144   fail because a system is out of memory.)
145
146 Thread-safety: thread-safe
147
148 Compliance: I believe it is compliant with the 1997 Single Unix Specification
149 Also SVID/XPG, ANSI C, and probably others as well.
150
151 * Synopsis of compile-time options:
152
153 People have reported using previous versions of this malloc on all
154 versions of Unix, sometimes by tweaking some of the defines
155 below. It has been tested most extensively on Solaris and Linux.
156 People also report using it in stand-alone embedded systems.
157
158 The implementation is in straight, hand-tuned ANSI C. It is not
159 at all modular. (Sorry!) It uses a lot of macros. To be at all
160 usable, this code should be compiled using an optimizing compiler
161 (for example gcc -O3) that can simplify expressions and control
162 paths. (FAQ: some macros import variables as arguments rather than
163 declare locals because people reported that some debuggers
164 otherwise get confused.)
165
166 OPTION DEFAULT VALUE
167
168 Compilation Environment options:
169
170 HAVE_MREMAP 0
171
172 Changing default word sizes:
173
174 INTERNAL_SIZE_T size_t
175
176 Configuration and functionality options:
177
178 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
179 USE_MALLOC_LOCK NOT defined
180 MALLOC_DEBUG NOT defined
181 REALLOC_ZERO_BYTES_FREES 1
182 TRIM_FASTBINS 0
183
184 Options for customizing MORECORE:
185
186 MORECORE sbrk
187 MORECORE_FAILURE -1
188 MORECORE_CONTIGUOUS 1
189 MORECORE_CANNOT_TRIM NOT defined
190 MORECORE_CLEARS 1
191 MMAP_AS_MORECORE_SIZE (1024 * 1024)
192
193 Tuning options that are also dynamically changeable via mallopt:
194
195 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
196 DEFAULT_TRIM_THRESHOLD 128 * 1024
197 DEFAULT_TOP_PAD 0
198 DEFAULT_MMAP_THRESHOLD 128 * 1024
199 DEFAULT_MMAP_MAX 65536
200
201 There are several other #defined constants and macros that you
202 probably don't want to touch unless you are extending or adapting malloc. */
203
204 /*
205 void* is the pointer type that malloc should say it returns
206 */
207
208 #ifndef void
209 #define void void
210 #endif /*void*/
211
212 #include <stddef.h> /* for size_t */
213 #include <stdlib.h> /* for getenv(), abort() */
214 #include <unistd.h> /* for __libc_enable_secure */
215
216 #include <atomic.h>
217 #include <_itoa.h>
218 #include <bits/wordsize.h>
219 #include <sys/sysinfo.h>
220
221 #include <ldsodefs.h>
222
223 #include <unistd.h>
224 #include <stdio.h> /* needed for malloc_stats */
225 #include <errno.h>
226 #include <assert.h>
227
228 #include <shlib-compat.h>
229
230 /* For uintptr_t. */
231 #include <stdint.h>
232
233 /* For va_arg, va_start, va_end. */
234 #include <stdarg.h>
235
236 /* For MIN, MAX, powerof2. */
237 #include <sys/param.h>
238
239 /* For ALIGN_UP et. al. */
240 #include <libc-pointer-arith.h>
241
242 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
243 #include <libc-diag.h>
244
245 /* For memory tagging. */
246 #include <libc-mtag.h>
247
248 #include <malloc/malloc-internal.h>
249
250 /* For SINGLE_THREAD_P. */
251 #include <sysdep-cancel.h>
252
253 #include <libc-internal.h>
254
255 /*
256 Debugging:
257
258 Because freed chunks may be overwritten with bookkeeping fields, this
259 malloc will often die when freed memory is overwritten by user
260 programs. This can be very effective (albeit in an annoying way)
261 in helping track down dangling pointers.
262
263 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
264 enabled that will catch more memory errors. You probably won't be
265 able to make much sense of the actual assertion errors, but they
266 should help you locate incorrectly overwritten memory. The checking
267 is fairly extensive, and will slow down execution
268 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
269 will attempt to check every non-mmapped allocated and free chunk in
270   the course of computing the summaries. (By nature, mmapped regions
271 cannot be checked very much automatically.)
272
273 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
274 this code. The assertions in the check routines spell out in more
275 detail the assumptions and invariants underlying the algorithms.
276
277 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
278 checking that all accesses to malloced memory stay within their
279 bounds. However, there are several add-ons and adaptations of this
280 or other mallocs available that do this.
281 */
282
283 #ifndef MALLOC_DEBUG
284 #define MALLOC_DEBUG 0
285 #endif
286
287 #ifndef NDEBUG
288 # define __assert_fail(assertion, file, line, function) \
289 __malloc_assert(assertion, file, line, function)
290
291 extern const char *__progname;
292
293 static void
294 __malloc_assert (const char *assertion, const char *file, unsigned int line,
295 const char *function)
296 {
297 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
298 __progname, __progname[0] ? ": " : "",
299 file, line,
300 function ? function : "", function ? ": " : "",
301 assertion);
302 fflush (stderr);
303 abort ();
304 }
305 #endif
306
307 #if USE_TCACHE
308 /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
309 # define TCACHE_MAX_BINS 64
310 # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
311
312 /* Only used to pre-fill the tunables. */
313 # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
314
315 /* When "x" is from chunksize(). */
316 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
317 /* When "x" is a user-provided size. */
318 # define usize2tidx(x) csize2tidx (request2size (x))
319
320 /* With rounding and alignment, the bins are...
321 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
322 idx 1 bytes 25..40 or 13..20
323 idx 2 bytes 41..56 or 21..28
324 etc. */
325
326 /* This is another arbitrary limit, which tunables can change. Each
327 tcache bin will hold at most this number of chunks. */
328 # define TCACHE_FILL_COUNT 7
329
330 /* Maximum chunks in tcache bins for tunables. This value must fit the range
331 of tcache->counts[] entries, else they may overflow. */
332 # define MAX_TCACHE_COUNT UINT16_MAX
333 #endif
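/* A worked example of the index mapping above (illustrative sketch only,
   assuming the usual 64-bit values MALLOC_ALIGNMENT == 16, SIZE_SZ == 8
   and therefore MINSIZE == 32):

     tidx2usize (0)  == 0 * 16 + 32 - 8 == 24    largest user size in bin 0
     tidx2usize (1)  == 1 * 16 + 32 - 8 == 40    largest user size in bin 1

     usize2tidx (24) == csize2tidx (request2size (24))
		     == csize2tidx (32) == (32 - 32 + 15) / 16 == 0
     usize2tidx (25) == csize2tidx (48) == (48 - 32 + 15) / 16 == 1

   which matches the "idx 0 bytes 0..24, idx 1 bytes 25..40" table.  */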
334
335 /* Safe-Linking:
336 Use randomness from ASLR (mmap_base) to protect single-linked lists
337 of Fast-Bins and TCache. That is, mask the "next" pointers of the
338 lists' chunks, and also perform allocation alignment checks on them.
339 This mechanism reduces the risk of pointer hijacking, as was done with
340 Safe-Unlinking in the double-linked lists of Small-Bins.
341 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
342 larger pages provide less entropy, although the pointer mangling
343 still works. */
344 #define PROTECT_PTR(pos, ptr) \
345 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
346 #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
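/* Illustrative sketch of the round trip (not allocator code): a
   singly-linked "next" field is stored in protected form and recovered
   by XOR-ing with the (shifted) address of the field itself.  Here `e'
   stands for any node with a singly-linked `next' pointer.

     e->next = PROTECT_PTR (&e->next, old_head);   // store masked link
     ...
     next = REVEAL_PTR (e->next);                  // recover old_head

   REVEAL_PTR (e->next) expands to PROTECT_PTR (&e->next, e->next), so
   applying the same XOR twice with the same storage address yields the
   original pointer.  A corrupted or foreign link will usually fail the
   accompanying alignment checks instead.  */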
347
348 /*
349 REALLOC_ZERO_BYTES_FREES should be set if a call to
350 realloc with zero bytes should be the same as a call to free.
351   This is required by the C standard. Otherwise, because this malloc
352   returns a unique pointer for malloc(0), realloc(p, 0) does too.
353 */
354
355 #ifndef REALLOC_ZERO_BYTES_FREES
356 #define REALLOC_ZERO_BYTES_FREES 1
357 #endif
358
359 /*
360 TRIM_FASTBINS controls whether free() of a very small chunk can
361 immediately lead to trimming. Setting to true (1) can reduce memory
362 footprint, but will almost always slow down programs that use a lot
363 of small chunks.
364
365 Define this only if you are willing to give up some speed to more
366 aggressively reduce system-level memory footprint when releasing
367 memory in programs that use many small chunks. You can get
368 essentially the same effect by setting MXFAST to 0, but this can
369 lead to even greater slowdowns in programs using many small chunks.
370 TRIM_FASTBINS is an in-between compile-time option, that disables
371 only those chunks bordering topmost memory from being placed in
372 fastbins.
373 */
374
375 #ifndef TRIM_FASTBINS
376 #define TRIM_FASTBINS 0
377 #endif
378
379
380 /* Definition for getting more memory from the OS. */
381 #define MORECORE (*__morecore)
382 #define MORECORE_FAILURE 0
383 void * __default_morecore (ptrdiff_t);
384 void *(*__morecore)(ptrdiff_t) = __default_morecore;
385
386 /* Memory tagging. */
387
388 /* Some systems support the concept of tagging (sometimes known as
389 coloring) memory locations on a fine grained basis. Each memory
390 location is given a color (normally allocated randomly) and
391 pointers are also colored. When the pointer is dereferenced, the
392 pointer's color is checked against the memory's color and if they
393 differ the access is faulted (sometimes lazily).
394
395 We use this in glibc by maintaining a single color for the malloc
396 data structures that are interleaved with the user data and then
397 assigning separate colors for each block allocation handed out. In
398 this way simple buffer overruns will be rapidly detected. When
399 memory is freed, the memory is recolored back to the glibc default
400 so that simple use-after-free errors can also be detected.
401
402 If memory is reallocated the buffer is recolored even if the
403 address remains the same. This has a performance impact, but
404 guarantees that the old pointer cannot mistakenly be reused (code
405 that compares old against new will see a mismatch and will then
406 need to behave as though realloc moved the data to a new location).
407
408 Internal API for memory tagging support.
409
410 The aim is to keep the code for memory tagging support as close to
411 the normal APIs in glibc as possible, so that if tagging is not
412 enabled in the library, or is disabled at runtime then standard
413 operations can continue to be used. Support macros are used to do
414 this:
415
416    void *TAG_NEW_MEMSET (void *ptr, int val, size_t size)
417
418 Has the same interface as memset(), but additionally allocates a
419 new tag, colors the memory with that tag and returns a pointer that
420 is correctly colored for that location. The non-tagging version
421 will simply call memset.
422
423 void *TAG_REGION (void *ptr, size_t size)
424
425 Color the region of memory pointed to by PTR and size SIZE with
426 the color of PTR. Returns the original pointer.
427
428 void *TAG_NEW_USABLE (void *ptr)
429
430 Allocate a new random color and use it to color the user region of
431 a chunk; this may include data from the subsequent chunk's header
432 if tagging is sufficiently fine grained. Returns PTR suitably
433 recolored for accessing the memory there.
434
435 void *TAG_AT (void *ptr)
436
437 Read the current color of the memory at the address pointed to by
438    PTR (ignoring its current color) and return PTR recolored to that
439    color.  PTR must be a valid address in all other respects.  When
440 tagging is not enabled, it simply returns the original pointer.
441 */
442
443 #ifdef USE_MTAG
444
445 /* Default implementations when memory tagging is supported, but disabled.  */
446 static void *
447 __default_tag_region (void *ptr, size_t size)
448 {
449 return ptr;
450 }
451
452 static void *
453 __default_tag_nop (void *ptr)
454 {
455 return ptr;
456 }
457
458 static int __mtag_mmap_flags = 0;
459 static size_t __mtag_granule_mask = ~(size_t)0;
460
461 static void *(*__tag_new_memset)(void *, int, size_t) = memset;
462 static void *(*__tag_region)(void *, size_t) = __default_tag_region;
463 static void *(*__tag_new_usable)(void *) = __default_tag_nop;
464 static void *(*__tag_at)(void *) = __default_tag_nop;
465
466 # define TAG_NEW_MEMSET(ptr, val, size) __tag_new_memset (ptr, val, size)
467 # define TAG_REGION(ptr, size) __tag_region (ptr, size)
468 # define TAG_NEW_USABLE(ptr) __tag_new_usable (ptr)
469 # define TAG_AT(ptr) __tag_at (ptr)
470 #else
471 # define TAG_NEW_MEMSET(ptr, val, size) memset (ptr, val, size)
472 # define TAG_REGION(ptr, size) (ptr)
473 # define TAG_NEW_USABLE(ptr) (ptr)
474 # define TAG_AT(ptr) (ptr)
475 #endif
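/* Illustrative sketch (not glibc code): with the definitions above a
   caller can use the macros unconditionally.  For example, clearing a
   freshly allocated user region and obtaining a correctly colored
   pointer:

     void *user = TAG_NEW_MEMSET (mem, 0, size);

   is plain `memset (mem, 0, size)' when USE_MTAG is undefined, and when
   USE_MTAG is defined it dispatches through a function pointer that, if
   tagging is enabled at runtime, additionally assigns a fresh color to
   the region.  */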
476
477 #include <string.h>
478
479 /*
480 MORECORE-related declarations. By default, rely on sbrk
481 */
482
483
484 /*
485 MORECORE is the name of the routine to call to obtain more memory
486 from the system. See below for general guidance on writing
487 alternative MORECORE functions, as well as a version for WIN32 and a
488 sample version for pre-OSX macos.
489 */
490
491 #ifndef MORECORE
492 #define MORECORE sbrk
493 #endif
494
495 /*
496 MORECORE_FAILURE is the value returned upon failure of MORECORE
497 as well as mmap. Since it cannot be an otherwise valid memory address,
498 and must reflect values of standard sys calls, you probably ought not
499 try to redefine it.
500 */
501
502 #ifndef MORECORE_FAILURE
503 #define MORECORE_FAILURE (-1)
504 #endif
505
506 /*
507 If MORECORE_CONTIGUOUS is true, take advantage of fact that
508 consecutive calls to MORECORE with positive arguments always return
509 contiguous increasing addresses. This is true of unix sbrk. Even
510 if not defined, when regions happen to be contiguous, malloc will
511 permit allocations spanning regions obtained from different
512 calls. But defining this when applicable enables some stronger
513 consistency checks and space efficiencies.
514 */
515
516 #ifndef MORECORE_CONTIGUOUS
517 #define MORECORE_CONTIGUOUS 1
518 #endif
519
520 /*
521 Define MORECORE_CANNOT_TRIM if your version of MORECORE
522 cannot release space back to the system when given negative
523 arguments. This is generally necessary only if you are using
524 a hand-crafted MORECORE function that cannot handle negative arguments.
525 */
526
527 /* #define MORECORE_CANNOT_TRIM */
528
529 /* MORECORE_CLEARS (default 1)
530 The degree to which the routine mapped to MORECORE zeroes out
531 memory: never (0), only for newly allocated space (1) or always
532 (2). The distinction between (1) and (2) is necessary because on
533 some systems, if the application first decrements and then
534 increments the break value, the contents of the reallocated space
535 are unspecified.
536 */
537
538 #ifndef MORECORE_CLEARS
539 # define MORECORE_CLEARS 1
540 #endif
541
542
543 /*
544 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
545 sbrk fails, and mmap is used as a backup. The value must be a
546 multiple of page size. This backup strategy generally applies only
547 when systems have "holes" in address space, so sbrk cannot perform
548 contiguous expansion, but there is still space available on system.
549 On systems for which this is known to be useful (i.e. most linux
550 kernels), this occurs only when programs allocate huge amounts of
551 memory. Between this, and the fact that mmap regions tend to be
552 limited, the size should be large, to avoid too many mmap calls and
553 thus avoid running out of kernel resources. */
554
555 #ifndef MMAP_AS_MORECORE_SIZE
556 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
557 #endif
558
559 /*
560 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
561 large blocks.
562 */
563
564 #ifndef HAVE_MREMAP
565 #define HAVE_MREMAP 0
566 #endif
567
568 /* We may need to support __malloc_initialize_hook for backwards
569 compatibility. */
570
571 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
572 # define HAVE_MALLOC_INIT_HOOK 1
573 #else
574 # define HAVE_MALLOC_INIT_HOOK 0
575 #endif
576
577
578 /*
579 This version of malloc supports the standard SVID/XPG mallinfo
580 routine that returns a struct containing usage properties and
581 statistics. It should work on any SVID/XPG compliant system that has
582 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
583 install such a thing yourself, cut out the preliminary declarations
584 as described above and below and save them in a malloc.h file. But
585 there's no compelling reason to bother to do this.)
586
587 The main declaration needed is the mallinfo struct that is returned
588   (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
589   bunch of fields that are not even meaningful in this version of
590   malloc.  These fields are instead filled by mallinfo() with
591 other numbers that might be of interest.
592 */
593
594
595 /* ---------- description of public routines ------------ */
596
597 /*
598 malloc(size_t n)
599 Returns a pointer to a newly allocated chunk of at least n bytes, or null
600 if no space is available. Additionally, on failure, errno is
601 set to ENOMEM on ANSI C systems.
602
603 If n is zero, malloc returns a minimum-sized chunk. (The minimum
604 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
605 systems.) On most systems, size_t is an unsigned type, so calls
606 with negative arguments are interpreted as requests for huge amounts
607 of space, which will often fail. The maximum supported value of n
608 differs across systems, but is in all cases less than the maximum
609 representable value of a size_t.
610 */
611 void* __libc_malloc(size_t);
612 libc_hidden_proto (__libc_malloc)
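/* Illustrative usage sketch (not part of the implementation): a failed
   request returns null with errno set, so callers can report it
   directly.

     void *p = malloc (n);
     if (p == NULL)
       perror ("malloc");        // errno is ENOMEM on failure
*/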
613
614 /*
615 free(void* p)
616 Releases the chunk of memory pointed to by p, that had been previously
617 allocated using malloc or a related routine such as realloc.
618 It has no effect if p is null. It can have arbitrary (i.e., bad!)
619 effects if p has already been freed.
620
621 Unless disabled (using mallopt), freeing very large spaces will
622 when possible, automatically trigger operations that give
623 back unused memory to the system, thus reducing program footprint.
624 */
625 void __libc_free(void*);
626 libc_hidden_proto (__libc_free)
627
628 /*
629 calloc(size_t n_elements, size_t element_size);
630 Returns a pointer to n_elements * element_size bytes, with all locations
631 set to zero.
632 */
633 void* __libc_calloc(size_t, size_t);
634
635 /*
636 realloc(void* p, size_t n)
637 Returns a pointer to a chunk of size n that contains the same data
638 as does chunk p up to the minimum of (n, p's size) bytes, or null
639 if no space is available.
640
641 The returned pointer may or may not be the same as p. The algorithm
642 prefers extending p when possible, otherwise it employs the
643 equivalent of a malloc-copy-free sequence.
644
645 If p is null, realloc is equivalent to malloc.
646
647 If space is not available, realloc returns null, errno is set (if on
648 ANSI) and p is NOT freed.
649
650   If n is for fewer bytes than already held by p, the newly unused
651 space is lopped off and freed if possible. Unless the #define
652 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
653 zero (re)allocates a minimum-sized chunk.
654
655 Large chunks that were internally obtained via mmap will always be
656 grown using malloc-copy-free sequences unless the system supports
657 MREMAP (currently only linux).
658
659 The old unix realloc convention of allowing the last-free'd chunk
660 to be used as an argument to realloc is not supported.
661 */
662 void* __libc_realloc(void*, size_t);
663 libc_hidden_proto (__libc_realloc)
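/* Illustrative usage sketch (not part of the implementation): because p
   is NOT freed when realloc fails, assign through a temporary so the
   original block is neither leaked nor lost.

     void *tmp = realloc (p, n);
     if (tmp == NULL)
       {
	 // p is still valid here; handle the failure (errno == ENOMEM)
       }
     else
       p = tmp;
*/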
664
665 /*
666 memalign(size_t alignment, size_t n);
667 Returns a pointer to a newly allocated chunk of n bytes, aligned
668 in accord with the alignment argument.
669
670 The alignment argument should be a power of two. If the argument is
671 not a power of two, the nearest greater power is used.
672 8-byte alignment is guaranteed by normal malloc calls, so don't
673 bother calling memalign with an argument of 8 or less.
674
675 Overreliance on memalign is a sure way to fragment space.
676 */
677 void* __libc_memalign(size_t, size_t);
678 libc_hidden_proto (__libc_memalign)
679
680 /*
681 valloc(size_t n);
682 Equivalent to memalign(pagesize, n), where pagesize is the page
683 size of the system. If the pagesize is unknown, 4096 is used.
684 */
685 void* __libc_valloc(size_t);
686
687
688
689 /*
690 mallopt(int parameter_number, int parameter_value)
691   Sets tunable parameters.  The format is to provide a
692 (parameter-number, parameter-value) pair. mallopt then sets the
693 corresponding parameter to the argument value if it can (i.e., so
694 long as the value is meaningful), and returns 1 if successful else
695 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
696 normally defined in malloc.h. Only one of these (M_MXFAST) is used
697 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
698 so setting them has no effect. But this malloc also supports four
699 other options in mallopt. See below for details. Briefly, supported
700 parameters are as follows (listed defaults are for "typical"
701 configurations).
702
703 Symbol param # default allowed param values
704 M_MXFAST 1 64 0-80 (0 disables fastbins)
705 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
706 M_TOP_PAD -2 0 any
707 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
708 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
709 */
710 int __libc_mallopt(int, int);
711 libc_hidden_proto (__libc_mallopt)
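/* Illustrative usage sketch (not part of the implementation): disable
   fastbins entirely, which makes the allocator behave closer to pure
   FIFO best-fit (see the M_MXFAST discussion below).

     if (mallopt (M_MXFAST, 0) != 1)
       fputs ("mallopt failed\n", stderr);   // returns 1 on success
*/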
712
713
714 /*
715 mallinfo()
716 Returns (by copy) a struct containing various summary statistics:
717
718 arena: current total non-mmapped bytes allocated from system
719 ordblks: the number of free chunks
720 smblks: the number of fastbin blocks (i.e., small chunks that
721 	     have been freed but not yet reused or consolidated)
722 hblks: current number of mmapped regions
723 hblkhd: total bytes held in mmapped regions
724 usmblks: always 0
725 fsmblks: total bytes held in fastbin blocks
726 uordblks: current total allocated space (normal or mmapped)
727 fordblks: total free space
728 keepcost: the maximum number of bytes that could ideally be released
729 back to system via malloc_trim. ("ideally" means that
730 it ignores page restrictions etc.)
731
732 Because these fields are ints, but internal bookkeeping may
733 be kept as longs, the reported values may wrap around zero and
734 thus be inaccurate.
735 */
736 struct mallinfo2 __libc_mallinfo2(void);
737 libc_hidden_proto (__libc_mallinfo2)
738
739 struct mallinfo __libc_mallinfo(void);
740
741
742 /*
743 pvalloc(size_t n);
744 Equivalent to valloc(minimum-page-that-holds(n)), that is,
745 round up n to nearest pagesize.
746 */
747 void* __libc_pvalloc(size_t);
748
749 /*
750 malloc_trim(size_t pad);
751
752 If possible, gives memory back to the system (via negative
753 arguments to sbrk) if there is unused memory at the `high' end of
754 the malloc pool. You can call this after freeing large blocks of
755 memory to potentially reduce the system-level memory requirements
756 of a program. However, it cannot guarantee to reduce memory. Under
757 some allocation patterns, some large free blocks of memory will be
758 locked between two used chunks, so they cannot be given back to
759 the system.
760
761 The `pad' argument to malloc_trim represents the amount of free
762 trailing space to leave untrimmed. If this argument is zero,
763 only the minimum amount of memory to maintain internal data
764 structures will be left (one page or less). Non-zero arguments
765 can be supplied to maintain enough trailing space to service
766 future expected allocations without having to re-obtain memory
767 from the system.
768
769 Malloc_trim returns 1 if it actually released any memory, else 0.
770 On systems that do not support "negative sbrks", it will always
771 return 0.
772 */
773 int __malloc_trim(size_t);
774
775 /*
776 malloc_usable_size(void* p);
777
778 Returns the number of bytes you can actually use in
779 an allocated chunk, which may be more than you requested (although
780 often not) due to alignment and minimum size constraints.
781 You can use this many bytes without worrying about
782 overwriting other allocated objects. This is not a particularly great
783 programming practice. malloc_usable_size can be more useful in
784 debugging and assertions, for example:
785
786 p = malloc(n);
787 assert(malloc_usable_size(p) >= 256);
788
789 */
790 size_t __malloc_usable_size(void*);
791
792 /*
793 malloc_stats();
794 Prints on stderr the amount of space obtained from the system (both
795 via sbrk and mmap), the maximum amount (which may be more than
796 current if malloc_trim and/or munmap got called), and the current
797 number of bytes allocated via malloc (or realloc, etc) but not yet
798 freed. Note that this is the number of bytes allocated, not the
799 number requested. It will be larger than the number requested
800 because of alignment and bookkeeping overhead. Because it includes
801 alignment wastage as being in use, this figure may be greater than
802 zero even when no user-level chunks are allocated.
803
804 The reported current and maximum system memory can be inaccurate if
805 a program makes other calls to system memory allocation functions
806 (normally sbrk) outside of malloc.
807
808 malloc_stats prints only the most commonly interesting statistics.
809 More information can be obtained by calling mallinfo.
810
811 */
812 void __malloc_stats(void);
813
814 /*
815 posix_memalign(void **memptr, size_t alignment, size_t size);
816
817 POSIX wrapper like memalign(), checking for validity of size.
818 */
819 int __posix_memalign(void **, size_t, size_t);
820
821 /* mallopt tuning options */
822
823 /*
824 M_MXFAST is the maximum request size used for "fastbins", special bins
825 that hold returned chunks without consolidating their spaces. This
826 enables future requests for chunks of the same size to be handled
827 very quickly, but can increase fragmentation, and thus increase the
828 overall memory footprint of a program.
829
830 This malloc manages fastbins very conservatively yet still
831 efficiently, so fragmentation is rarely a problem for values less
832 than or equal to the default. The maximum supported value of MXFAST
833 is 80. You wouldn't want it any higher than this anyway. Fastbins
834 are designed especially for use with many small structs, objects or
835 strings -- the default handles structs/objects/arrays with sizes up
836 to 8 4byte fields, or small strings representing words, tokens,
837 etc. Using fastbins for larger objects normally worsens
838 fragmentation without improving speed.
839
840 M_MXFAST is set in REQUEST size units. It is internally used in
841 chunksize units, which adds padding and alignment. You can reduce
842 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
843 algorithm to be a closer approximation of fifo-best-fit in all cases,
844 not just for larger requests, but will generally cause it to be
845 slower.
846 */
847
848
849 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
850 #ifndef M_MXFAST
851 #define M_MXFAST 1
852 #endif
853
854 #ifndef DEFAULT_MXFAST
855 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
856 #endif
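/* With SIZE_SZ == 4 the expression above evaluates to 64 bytes, and with
   SIZE_SZ == 8 to 128 bytes, matching the "64 (for 32bit), 128 (for
   64bit)" defaults listed in the option summary near the top of this
   file.  */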
857
858
859 /*
860 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
861 to keep before releasing via malloc_trim in free().
862
863 Automatic trimming is mainly useful in long-lived programs.
864 Because trimming via sbrk can be slow on some systems, and can
865 sometimes be wasteful (in cases where programs immediately
866 afterward allocate more large chunks) the value should be high
867 enough so that your overall system performance would improve by
868 releasing this much memory.
869
870 The trim threshold and the mmap control parameters (see below)
871 can be traded off with one another. Trimming and mmapping are
872 two different ways of releasing unused memory back to the
873 system. Between these two, it is often possible to keep
874 system-level demands of a long-lived program down to a bare
875 minimum. For example, in one test suite of sessions measuring
876 the XF86 X server on Linux, using a trim threshold of 128K and a
877 mmap threshold of 192K led to near-minimal long term resource
878 consumption.
879
880 If you are using this malloc in a long-lived program, it should
881 pay to experiment with these values. As a rough guide, you
882       might set it to a value close to the average size of a process
883 (program) running on your system. Releasing this much memory
884 would allow such a process to run in memory. Generally, it's
885       worth it to tune for trimming rather than memory mapping when a
886 program undergoes phases where several large chunks are
887 allocated and released in ways that can reuse each other's
888 storage, perhaps mixed with phases where there are no such
889 chunks at all. And in well-behaved long-lived programs,
890 controlling release of large blocks via trimming versus mapping
891 is usually faster.
892
893 However, in most programs, these parameters serve mainly as
894 protection against the system-level effects of carrying around
895 massive amounts of unneeded memory. Since frequent calls to
896 sbrk, mmap, and munmap otherwise degrade performance, the default
897 parameters are set to relatively high values that serve only as
898 safeguards.
899
900       The trim value must be greater than page size to have any useful
901 effect. To disable trimming completely, you can set to
902 (unsigned long)(-1)
903
904 Trim settings interact with fastbin (MXFAST) settings: Unless
905 TRIM_FASTBINS is defined, automatic trimming never takes place upon
906 freeing a chunk with size less than or equal to MXFAST. Trimming is
907 instead delayed until subsequent freeing of larger chunks. However,
908 you can still force an attempted trim by calling malloc_trim.
909
910 Also, trimming is not generally possible in cases where
911 the main arena is obtained via mmap.
912
913 Note that the trick some people use of mallocing a huge space and
914 then freeing it at program startup, in an attempt to reserve system
915 memory, doesn't have the intended effect under automatic trimming,
916 since that memory will immediately be returned to the system.
917 */
918
919 #define M_TRIM_THRESHOLD -1
920
921 #ifndef DEFAULT_TRIM_THRESHOLD
922 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
923 #endif
924
925 /*
926 M_TOP_PAD is the amount of extra `padding' space to allocate or
927 retain whenever sbrk is called. It is used in two ways internally:
928
929 * When sbrk is called to extend the top of the arena to satisfy
930 a new malloc request, this much padding is added to the sbrk
931 request.
932
933 * When malloc_trim is called automatically from free(),
934 it is used as the `pad' argument.
935
936 In both cases, the actual amount of padding is rounded
937 so that the end of the arena is always a system page boundary.
938
939 The main reason for using padding is to avoid calling sbrk so
940 often. Having even a small pad greatly reduces the likelihood
941 that nearly every malloc request during program start-up (or
942 after trimming) will invoke sbrk, which needlessly wastes
943 time.
944
945 Automatic rounding-up to page-size units is normally sufficient
946 to avoid measurable overhead, so the default is 0. However, in
947 systems where sbrk is relatively slow, it can pay to increase
948 this value, at the expense of carrying around more memory than
949 the program needs.
950 */
951
952 #define M_TOP_PAD -2
953
954 #ifndef DEFAULT_TOP_PAD
955 #define DEFAULT_TOP_PAD (0)
956 #endif
957
958 /*
959 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
960 adjusted MMAP_THRESHOLD.
961 */
962
963 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
964 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
965 #endif
966
967 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
968 /* For 32-bit platforms we cannot increase the maximum mmap
969 threshold much because it is also the minimum value for the
970 maximum heap size and its alignment. Going above 512k (i.e., 1M
971 for new heaps) wastes too much address space. */
972 # if __WORDSIZE == 32
973 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
974 # else
975 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
976 # endif
977 #endif
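/* For reference: with sizeof (long) == 8 the 64-bit expression above
   evaluates to 32 MiB (4 * 1024 * 1024 * 8), while 32-bit platforms are
   capped at 512 KiB for the address-space reasons explained in the
   comment.  */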
978
979 /*
980 M_MMAP_THRESHOLD is the request size threshold for using mmap()
981 to service a request. Requests of at least this size that cannot
982 be allocated using already-existing space will be serviced via mmap.
983 (If enough normal freed space already exists it is used instead.)
984
985 Using mmap segregates relatively large chunks of memory so that
986 they can be individually obtained and released from the host
987 system. A request serviced through mmap is never reused by any
988 other request (at least not directly; the system may just so
989 happen to remap successive requests to the same locations).
990
991 Segregating space in this way has the benefits that:
992
993 1. Mmapped space can ALWAYS be individually released back
994 to the system, which helps keep the system level memory
995 demands of a long-lived program low.
996 2. Mapped memory can never become `locked' between
997 other chunks, as can happen with normally allocated chunks, which
998 means that even trimming via malloc_trim would not release them.
999 3. On some systems with "holes" in address spaces, mmap can obtain
1000 memory that sbrk cannot.
1001
1002 However, it has the disadvantages that:
1003
1004 1. The space cannot be reclaimed, consolidated, and then
1005 used to service later requests, as happens with normal chunks.
1006 2. It can lead to more wastage because of mmap page alignment
1007 requirements
1008 3. It causes malloc performance to be more dependent on host
1009 system memory management support routines which may vary in
1010 implementation quality and may impose arbitrary
1011 limitations. Generally, servicing a request via normal
1012 malloc steps is faster than going through a system's mmap.
1013
1014 The advantages of mmap nearly always outweigh disadvantages for
1015 "large" chunks, but the value of "large" varies across systems. The
1016 default is an empirically derived value that works well in most
1017 systems.
1018
1019
1020 Update in 2006:
1021 The above was written in 2001. Since then the world has changed a lot.
1022 Memory got bigger. Applications got bigger. The virtual address space
1023 layout in 32 bit linux changed.
1024
1025 In the new situation, brk() and mmap space is shared and there are no
1026 artificial limits on brk size imposed by the kernel. What is more,
1027 applications have started using transient allocations larger than the
1028   128Kb that was imagined in 2001.
1029
1030 The price for mmap is also high now; each time glibc mmaps from the
1031 kernel, the kernel is forced to zero out the memory it gives to the
1032 application. Zeroing memory is expensive and eats a lot of cache and
1033 memory bandwidth. This has nothing to do with the efficiency of the
1034   virtual memory system; by doing mmap the kernel just has no choice but
1035   to zero.
1036
1037 In 2001, the kernel had a maximum size for brk() which was about 800
1038   megabytes on 32 bit x86; at that point brk() would hit the first
1039   mmapped shared libraries and couldn't expand anymore. With current 2.6
1040 kernels, the VA space layout is different and brk() and mmap
1041 both can span the entire heap at will.
1042
1043 Rather than using a static threshold for the brk/mmap tradeoff,
1044 we are now using a simple dynamic one. The goal is still to avoid
1045 fragmentation. The old goals we kept are
1046 1) try to get the long lived large allocations to use mmap()
1047 2) really large allocations should always use mmap()
1048 and we're adding now:
1049   3) transient allocations should use brk() to avoid forcing the kernel
1050      to zero memory over and over again
1051
1052 The implementation works with a sliding threshold, which is by default
1053   limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1054 out at 128Kb as per the 2001 default.
1055
1056 This allows us to satisfy requirement 1) under the assumption that long
1057 lived allocations are made early in the process' lifespan, before it has
1058 started doing dynamic allocations of the same size (which will
1059 increase the threshold).
1060
1061   The upper bound on the threshold satisfies requirement 2).
1062
1063 The threshold goes up in value when the application frees memory that was
1064 allocated with the mmap allocator. The idea is that once the application
1065 starts freeing memory of a certain size, it's highly probable that this is
1066 a size the application uses for transient allocations. This estimator
1067 is there to satisfy the new third requirement.
1068
1069 */
1070
1071 #define M_MMAP_THRESHOLD -3
1072
1073 #ifndef DEFAULT_MMAP_THRESHOLD
1074 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1075 #endif
1076
1077 /*
1078 M_MMAP_MAX is the maximum number of requests to simultaneously
1079 service using mmap. This parameter exists because
1080 some systems have a limited number of internal tables for
1081 use by mmap, and using more than a few of them may degrade
1082 performance.
1083
1084 The default is set to a value that serves only as a safeguard.
1085 Setting to 0 disables use of mmap for servicing large requests.
1086 */
1087
1088 #define M_MMAP_MAX -4
1089
1090 #ifndef DEFAULT_MMAP_MAX
1091 #define DEFAULT_MMAP_MAX (65536)
1092 #endif
1093
1094 #include <malloc.h>
1095
1096 #ifndef RETURN_ADDRESS
1097 #define RETURN_ADDRESS(X_) (NULL)
1098 #endif
1099
1100 /* Forward declarations. */
1101 struct malloc_chunk;
1102 typedef struct malloc_chunk* mchunkptr;
1103
1104 /* Internal routines. */
1105
1106 static void* _int_malloc(mstate, size_t);
1107 static void _int_free(mstate, mchunkptr, int);
1108 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1109 INTERNAL_SIZE_T);
1110 static void* _int_memalign(mstate, size_t, size_t);
1111 static void* _mid_memalign(size_t, size_t, void *);
1112
1113 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
1114
1115 static void* mem2mem_check(void *p, size_t sz);
1116 static void top_check(void);
1117 static void munmap_chunk(mchunkptr p);
1118 #if HAVE_MREMAP
1119 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
1120 #endif
1121
1122 static void* malloc_check(size_t sz, const void *caller);
1123 static void free_check(void* mem, const void *caller);
1124 static void* realloc_check(void* oldmem, size_t bytes,
1125 const void *caller);
1126 static void* memalign_check(size_t alignment, size_t bytes,
1127 const void *caller);
1128
1129 /* ------------------ MMAP support ------------------ */
1130
1131
1132 #include <fcntl.h>
1133 #include <sys/mman.h>
1134
1135 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1136 # define MAP_ANONYMOUS MAP_ANON
1137 #endif
1138
1139 #ifndef MAP_NORESERVE
1140 # define MAP_NORESERVE 0
1141 #endif
1142
1143 #define MMAP(addr, size, prot, flags) \
1144 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1145
1146
1147 /*
1148 ----------------------- Chunk representations -----------------------
1149 */
1150
1151
1152 /*
1153 This struct declaration is misleading (but accurate and necessary).
1154 It declares a "view" into memory allowing access to necessary
1155 fields at known offsets from a given base. See explanation below.
1156 */
1157
1158 struct malloc_chunk {
1159
1160 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1161 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1162
1163 struct malloc_chunk* fd; /* double links -- used only if free. */
1164 struct malloc_chunk* bk;
1165
1166 /* Only used for large blocks: pointer to next larger size. */
1167 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1168 struct malloc_chunk* bk_nextsize;
1169 };
1170
1171
1172 /*
1173 malloc_chunk details:
1174
1175 (The following includes lightly edited explanations by Colin Plumb.)
1176
1177 Chunks of memory are maintained using a `boundary tag' method as
1178 described in e.g., Knuth or Standish. (See the paper by Paul
1179 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1180 survey of such techniques.) Sizes of free chunks are stored both
1181 in the front of each chunk and at the end. This makes
1182 consolidating fragmented chunks into bigger chunks very fast. The
1183 size fields also hold bits representing whether chunks are free or
1184 in use.
1185
1186 An allocated chunk looks like this:
1187
1188
1189 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1190 | Size of previous chunk, if unallocated (P clear) |
1191 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1192 | Size of chunk, in bytes |A|M|P|
1193 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1194 | User data starts here... .
1195 . .
1196 . (malloc_usable_size() bytes) .
1197 . |
1198 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1199 | (size of chunk, but used for application data) |
1200 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1201 | Size of next chunk, in bytes |A|0|1|
1202 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1203
1204 Where "chunk" is the front of the chunk for the purpose of most of
1205 the malloc code, but "mem" is the pointer that is returned to the
1206 user. "Nextchunk" is the beginning of the next contiguous chunk.
1207
1208 Chunks always begin on even word boundaries, so the mem portion
1209 (which is returned to the user) is also on an even word boundary, and
1210 thus at least double-word aligned.
1211
1212 Free chunks are stored in circular doubly-linked lists, and look like this:
1213
1214 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1215 | Size of previous chunk, if unallocated (P clear) |
1216 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1217 `head:' | Size of chunk, in bytes |A|0|P|
1218 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1219 | Forward pointer to next chunk in list |
1220 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1221 | Back pointer to previous chunk in list |
1222 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1223 | Unused space (may be 0 bytes long) .
1224 . .
1225 . |
1226 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1227 `foot:' | Size of chunk, in bytes |
1228 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1229 | Size of next chunk, in bytes |A|0|0|
1230 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1231
1232 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1233 chunk size (which is always a multiple of two words), is an in-use
1234 bit for the *previous* chunk. If that bit is *clear*, then the
1235 word before the current chunk size contains the previous chunk
1236 size, and can be used to find the front of the previous chunk.
1237 The very first chunk allocated always has this bit set,
1238 preventing access to non-existent (or non-owned) memory. If
1239 prev_inuse is set for any given chunk, then you CANNOT determine
1240 the size of the previous chunk, and might even get a memory
1241 addressing fault when trying to do so.
1242
1243 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1244 main arena, described by the main_arena variable. When additional
1245 threads are spawned, each thread receives its own arena (up to a
1246 configurable limit, after which arenas are reused for multiple
1247 threads), and the chunks in these arenas have the A bit set. To
1248 find the arena for a chunk on such a non-main arena, heap_for_ptr
1249 performs a bit mask operation and indirection through the ar_ptr
1250 member of the per-heap header heap_info (see arena.c).
1251
1252 Note that the `foot' of the current chunk is actually represented
1253 as the prev_size of the NEXT chunk. This makes it easier to
1254 deal with alignments etc but can be very confusing when trying
1255 to extend or adapt this code.
1256
1257 The three exceptions to all this are:
1258
1259 1. The special chunk `top' doesn't bother using the
1260 trailing size field since there is no next contiguous chunk
1261 that would have to index off it. After initialization, `top'
1262 is forced to always exist. If it would become less than
1263 MINSIZE bytes long, it is replenished.
1264
1265 2. Chunks allocated via mmap, which have the second-lowest-order
1266 bit M (IS_MMAPPED) set in their size fields. Because they are
1267 allocated one-by-one, each must contain its own trailing size
1268 field. If the M bit is set, the other bits are ignored
1269 (because mmapped chunks are neither in an arena, nor adjacent
1270 to a freed chunk). The M bit is also used for chunks which
1271 originally came from a dumped heap via malloc_set_state in
1272 hooks.c.
1273
1274 3. Chunks in fastbins are treated as allocated chunks from the
1275 point of view of the chunk allocator. They are consolidated
1276 with their neighbors only in bulk, in malloc_consolidate.
1277 */
1278
1279 /*
1280 ---------- Size and alignment checks and conversions ----------
1281 */
1282
1283 /* Conversion from malloc headers to user pointers, and back. When
1284 using memory tagging the user data and the malloc data structure
1285 headers have distinct tags. Converting fully from one to the other
1286 involves extracting the tag at the other address and creating a
1287 suitable pointer using it. That can be quite expensive. There are
1288    many occasions, though, when the pointer will not be dereferenced
1289 (for example, because we only want to assert that the pointer is
1290 correctly aligned). In these cases it is more efficient not
1291 to extract the tag, since the answer will be the same either way.
1292 chunk2rawmem() can be used in these cases.
1293 */
1294
1295 /* The chunk header is two SIZE_SZ elements, but this is used widely, so
1296 we define it here for clarity later. */
1297 #define CHUNK_HDR_SZ (2 * SIZE_SZ)
1298
1299 /* Convert a user mem pointer to a chunk address without correcting
1300 the tag. */
1301 #define chunk2rawmem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
1302
1303 /* Convert between user mem pointers and chunk pointers, updating any
1304 memory tags on the pointer to respect the tag value at that
1305 location. */
1306 #define chunk2mem(p) ((void*)TAG_AT (((char*)(p) + CHUNK_HDR_SZ)))
1307 #define mem2chunk(mem) ((mchunkptr)TAG_AT (((char*)(mem) - CHUNK_HDR_SZ)))
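/* Illustrative sketch: with 8-byte SIZE_SZ the header is 16 bytes, so the
   conversion either way is a fixed offset of CHUNK_HDR_SZ (plus a retag
   via TAG_AT when memory tagging is active):

     mchunkptr p    = mem2chunk (mem);    // mem - 16 on 64-bit
     void     *mem2 = chunk2mem (p);      // p + 16, i.e. mem again
*/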
1308
1309 /* The smallest possible chunk */
1310 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1311
1312 /* The smallest size we can malloc is an aligned minimal chunk */
1313
1314 #define MINSIZE \
1315 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1316
1317 /* Check if m has acceptable alignment */
1318
1319 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1320
1321 #define misaligned_chunk(p) \
1322 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
1323 & MALLOC_ALIGN_MASK)
1324
1325 /* pad request bytes into a usable size -- internal version */
1326 /* Note: This must be a macro that evaluates to a compile time constant
1327 if passed a literal constant. */
1328 #define request2size(req) \
1329 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1330 MINSIZE : \
1331 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
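/* Worked example (64-bit, SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, so
   MALLOC_ALIGN_MASK == 15 and MINSIZE == 32):

     request2size (0)  == 32                        below MINSIZE, use MINSIZE
     request2size (24) == (24 + 8 + 15) & ~15 == 32
     request2size (25) == (25 + 8 + 15) & ~15 == 48

   i.e. the user request is padded by one size word and rounded up to the
   next multiple of MALLOC_ALIGNMENT, never going below MINSIZE.  */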
1332
1333 /* Available size of chunk. This is the size of the real usable data
1334 in the chunk, plus the chunk header. */
1335 #ifdef USE_MTAG
1336 #define CHUNK_AVAILABLE_SIZE(p) \
1337 ((chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ)) \
1338 & __mtag_granule_mask)
1339 #else
1340 #define CHUNK_AVAILABLE_SIZE(p) \
1341 (chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1342 #endif
1343
1344 /* Check if REQ overflows when padded and aligned, and if the request is
1345    no larger than PTRDIFF_MAX.  Returns TRUE and stores the padded request
1346    size (or MINSIZE, if that value would be below MINSIZE) in *SZ, or
1347    returns FALSE if any of these checks fail.  */
1348 static inline bool
1349 checked_request2size (size_t req, size_t *sz) __nonnull (1)
1350 {
1351 if (__glibc_unlikely (req > PTRDIFF_MAX))
1352 return false;
1353
1354 #ifdef USE_MTAG
1355 /* When using tagged memory, we cannot share the end of the user
1356 block with the header for the next chunk, so ensure that we
1357 allocate blocks that are rounded up to the granule size. Take
1358 care not to overflow from close to MAX_SIZE_T to a small
1359 number. Ideally, this would be part of request2size(), but that
1360 must be a macro that produces a compile time constant if passed
1361 a constant literal. */
1362 req = (req + ~__mtag_granule_mask) & __mtag_granule_mask;
1363 #endif
1364
1365 *sz = request2size (req);
1366 return true;
1367 }
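/* Callers use this helper roughly as follows (illustrative sketch only):

     size_t nb;
     if (!checked_request2size (bytes, &nb))
       {
	 __set_errno (ENOMEM);   // request too large to represent
	 return NULL;
       }
     // nb is now the padded, aligned internal chunk size
*/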
1368
1369 /*
1370 --------------- Physical chunk operations ---------------
1371 */
1372
1373
1374 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1375 #define PREV_INUSE 0x1
1376
1377 /* extract inuse bit of previous chunk */
1378 #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1379
1380
1381 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1382 #define IS_MMAPPED 0x2
1383
1384 /* check for mmap()'ed chunk */
1385 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1386
1387
1388 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1389 from a non-main arena. This is only set immediately before handing
1390 the chunk to the user, if necessary. */
1391 #define NON_MAIN_ARENA 0x4
1392
1393 /* Check for chunk from main arena. */
1394 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1395
1396 /* Mark a chunk as not being on the main arena. */
1397 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1398
1399
1400 /*
1401 Bits to mask off when extracting size
1402
1403 Note: IS_MMAPPED is intentionally not masked off from size field in
1404 macros for which mmapped chunks should never be seen. This should
1405 cause helpful core dumps to occur if it is tried by accident by
1406 people extending or adapting this malloc.
1407 */
1408 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1409
1410 /* Get size, ignoring use bits */
1411 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1412
1413 /* Like chunksize, but do not mask SIZE_BITS. */
1414 #define chunksize_nomask(p) ((p)->mchunk_size)
1415
1416 /* Ptr to next physical malloc_chunk. */
1417 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1418
1419 /* Size of the chunk below P. Only valid if !prev_inuse (P). */
1420 #define prev_size(p) ((p)->mchunk_prev_size)
1421
1422 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1423 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1424
1425 /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1426 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1427
1428 /* Treat space at ptr + offset as a chunk */
1429 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1430
1431 /* extract p's inuse bit */
1432 #define inuse(p) \
1433 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1434
1435 /* set/clear chunk as being inuse without otherwise disturbing */
1436 #define set_inuse(p) \
1437 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1438
1439 #define clear_inuse(p) \
1440 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1441
1442
1443 /* check/set/clear inuse bits in known places */
1444 #define inuse_bit_at_offset(p, s) \
1445 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1446
1447 #define set_inuse_bit_at_offset(p, s) \
1448 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1449
1450 #define clear_inuse_bit_at_offset(p, s) \
1451 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1452
1453
1454 /* Set size at head, without disturbing its use bit */
1455 #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1456
1457 /* Set size/use field */
1458 #define set_head(p, s) ((p)->mchunk_size = (s))
1459
1460 /* Set size at footer (only when chunk is not in use) */
1461 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
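/* Illustrative example (not compiled in): chunk sizes are always multiples
   of MALLOC_ALIGNMENT, so the three low bits of the size word are free to
   hold PREV_INUSE, IS_MMAPPED and NON_MAIN_ARENA.  A header word of 0x95
   therefore encodes a 0x90-byte chunk with PREV_INUSE and NON_MAIN_ARENA
   set.  */
#if 0
#include <assert.h>
#include <stddef.h>

static void
example_size_bits (void)
{
  const size_t prev_inuse_bit = 0x1;
  const size_t is_mmapped_bit = 0x2;
  const size_t non_main_arena_bit = 0x4;
  const size_t size_bits = prev_inuse_bit | is_mmapped_bit | non_main_arena_bit;

  size_t header = 0x90 | prev_inuse_bit | non_main_arena_bit;   /* == 0x95 */

  assert ((header & ~size_bits) == 0x90);        /* chunksize () */
  assert ((header & prev_inuse_bit) != 0);       /* prev_inuse () */
  assert ((header & is_mmapped_bit) == 0);       /* !chunk_is_mmapped () */
  assert ((header & non_main_arena_bit) != 0);   /* !chunk_main_arena () */
}
#endif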
1462
1463 #pragma GCC poison mchunk_size
1464 #pragma GCC poison mchunk_prev_size
1465
1466 /*
1467 -------------------- Internal data structures --------------------
1468
1469 All internal state is held in an instance of malloc_state defined
1470 below. There are no other static variables, except in two optional
1471 cases:
1472 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1473 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1474 for mmap.
1475
1476 Beware of lots of tricks that minimize the total bookkeeping space
1477 requirements. The result is a little over 1K bytes (for 4-byte
1478 pointers and size_t).
1479 */
1480
1481 /*
1482 Bins
1483
1484 An array of bin headers for free chunks. Each bin is doubly
1485 linked. The bins are approximately proportionally (log) spaced.
1486 There are a lot of these bins (128). This may look excessive, but
1487 works very well in practice. Most bins hold sizes that are
1488 unusual as malloc request sizes, but are more usual for fragments
1489 and consolidated sets of chunks, which is what these bins hold, so
1490 they can be found quickly. All procedures maintain the invariant
1491 that no consolidated chunk physically borders another one, so each
1492 chunk in a list is known to be preceded and followed by either
1493 inuse chunks or the ends of memory.
1494
1495 Chunks in bins are kept in size order, with ties going to the
1496 approximately least recently used chunk. Ordering isn't needed
1497 for the small bins, which all contain the same-sized chunks, but
1498 facilitates best-fit allocation for larger chunks. These lists
1499 are just sequential. Keeping them in order almost never requires
1500 enough traversal to warrant using fancier ordered data
1501 structures.
1502
1503 Chunks of the same size are linked with the most
1504 recently freed at the front, and allocations are taken from the
1505 back. This results in LRU (FIFO) allocation order, which tends
1506 to give each chunk an equal opportunity to be consolidated with
1507 adjacent freed chunks, resulting in larger free chunks and less
1508 fragmentation.
1509
1510 To simplify use in double-linked lists, each bin header acts
1511 as a malloc_chunk. This avoids special-casing for headers.
1512 But to conserve space and improve locality, we allocate
1513 only the fd/bk pointers of bins, and then use repositioning tricks
1514 to treat these as the fields of a malloc_chunk*.
1515 */
1516
1517 typedef struct malloc_chunk *mbinptr;
1518
1519 /* addressing -- note that bin_at(0) does not exist */
1520 #define bin_at(m, i) \
1521 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1522 - offsetof (struct malloc_chunk, fd))
1523
1524 /* analog of ++bin */
1525 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1526
1527 /* Reminders about list directionality within bins */
1528 #define first(b) ((b)->fd)
1529 #define last(b) ((b)->bk)
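/* Illustrative example (not compiled in): bin_at() subtracts
   offsetof (struct malloc_chunk, fd) so that bins[(i-1)*2] and
   bins[(i-1)*2 + 1] line up exactly with the fd and bk members of a
   pretend malloc_chunk.  A miniature stand-alone model of the same trick,
   using a simplified chunk layout: */
#if 0
#include <assert.h>
#include <stddef.h>

struct mini_chunk
{
  size_t prev_size;
  size_t size;
  struct mini_chunk *fd;
  struct mini_chunk *bk;
};

static void
example_bin_at (void)
{
  struct mini_chunk *bins[8] = { 0 };   /* two pointers per bin */
  int i = 2;                            /* any bin index >= 1 */

  struct mini_chunk *b = (struct mini_chunk *)
    ((char *) &bins[(i - 1) * 2] - offsetof (struct mini_chunk, fd));

  /* b->fd and b->bk alias bins[(i-1)*2] and bins[(i-1)*2 + 1]: an empty
     bin simply points back at its own header.  */
  b->fd = b->bk = b;
  assert (bins[(i - 1) * 2] == b && bins[(i - 1) * 2 + 1] == b);
}
#endif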
1530
1531 /*
1532 Indexing
1533
1534 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1535 8 bytes apart. Larger bins are approximately logarithmically spaced:
1536
1537 64 bins of size 8
1538 32 bins of size 64
1539 16 bins of size 512
1540 8 bins of size 4096
1541 4 bins of size 32768
1542 2 bins of size 262144
1543 1 bin of size what's left
1544
1545 There is actually a little bit of slop in the numbers in bin_index
1546 for the sake of speed. This makes no difference elsewhere.
1547
1548 The bins top out around 1MB because we expect to service large
1549 requests via mmap.
1550
1551 Bin 0 does not exist. Bin 1 is the unsorted list; if that index would
1552 correspond to a valid chunk size, the small bins are bumped up by one.
1553 */
1554
1555 #define NBINS 128
1556 #define NSMALLBINS 64
1557 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1558 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
1559 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1560
1561 #define in_smallbin_range(sz) \
1562 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1563
1564 #define smallbin_index(sz) \
1565 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1566 + SMALLBIN_CORRECTION)
1567
1568 #define largebin_index_32(sz) \
1569 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1570 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1571 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1572 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1573 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1574 126)
1575
1576 #define largebin_index_32_big(sz) \
1577 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1578 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1579 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1580 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1581 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1582 126)
1583
1584 // XXX It remains to be seen whether it is good to keep the widths of
1585 // XXX the buckets the same or whether it should be scaled by a factor
1586 // XXX of two as well.
1587 #define largebin_index_64(sz) \
1588 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1589 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1590 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1591 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1592 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1593 126)
1594
1595 #define largebin_index(sz) \
1596 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1597 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1598 : largebin_index_32 (sz))
1599
1600 #define bin_index(sz) \
1601 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
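/* Illustrative example (not compiled in; assumes a 64-bit build where
   SIZE_SZ == 8, SMALLBIN_WIDTH == 16, SMALLBIN_CORRECTION == 0 and hence
   MIN_LARGE_SIZE == 1024):  */
#if 0
#include <assert.h>

static void
example_bin_index (void)
{
  /* Small bins: one bin per 16-byte size class.  */
  assert ((32 >> 4) == 2);         /* smallbin_index (32)   */
  assert ((1008 >> 4) == 63);      /* smallbin_index (1008) -- last small bin */

  /* Large bins: 64-byte-wide bins first, then coarser spacing.  */
  assert (48 + (1024 >> 6) == 64);      /* largebin_index_64 (1024)  */
  assert (110 + (12288 >> 12) == 113);  /* largebin_index_64 (12288) */
}
#endif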
1602
1603 /* Take a chunk off a bin list. */
1604 static void
1605 unlink_chunk (mstate av, mchunkptr p)
1606 {
1607 if (chunksize (p) != prev_size (next_chunk (p)))
1608 malloc_printerr ("corrupted size vs. prev_size");
1609
1610 mchunkptr fd = p->fd;
1611 mchunkptr bk = p->bk;
1612
1613 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1614 malloc_printerr ("corrupted double-linked list");
1615
1616 fd->bk = bk;
1617 bk->fd = fd;
1618 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1619 {
1620 if (p->fd_nextsize->bk_nextsize != p
1621 || p->bk_nextsize->fd_nextsize != p)
1622 malloc_printerr ("corrupted double-linked list (not small)");
1623
1624 if (fd->fd_nextsize == NULL)
1625 {
1626 if (p->fd_nextsize == p)
1627 fd->fd_nextsize = fd->bk_nextsize = fd;
1628 else
1629 {
1630 fd->fd_nextsize = p->fd_nextsize;
1631 fd->bk_nextsize = p->bk_nextsize;
1632 p->fd_nextsize->bk_nextsize = fd;
1633 p->bk_nextsize->fd_nextsize = fd;
1634 }
1635 }
1636 else
1637 {
1638 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1639 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1640 }
1641 }
1642 }
1643
1644 /*
1645 Unsorted chunks
1646
1647 All remainders from chunk splits, as well as all returned chunks,
1648 are first placed in the "unsorted" bin. They are then placed
1649 in regular bins after malloc gives them ONE chance to be used before
1650 binning. So, basically, the unsorted_chunks list acts as a queue,
1651 with chunks being placed on it in free (and malloc_consolidate),
1652 and taken off (to be either used or placed in bins) in malloc.
1653
1654 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1655 does not have to be taken into account in size comparisons.
1656 */
1657
1658 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1659 #define unsorted_chunks(M) (bin_at (M, 1))
1660
1661 /*
1662 Top
1663
1664 The top-most available chunk (i.e., the one bordering the end of
1665 available memory) is treated specially. It is never included in
1666 any bin, is used only if no other chunk is available, and is
1667 released back to the system if it is very large (see
1668 M_TRIM_THRESHOLD). Because top initially
1669 points to its own bin with initial zero size, thus forcing
1670 extension on the first malloc request, we avoid having any special
1671 code in malloc to check whether it even exists yet. But we still
1672 need to do so when getting memory from system, so we make
1673 initial_top treat the bin as a legal but unusable chunk during the
1674 interval between initialization and the first call to
1675 sysmalloc. (This is somewhat delicate, since it relies on
1676 the 2 preceding words to be zero during this interval as well.)
1677 */
1678
1679 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1680 #define initial_top(M) (unsorted_chunks (M))
1681
1682 /*
1683 Binmap
1684
1685 To help compensate for the large number of bins, a one-level index
1686 structure is used for bin-by-bin searching. `binmap' is a
1687 bitvector recording whether bins are definitely empty so they can
1688 be skipped over during traversals. The bits are NOT always
1689 cleared as soon as bins are empty, but instead only
1690 when they are noticed to be empty during traversal in malloc.
1691 */
1692
1693 /* Conservatively use 32 bits per map word, even on a 64-bit system */
1694 #define BINMAPSHIFT 5
1695 #define BITSPERMAP (1U << BINMAPSHIFT)
1696 #define BINMAPSIZE (NBINS / BITSPERMAP)
1697
1698 #define idx2block(i) ((i) >> BINMAPSHIFT)
1699 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1700
1701 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1702 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1703 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
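/* Illustrative example (not compiled in): with BINMAPSHIFT == 5, each of
   the four 32-bit map words covers 32 bins; bin 70 lives in word 2, bit 6.  */
#if 0
#include <assert.h>

static void
example_binmap (void)
{
  unsigned int binmap[4] = { 0 };            /* NBINS / BITSPERMAP words */
  unsigned int i = 70;

  unsigned int block = i >> 5;               /* idx2block (70) == 2    */
  unsigned int bit = 1U << (i & 31);         /* idx2bit (70)   == 0x40 */

  binmap[block] |= bit;                      /* mark_bin (m, 70)   */
  assert ((binmap[2] & 0x40) != 0);          /* get_binmap (m, 70) */
  binmap[block] &= ~bit;                     /* unmark_bin (m, 70) */
  assert (binmap[2] == 0);
}
#endif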
1704
1705 /*
1706 Fastbins
1707
1708 An array of lists holding recently freed small chunks. Fastbins
1709 are not doubly linked. It is faster to single-link them, and
1710 since chunks are never removed from the middles of these lists,
1711 double linking is not necessary. Also, unlike regular bins, they
1712 are not even processed in FIFO order (they use faster LIFO) since
1713 ordering doesn't much matter in the transient contexts in which
1714 fastbins are normally used.
1715
1716 Chunks in fastbins keep their inuse bit set, so they cannot
1717 be consolidated with other free chunks. malloc_consolidate
1718 releases all chunks in fastbins and consolidates them with
1719 other free chunks.
1720 */
1721
1722 typedef struct malloc_chunk *mfastbinptr;
1723 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1724
1725 /* offset 2 to use otherwise unindexable first 2 bins */
1726 #define fastbin_index(sz) \
1727 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1728
1729
1730 /* The maximum fastbin request size we support */
1731 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1732
1733 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
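/* Illustrative example (not compiled in; assumes SIZE_SZ == 8): fastbin
   indexing is a simple shift, so chunk sizes 32, 48, ... map to
   consecutive bins, and NFASTBINS works out to 10.  */
#if 0
#include <assert.h>

static void
example_fastbin_index (void)
{
  assert ((32 >> 4) - 2 == 0);     /* fastbin_index (32)  */
  assert ((48 >> 4) - 2 == 1);     /* fastbin_index (48)  */
  assert ((128 >> 4) - 2 == 6);    /* fastbin_index (128) */

  /* MAX_FAST_SIZE == 80 * 8 / 4 == 160, request2size (160) == 176,
     fastbin_index (176) == 9, hence NFASTBINS == 10.  */
  assert ((176 >> 4) - 2 + 1 == 10);
}
#endif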
1734
1735 /*
1736 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1737 that triggers automatic consolidation of possibly-surrounding
1738 fastbin chunks. This is a heuristic, so the exact value should not
1739 matter too much. It is defined at half the default trim threshold as a
1740 compromise heuristic to only attempt consolidation if it is likely
1741 to lead to trimming. However, it is not dynamically tunable, since
1742 consolidation reduces fragmentation surrounding large chunks even
1743 if trimming is not used.
1744 */
1745
1746 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1747
1748 /*
1749 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1750 regions. Otherwise, contiguity is exploited in merging together,
1751 when possible, results from consecutive MORECORE calls.
1752
1753 The initial value comes from MORECORE_CONTIGUOUS, but is
1754 changed dynamically if mmap is ever used as an sbrk substitute.
1755 */
1756
1757 #define NONCONTIGUOUS_BIT (2U)
1758
1759 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1760 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1761 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1762 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1763
1764 /* Maximum size of memory handled in fastbins. */
1765 static INTERNAL_SIZE_T global_max_fast;
1766
1767 /*
1768 Set value of max_fast.
1769 Use impossibly small value if 0.
1770 Precondition: there are no existing fastbin chunks in the main arena.
1771 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1772 before changing max_fast. Note other arenas will leak their fast bin
1773 entries if max_fast is reduced.
1774 */
1775
1776 #define set_max_fast(s) \
1777 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
1778 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1779
1780 static inline INTERNAL_SIZE_T
1781 get_max_fast (void)
1782 {
1783 /* Tell the GCC optimizers that global_max_fast is never larger
1784 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1785 _int_malloc after constant propagation of the size parameter.
1786 (The code never executes because malloc preserves the
1787 global_max_fast invariant, but the optimizers may not recognize
1788 this.) */
1789 if (global_max_fast > MAX_FAST_SIZE)
1790 __builtin_unreachable ();
1791 return global_max_fast;
1792 }
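/* Illustrative example (not compiled in): the public knob for max_fast is
   mallopt (M_MXFAST, n).  A value of 0 selects the "impossibly small"
   limit, effectively disabling fastbins; other values are rounded to a
   usable chunk size, e.g. 80 stays 80 on a 64-bit build where SIZE_SZ == 8
   and MALLOC_ALIGN_MASK == 15, since (80 + 8) & ~15 == 80.  */
#if 0
#include <malloc.h>

static void
example_set_max_fast (void)
{
  mallopt (M_MXFAST, 0);    /* no requests are served from fastbins */
  mallopt (M_MXFAST, 80);   /* fastbins handle chunks up to 80 bytes */
}
#endif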
1793
1794 /*
1795 ----------- Internal state representation and initialization -----------
1796 */
1797
1798 /*
1799 have_fastchunks indicates that there are probably some fastbin chunks.
1800 It is set true on entering a chunk into any fastbin, and cleared early in
1801 malloc_consolidate. The value is approximate since it may be set when there
1802 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1803 available. Given that its sole purpose is to reduce the number of redundant calls to
1804 malloc_consolidate, it does not affect correctness. As a result we can safely
1805 use relaxed atomic accesses.
1806 */
1807
1808
1809 struct malloc_state
1810 {
1811 /* Serialize access. */
1812 __libc_lock_define (, mutex);
1813
1814 /* Flags (formerly in max_fast). */
1815 int flags;
1816
1817 /* Set if the fastbin chunks contain recently inserted free blocks. */
1818 /* Note this is a bool but not all targets support atomics on booleans. */
1819 int have_fastchunks;
1820
1821 /* Fastbins */
1822 mfastbinptr fastbinsY[NFASTBINS];
1823
1824 /* Base of the topmost chunk -- not otherwise kept in a bin */
1825 mchunkptr top;
1826
1827 /* The remainder from the most recent split of a small request */
1828 mchunkptr last_remainder;
1829
1830 /* Normal bins packed as described above */
1831 mchunkptr bins[NBINS * 2 - 2];
1832
1833 /* Bitmap of bins */
1834 unsigned int binmap[BINMAPSIZE];
1835
1836 /* Linked list */
1837 struct malloc_state *next;
1838
1839 /* Linked list for free arenas. Access to this field is serialized
1840 by free_list_lock in arena.c. */
1841 struct malloc_state *next_free;
1842
1843 /* Number of threads attached to this arena. 0 if the arena is on
1844 the free list. Access to this field is serialized by
1845 free_list_lock in arena.c. */
1846 INTERNAL_SIZE_T attached_threads;
1847
1848 /* Memory allocated from the system in this arena. */
1849 INTERNAL_SIZE_T system_mem;
1850 INTERNAL_SIZE_T max_system_mem;
1851 };
1852
1853 struct malloc_par
1854 {
1855 /* Tunable parameters */
1856 unsigned long trim_threshold;
1857 INTERNAL_SIZE_T top_pad;
1858 INTERNAL_SIZE_T mmap_threshold;
1859 INTERNAL_SIZE_T arena_test;
1860 INTERNAL_SIZE_T arena_max;
1861
1862 /* Memory map support */
1863 int n_mmaps;
1864 int n_mmaps_max;
1865 int max_n_mmaps;
1866 /* the mmap_threshold is dynamic, until the user sets
1867 it manually, at which point we need to disable any
1868 dynamic behavior. */
1869 int no_dyn_threshold;
1870
1871 /* Statistics */
1872 INTERNAL_SIZE_T mmapped_mem;
1873 INTERNAL_SIZE_T max_mmapped_mem;
1874
1875 /* First address handed out by MORECORE/sbrk. */
1876 char *sbrk_base;
1877
1878 #if USE_TCACHE
1879 /* Maximum number of buckets to use. */
1880 size_t tcache_bins;
1881 size_t tcache_max_bytes;
1882 /* Maximum number of chunks in each bucket. */
1883 size_t tcache_count;
1884 /* Maximum number of chunks to remove from the unsorted list, which
1885 aren't used to prefill the cache. */
1886 size_t tcache_unsorted_limit;
1887 #endif
1888 };
1889
1890 /* There are several instances of this struct ("arenas") in this
1891 malloc. If you are adapting this malloc in a way that does NOT use
1892 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1893 before using. This malloc relies on the property that malloc_state
1894 is initialized to all zeroes (as is true of C statics). */
1895
1896 static struct malloc_state main_arena =
1897 {
1898 .mutex = _LIBC_LOCK_INITIALIZER,
1899 .next = &main_arena,
1900 .attached_threads = 1
1901 };
1902
1903 /* These variables are used for undumping support. Chunks are marked
1904 as using mmap, but we leave them alone if they fall into this
1905 range. NB: The chunk size for these chunks only includes the
1906 initial size field (of SIZE_SZ bytes), there is no trailing size
1907 field (unlike with regular mmapped chunks). */
1908 static mchunkptr dumped_main_arena_start; /* Inclusive. */
1909 static mchunkptr dumped_main_arena_end; /* Exclusive. */
1910
1911 /* True if the pointer falls into the dumped arena. Use this after
1912 chunk_is_mmapped indicates a chunk is mmapped. */
1913 #define DUMPED_MAIN_ARENA_CHUNK(p) \
1914 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1915
1916 /* There is only one instance of the malloc parameters. */
1917
1918 static struct malloc_par mp_ =
1919 {
1920 .top_pad = DEFAULT_TOP_PAD,
1921 .n_mmaps_max = DEFAULT_MMAP_MAX,
1922 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1923 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1924 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1925 .arena_test = NARENAS_FROM_NCORES (1)
1926 #if USE_TCACHE
1927 ,
1928 .tcache_count = TCACHE_FILL_COUNT,
1929 .tcache_bins = TCACHE_MAX_BINS,
1930 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1931 .tcache_unsorted_limit = 0 /* No limit. */
1932 #endif
1933 };
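/* Illustrative example (not compiled in): the fields above are what the
   public mallopt() interface adjusts, e.g. M_TRIM_THRESHOLD ->
   trim_threshold, M_TOP_PAD -> top_pad, M_MMAP_THRESHOLD -> mmap_threshold
   and M_MMAP_MAX -> n_mmaps_max; setting any of these also sets
   no_dyn_threshold, freezing the otherwise dynamic mmap threshold.  */
#if 0
#include <malloc.h>

int
main (void)
{
  mallopt (M_MMAP_THRESHOLD, 1024 * 1024);       /* mmap requests >= 1 MiB  */
  mallopt (M_TRIM_THRESHOLD, 2 * 1024 * 1024);   /* trim top above 2 MiB    */
  mallopt (M_TOP_PAD, 128 * 1024);               /* over-allocate from sbrk */
  return 0;
}
#endif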
1934
1935 /*
1936 Initialize a malloc_state struct.
1937
1938 This is called from ptmalloc_init () or from _int_new_arena ()
1939 when creating a new arena.
1940 */
1941
1942 static void
1943 malloc_init_state (mstate av)
1944 {
1945 int i;
1946 mbinptr bin;
1947
1948 /* Establish circular links for normal bins */
1949 for (i = 1; i < NBINS; ++i)
1950 {
1951 bin = bin_at (av, i);
1952 bin->fd = bin->bk = bin;
1953 }
1954
1955 #if MORECORE_CONTIGUOUS
1956 if (av != &main_arena)
1957 #endif
1958 set_noncontiguous (av);
1959 if (av == &main_arena)
1960 set_max_fast (DEFAULT_MXFAST);
1961 atomic_store_relaxed (&av->have_fastchunks, false);
1962
1963 av->top = initial_top (av);
1964 }
1965
1966 /*
1967 Other internal utilities operating on mstates
1968 */
1969
1970 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1971 static int systrim (size_t, mstate);
1972 static void malloc_consolidate (mstate);
1973
1974
1975 /* -------------- Early definitions for debugging hooks ---------------- */
1976
1977 /* Define and initialize the hook variables. These weak definitions must
1978 appear before any use of the variables in a function (arena.c uses one). */
1979 #ifndef weak_variable
1980 /* In GNU libc we want the hook variables to be weak definitions to
1981 avoid a problem with Emacs. */
1982 # define weak_variable weak_function
1983 #endif
1984
1985 /* Forward declarations. */
1986 static void *malloc_hook_ini (size_t sz,
1987 const void *caller) __THROW;
1988 static void *realloc_hook_ini (void *ptr, size_t sz,
1989 const void *caller) __THROW;
1990 static void *memalign_hook_ini (size_t alignment, size_t sz,
1991 const void *caller) __THROW;
1992
1993 #if HAVE_MALLOC_INIT_HOOK
1994 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1995 compat_symbol (libc, __malloc_initialize_hook,
1996 __malloc_initialize_hook, GLIBC_2_0);
1997 #endif
1998
1999 void weak_variable (*__free_hook) (void *__ptr,
2000 const void *) = NULL;
2001 void *weak_variable (*__malloc_hook)
2002 (size_t __size, const void *) = malloc_hook_ini;
2003 void *weak_variable (*__realloc_hook)
2004 (void *__ptr, size_t __size, const void *)
2005 = realloc_hook_ini;
2006 void *weak_variable (*__memalign_hook)
2007 (size_t __alignment, size_t __size, const void *)
2008 = memalign_hook_ini;
2009 void weak_variable (*__after_morecore_hook) (void) = NULL;
2010
2011 /* This function is called from the arena shutdown hook, to free the
2012 thread cache (if it exists). */
2013 static void tcache_thread_shutdown (void);
2014
2015 /* ------------------ Testing support ----------------------------------*/
2016
2017 static int perturb_byte;
2018
2019 static void
2020 alloc_perturb (char *p, size_t n)
2021 {
2022 if (__glibc_unlikely (perturb_byte))
2023 memset (p, perturb_byte ^ 0xff, n);
2024 }
2025
2026 static void
2027 free_perturb (char *p, size_t n)
2028 {
2029 if (__glibc_unlikely (perturb_byte))
2030 memset (p, perturb_byte, n);
2031 }
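/* Illustrative example (not compiled in): perturb_byte is set via
   mallopt (M_PERTURB, n) or the MALLOC_PERTURB_ environment variable.
   Freshly allocated memory (outside calloc and some cache fast paths) is
   then filled with the complement of the byte, and freed memory with the
   byte itself, which helps flush out uses of uninitialized or freed
   memory.  */
#if 0
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  mallopt (M_PERTURB, 0xAB);

  unsigned char *p = malloc (64);
  if (p != NULL)
    /* Typically prints 0x54, i.e. 0xAB ^ 0xff, courtesy of alloc_perturb.  */
    printf ("first byte after malloc: 0x%02x\n", p[0]);
  free (p);
  return 0;
}
#endif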
2032
2033
2034
2035 #include <stap-probe.h>
2036
2037 /* ------------------- Support for multiple arenas -------------------- */
2038 #include "arena.c"
2039
2040 /*
2041 Debugging support
2042
2043 These routines make a number of assertions about the states
2044 of data structures that should be true at all times. If any
2045 are not true, it's very likely that a user program has somehow
2046 trashed memory. (It's also possible that there is a coding error
2047 in malloc, in which case please report it!)
2048 */
2049
2050 #if !MALLOC_DEBUG
2051
2052 # define check_chunk(A, P)
2053 # define check_free_chunk(A, P)
2054 # define check_inuse_chunk(A, P)
2055 # define check_remalloced_chunk(A, P, N)
2056 # define check_malloced_chunk(A, P, N)
2057 # define check_malloc_state(A)
2058
2059 #else
2060
2061 # define check_chunk(A, P) do_check_chunk (A, P)
2062 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
2063 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2064 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2065 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2066 # define check_malloc_state(A) do_check_malloc_state (A)
2067
2068 /*
2069 Properties of all chunks
2070 */
2071
2072 static void
2073 do_check_chunk (mstate av, mchunkptr p)
2074 {
2075 unsigned long sz = chunksize (p);
2076 /* min and max possible addresses assuming contiguous allocation */
2077 char *max_address = (char *) (av->top) + chunksize (av->top);
2078 char *min_address = max_address - av->system_mem;
2079
2080 if (!chunk_is_mmapped (p))
2081 {
2082 /* Has legal address ... */
2083 if (p != av->top)
2084 {
2085 if (contiguous (av))
2086 {
2087 assert (((char *) p) >= min_address);
2088 assert (((char *) p + sz) <= ((char *) (av->top)));
2089 }
2090 }
2091 else
2092 {
2093 /* top size is always at least MINSIZE */
2094 assert ((unsigned long) (sz) >= MINSIZE);
2095 /* top predecessor always marked inuse */
2096 assert (prev_inuse (p));
2097 }
2098 }
2099 else if (!DUMPED_MAIN_ARENA_CHUNK (p))
2100 {
2101 /* address is outside main heap */
2102 if (contiguous (av) && av->top != initial_top (av))
2103 {
2104 assert (((char *) p) < min_address || ((char *) p) >= max_address);
2105 }
2106 /* chunk is page-aligned */
2107 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
2108 /* mem is aligned */
2109 assert (aligned_OK (chunk2rawmem (p)));
2110 }
2111 }
2112
2113 /*
2114 Properties of free chunks
2115 */
2116
2117 static void
2118 do_check_free_chunk (mstate av, mchunkptr p)
2119 {
2120 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2121 mchunkptr next = chunk_at_offset (p, sz);
2122
2123 do_check_chunk (av, p);
2124
2125 /* Chunk must claim to be free ... */
2126 assert (!inuse (p));
2127 assert (!chunk_is_mmapped (p));
2128
2129 /* Unless a special marker, must have OK fields */
2130 if ((unsigned long) (sz) >= MINSIZE)
2131 {
2132 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2133 assert (aligned_OK (chunk2rawmem (p)));
2134 /* ... matching footer field */
2135 assert (prev_size (next_chunk (p)) == sz);
2136 /* ... and is fully consolidated */
2137 assert (prev_inuse (p));
2138 assert (next == av->top || inuse (next));
2139
2140 /* ... and has minimally sane links */
2141 assert (p->fd->bk == p);
2142 assert (p->bk->fd == p);
2143 }
2144 else /* markers are always of size SIZE_SZ */
2145 assert (sz == SIZE_SZ);
2146 }
2147
2148 /*
2149 Properties of inuse chunks
2150 */
2151
2152 static void
2153 do_check_inuse_chunk (mstate av, mchunkptr p)
2154 {
2155 mchunkptr next;
2156
2157 do_check_chunk (av, p);
2158
2159 if (chunk_is_mmapped (p))
2160 return; /* mmapped chunks have no next/prev */
2161
2162 /* Check whether it claims to be in use ... */
2163 assert (inuse (p));
2164
2165 next = next_chunk (p);
2166
2167 /* ... and is surrounded by OK chunks.
2168 Since more things can be checked with free chunks than inuse ones,
2169 if an inuse chunk borders them and debug is on, it's worth checking them.
2170 */
2171 if (!prev_inuse (p))
2172 {
2173 /* Note that we cannot even look at prev unless it is not inuse */
2174 mchunkptr prv = prev_chunk (p);
2175 assert (next_chunk (prv) == p);
2176 do_check_free_chunk (av, prv);
2177 }
2178
2179 if (next == av->top)
2180 {
2181 assert (prev_inuse (next));
2182 assert (chunksize (next) >= MINSIZE);
2183 }
2184 else if (!inuse (next))
2185 do_check_free_chunk (av, next);
2186 }
2187
2188 /*
2189 Properties of chunks recycled from fastbins
2190 */
2191
2192 static void
2193 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2194 {
2195 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2196
2197 if (!chunk_is_mmapped (p))
2198 {
2199 assert (av == arena_for_chunk (p));
2200 if (chunk_main_arena (p))
2201 assert (av == &main_arena);
2202 else
2203 assert (av != &main_arena);
2204 }
2205
2206 do_check_inuse_chunk (av, p);
2207
2208 /* Legal size ... */
2209 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2210 assert ((unsigned long) (sz) >= MINSIZE);
2211 /* ... and alignment */
2212 assert (aligned_OK (chunk2rawmem (p)));
2213 /* chunk is less than MINSIZE more than request */
2214 assert ((long) (sz) - (long) (s) >= 0);
2215 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2216 }
2217
2218 /*
2219 Properties of nonrecycled chunks at the point they are malloced
2220 */
2221
2222 static void
2223 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2224 {
2225 /* same as recycled case ... */
2226 do_check_remalloced_chunk (av, p, s);
2227
2228 /*
2229 ... plus, must obey implementation invariant that prev_inuse is
2230 always true of any allocated chunk; i.e., that each allocated
2231 chunk borders either a previously allocated and still in-use
2232 chunk, or the base of its memory arena. This is ensured
2233 by making all allocations from the `lowest' part of any found
2234 chunk. This does not necessarily hold however for chunks
2235 recycled via fastbins.
2236 */
2237
2238 assert (prev_inuse (p));
2239 }
2240
2241
2242 /*
2243 Properties of malloc_state.
2244
2245 This may be useful for debugging malloc, as well as detecting user
2246 programmer errors that somehow write into malloc_state.
2247
2248 If you are extending or experimenting with this malloc, you can
2249 probably figure out how to hack this routine to print out or
2250 display chunk addresses, sizes, bins, and other instrumentation.
2251 */
2252
2253 static void
2254 do_check_malloc_state (mstate av)
2255 {
2256 int i;
2257 mchunkptr p;
2258 mchunkptr q;
2259 mbinptr b;
2260 unsigned int idx;
2261 INTERNAL_SIZE_T size;
2262 unsigned long total = 0;
2263 int max_fast_bin;
2264
2265 /* internal size_t must be no wider than pointer type */
2266 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2267
2268 /* alignment is a power of 2 */
2269 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2270
2271 /* Check the arena is initialized. */
2272 assert (av->top != 0);
2273
2274 /* No memory has been allocated yet, so doing more tests is not possible. */
2275 if (av->top == initial_top (av))
2276 return;
2277
2278 /* pagesize is a power of 2 */
2279 assert (powerof2(GLRO (dl_pagesize)));
2280
2281 /* A contiguous main_arena is consistent with sbrk_base. */
2282 if (av == &main_arena && contiguous (av))
2283 assert ((char *) mp_.sbrk_base + av->system_mem ==
2284 (char *) av->top + chunksize (av->top));
2285
2286 /* properties of fastbins */
2287
2288 /* max_fast is in allowed range */
2289 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2290
2291 max_fast_bin = fastbin_index (get_max_fast ());
2292
2293 for (i = 0; i < NFASTBINS; ++i)
2294 {
2295 p = fastbin (av, i);
2296
2297 /* The following test can only be performed for the main arena.
2298 While mallopt calls malloc_consolidate to get rid of all fast
2299 bins (especially those larger than the new maximum), this only
2300 happens for the main arena. Trying to do this for any
2301 other arena would mean those arenas have to be locked and
2302 malloc_consolidate be called for them. This is excessive. And
2303 even if this is acceptable to somebody it still cannot solve
2304 the problem completely since if the arena is locked a
2305 concurrent malloc call might create a new arena which then
2306 could use the newly invalid fast bins. */
2307
2308 /* all bins past max_fast are empty */
2309 if (av == &main_arena && i > max_fast_bin)
2310 assert (p == 0);
2311
2312 while (p != 0)
2313 {
2314 if (__glibc_unlikely (misaligned_chunk (p)))
2315 malloc_printerr ("do_check_malloc_state(): "
2316 "unaligned fastbin chunk detected");
2317 /* each chunk claims to be inuse */
2318 do_check_inuse_chunk (av, p);
2319 total += chunksize (p);
2320 /* chunk belongs in this bin */
2321 assert (fastbin_index (chunksize (p)) == i);
2322 p = REVEAL_PTR (p->fd);
2323 }
2324 }
2325
2326 /* check normal bins */
2327 for (i = 1; i < NBINS; ++i)
2328 {
2329 b = bin_at (av, i);
2330
2331 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2332 if (i >= 2)
2333 {
2334 unsigned int binbit = get_binmap (av, i);
2335 int empty = last (b) == b;
2336 if (!binbit)
2337 assert (empty);
2338 else if (!empty)
2339 assert (binbit);
2340 }
2341
2342 for (p = last (b); p != b; p = p->bk)
2343 {
2344 /* each chunk claims to be free */
2345 do_check_free_chunk (av, p);
2346 size = chunksize (p);
2347 total += size;
2348 if (i >= 2)
2349 {
2350 /* chunk belongs in bin */
2351 idx = bin_index (size);
2352 assert (idx == i);
2353 /* lists are sorted */
2354 assert (p->bk == b ||
2355 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2356
2357 if (!in_smallbin_range (size))
2358 {
2359 if (p->fd_nextsize != NULL)
2360 {
2361 if (p->fd_nextsize == p)
2362 assert (p->bk_nextsize == p);
2363 else
2364 {
2365 if (p->fd_nextsize == first (b))
2366 assert (chunksize (p) < chunksize (p->fd_nextsize));
2367 else
2368 assert (chunksize (p) > chunksize (p->fd_nextsize));
2369
2370 if (p == first (b))
2371 assert (chunksize (p) > chunksize (p->bk_nextsize));
2372 else
2373 assert (chunksize (p) < chunksize (p->bk_nextsize));
2374 }
2375 }
2376 else
2377 assert (p->bk_nextsize == NULL);
2378 }
2379 }
2380 else if (!in_smallbin_range (size))
2381 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2382 /* chunk is followed by a legal chain of inuse chunks */
2383 for (q = next_chunk (p);
2384 (q != av->top && inuse (q) &&
2385 (unsigned long) (chunksize (q)) >= MINSIZE);
2386 q = next_chunk (q))
2387 do_check_inuse_chunk (av, q);
2388 }
2389 }
2390
2391 /* top chunk is OK */
2392 check_chunk (av, av->top);
2393 }
2394 #endif
2395
2396
2397 /* ----------------- Support for debugging hooks -------------------- */
2398 #include "hooks.c"
2399
2400
2401 /* ----------- Routines dealing with system allocation -------------- */
2402
2403 /*
2404 sysmalloc handles malloc cases requiring more memory from the system.
2405 On entry, it is assumed that av->top does not have enough
2406 space to service the request for nb bytes, thus requiring that av->top
2407 be extended or replaced.
2408 */
2409
2410 static void *
2411 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2412 {
2413 mchunkptr old_top; /* incoming value of av->top */
2414 INTERNAL_SIZE_T old_size; /* its size */
2415 char *old_end; /* its end address */
2416
2417 long size; /* arg to first MORECORE or mmap call */
2418 char *brk; /* return value from MORECORE */
2419
2420 long correction; /* arg to 2nd MORECORE call */
2421 char *snd_brk; /* 2nd return val */
2422
2423 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2424 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2425 char *aligned_brk; /* aligned offset into brk */
2426
2427 mchunkptr p; /* the allocated/returned chunk */
2428 mchunkptr remainder; /* remainder from allocation */
2429 unsigned long remainder_size; /* its size */
2430
2431
2432 size_t pagesize = GLRO (dl_pagesize);
2433 bool tried_mmap = false;
2434
2435
2436 /*
2437 If we have mmap, and the request size meets the mmap threshold, and
2438 the system supports mmap, and there are few enough currently
2439 allocated mmapped regions, try to directly map this request
2440 rather than expanding top.
2441 */
2442
2443 if (av == NULL
2444 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2445 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2446 {
2447 char *mm; /* return value from mmap call*/
2448
2449 try_mmap:
2450 /*
2451 Round up size to nearest page. For mmapped chunks, the overhead
2452 is one SIZE_SZ unit larger than for normal chunks, because there
2453 is no following chunk whose prev_size field could be used.
2454
2455 See the front_misalign handling below; for glibc there is no
2456 need for further alignment unless we have high alignment.
2457 */
2458 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2459 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2460 else
2461 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2462 tried_mmap = true;
2463
2464 /* Don't try if size wraps around 0 */
2465 if ((unsigned long) (size) > (unsigned long) (nb))
2466 {
2467 mm = (char *) (MMAP (0, size,
2468 MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE, 0));
2469
2470 if (mm != MAP_FAILED)
2471 {
2472 /*
2473 The offset to the start of the mmapped region is stored
2474 in the prev_size field of the chunk. This allows us to adjust
2475 returned start address to meet alignment requirements here
2476 and in memalign(), and still be able to compute proper
2477 address argument for later munmap in free() and realloc().
2478 */
2479
2480 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2481 {
2482 /* For glibc, chunk2rawmem increases the address by
2483 CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
2484 CHUNK_HDR_SZ-1. Each mmap'ed area is page
2485 aligned and therefore definitely
2486 MALLOC_ALIGN_MASK-aligned. */
2487 assert (((INTERNAL_SIZE_T) chunk2rawmem (mm) & MALLOC_ALIGN_MASK) == 0);
2488 front_misalign = 0;
2489 }
2490 else
2491 front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (mm) & MALLOC_ALIGN_MASK;
2492 if (front_misalign > 0)
2493 {
2494 correction = MALLOC_ALIGNMENT - front_misalign;
2495 p = (mchunkptr) (mm + correction);
2496 set_prev_size (p, correction);
2497 set_head (p, (size - correction) | IS_MMAPPED);
2498 }
2499 else
2500 {
2501 p = (mchunkptr) mm;
2502 set_prev_size (p, 0);
2503 set_head (p, size | IS_MMAPPED);
2504 }
2505
2506 /* update statistics */
2507
2508 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2509 atomic_max (&mp_.max_n_mmaps, new);
2510
2511 unsigned long sum;
2512 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2513 atomic_max (&mp_.max_mmapped_mem, sum);
2514
2515 check_chunk (av, p);
2516
2517 return chunk2mem (p);
2518 }
2519 }
2520 }
2521
2522 /* There are no usable arenas and mmap also failed. */
2523 if (av == NULL)
2524 return 0;
2525
2526 /* Record incoming configuration of top */
2527
2528 old_top = av->top;
2529 old_size = chunksize (old_top);
2530 old_end = (char *) (chunk_at_offset (old_top, old_size));
2531
2532 brk = snd_brk = (char *) (MORECORE_FAILURE);
2533
2534 /*
2535 If not the first time through, we require old_size to be
2536 at least MINSIZE and to have prev_inuse set.
2537 */
2538
2539 assert ((old_top == initial_top (av) && old_size == 0) ||
2540 ((unsigned long) (old_size) >= MINSIZE &&
2541 prev_inuse (old_top) &&
2542 ((unsigned long) old_end & (pagesize - 1)) == 0));
2543
2544 /* Precondition: not enough current space to satisfy nb request */
2545 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2546
2547
2548 if (av != &main_arena)
2549 {
2550 heap_info *old_heap, *heap;
2551 size_t old_heap_size;
2552
2553 /* First try to extend the current heap. */
2554 old_heap = heap_for_ptr (old_top);
2555 old_heap_size = old_heap->size;
2556 if ((long) (MINSIZE + nb - old_size) > 0
2557 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2558 {
2559 av->system_mem += old_heap->size - old_heap_size;
2560 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2561 | PREV_INUSE);
2562 }
2563 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2564 {
2565 /* Use a newly allocated heap. */
2566 heap->ar_ptr = av;
2567 heap->prev = old_heap;
2568 av->system_mem += heap->size;
2569 /* Set up the new top. */
2570 top (av) = chunk_at_offset (heap, sizeof (*heap));
2571 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2572
2573 /* Set up the fencepost and free the old top chunk with a multiple of
2574 MALLOC_ALIGNMENT in size. */
2575 /* The fencepost takes at least MINSIZE bytes, because it might
2576 become the top chunk again later. Note that a footer is set
2577 up, too, although the chunk is marked in use. */
2578 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2579 set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
2580 0 | PREV_INUSE);
2581 if (old_size >= MINSIZE)
2582 {
2583 set_head (chunk_at_offset (old_top, old_size),
2584 CHUNK_HDR_SZ | PREV_INUSE);
2585 set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
2586 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2587 _int_free (av, old_top, 1);
2588 }
2589 else
2590 {
2591 set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
2592 set_foot (old_top, (old_size + CHUNK_HDR_SZ));
2593 }
2594 }
2595 else if (!tried_mmap)
2596 /* We can at least try to use mmap memory. */
2597 goto try_mmap;
2598 }
2599 else /* av == main_arena */
2600
2601
2602 { /* Request enough space for nb + pad + overhead */
2603 size = nb + mp_.top_pad + MINSIZE;
2604
2605 /*
2606 If contiguous, we can subtract out existing space that we hope to
2607 combine with new space. We add it back later only if
2608 we don't actually get contiguous space.
2609 */
2610
2611 if (contiguous (av))
2612 size -= old_size;
2613
2614 /*
2615 Round to a multiple of page size.
2616 If MORECORE is not contiguous, this ensures that we only call it
2617 with whole-page arguments. And if MORECORE is contiguous and
2618 this is not first time through, this preserves page-alignment of
2619 previous calls. Otherwise, we correct to page-align below.
2620 */
2621
2622 size = ALIGN_UP (size, pagesize);
2623
2624 /*
2625 Don't try to call MORECORE if argument is so big as to appear
2626 negative. Note that since mmap takes size_t arg, it may succeed
2627 below even if we cannot call MORECORE.
2628 */
2629
2630 if (size > 0)
2631 {
2632 brk = (char *) (MORECORE (size));
2633 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2634 }
2635
2636 if (brk != (char *) (MORECORE_FAILURE))
2637 {
2638 /* Call the `morecore' hook if necessary. */
2639 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2640 if (__builtin_expect (hook != NULL, 0))
2641 (*hook)();
2642 }
2643 else
2644 {
2645 /*
2646 If we have mmap, try using it as a backup when MORECORE fails or
2647 cannot be used. This is worth doing on systems that have "holes" in
2648 address space, so sbrk cannot extend to give contiguous space, but
2649 space is available elsewhere. Note that we ignore mmap max count
2650 and threshold limits, since the space will not be used as a
2651 segregated mmap region.
2652 */
2653
2654 /* Cannot merge with old top, so add its size back in */
2655 if (contiguous (av))
2656 size = ALIGN_UP (size + old_size, pagesize);
2657
2658 /* If we are relying on mmap as backup, then use larger units */
2659 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2660 size = MMAP_AS_MORECORE_SIZE;
2661
2662 /* Don't try if size wraps around 0 */
2663 if ((unsigned long) (size) > (unsigned long) (nb))
2664 {
2665 char *mbrk = (char *) (MMAP (0, size,
2666 MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE,
2667 0));
2668
2669 if (mbrk != MAP_FAILED)
2670 {
2671 /* We do not need, and cannot use, another sbrk call to find end */
2672 brk = mbrk;
2673 snd_brk = brk + size;
2674
2675 /*
2676 Record that we no longer have a contiguous sbrk region.
2677 After the first time mmap is used as backup, we do not
2678 ever rely on contiguous space since this could incorrectly
2679 bridge regions.
2680 */
2681 set_noncontiguous (av);
2682 }
2683 }
2684 }
2685
2686 if (brk != (char *) (MORECORE_FAILURE))
2687 {
2688 if (mp_.sbrk_base == 0)
2689 mp_.sbrk_base = brk;
2690 av->system_mem += size;
2691
2692 /*
2693 If MORECORE extends previous space, we can likewise extend top size.
2694 */
2695
2696 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2697 set_head (old_top, (size + old_size) | PREV_INUSE);
2698
2699 else if (contiguous (av) && old_size && brk < old_end)
2700 /* Oops! Someone else killed our space. Can't touch anything. */
2701 malloc_printerr ("break adjusted to free malloc space");
2702
2703 /*
2704 Otherwise, make adjustments:
2705
2706 * If the first time through or noncontiguous, we need to call sbrk
2707 just to find out where the end of memory lies.
2708
2709 * We need to ensure that all returned chunks from malloc will meet
2710 MALLOC_ALIGNMENT
2711
2712 * If there was an intervening foreign sbrk, we need to adjust sbrk
2713 request size to account for fact that we will not be able to
2714 combine new space with existing space in old_top.
2715
2716 * Almost all systems internally allocate whole pages at a time, in
2717 which case we might as well use the whole last page of request.
2718 So we allocate enough more memory to hit a page boundary now,
2719 which in turn causes future contiguous calls to page-align.
2720 */
2721
2722 else
2723 {
2724 front_misalign = 0;
2725 end_misalign = 0;
2726 correction = 0;
2727 aligned_brk = brk;
2728
2729 /* handle contiguous cases */
2730 if (contiguous (av))
2731 {
2732 /* Count foreign sbrk as system_mem. */
2733 if (old_size)
2734 av->system_mem += brk - old_end;
2735
2736 /* Guarantee alignment of first new chunk made from this space */
2737
2738 front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (brk) & MALLOC_ALIGN_MASK;
2739 if (front_misalign > 0)
2740 {
2741 /*
2742 Skip over some bytes to arrive at an aligned position.
2743 We don't need to specially mark these wasted front bytes.
2744 They will never be accessed anyway because
2745 prev_inuse of av->top (and any chunk created from its start)
2746 is always true after initialization.
2747 */
2748
2749 correction = MALLOC_ALIGNMENT - front_misalign;
2750 aligned_brk += correction;
2751 }
2752
2753 /*
2754 If this isn't adjacent to existing space, then we will not
2755 be able to merge with old_top space, so must add to 2nd request.
2756 */
2757
2758 correction += old_size;
2759
2760 /* Extend the end address to hit a page boundary */
2761 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2762 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2763
2764 assert (correction >= 0);
2765 snd_brk = (char *) (MORECORE (correction));
2766
2767 /*
2768 If can't allocate correction, try to at least find out current
2769 brk. It might be enough to proceed without failing.
2770
2771 Note that if second sbrk did NOT fail, we assume that space
2772 is contiguous with first sbrk. This is a safe assumption unless
2773 program is multithreaded but doesn't use locks and a foreign sbrk
2774 occurred between our first and second calls.
2775 */
2776
2777 if (snd_brk == (char *) (MORECORE_FAILURE))
2778 {
2779 correction = 0;
2780 snd_brk = (char *) (MORECORE (0));
2781 }
2782 else
2783 {
2784 /* Call the `morecore' hook if necessary. */
2785 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2786 if (__builtin_expect (hook != NULL, 0))
2787 (*hook)();
2788 }
2789 }
2790
2791 /* handle non-contiguous cases */
2792 else
2793 {
2794 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2795 /* MORECORE/mmap must correctly align */
2796 assert (((unsigned long) chunk2rawmem (brk) & MALLOC_ALIGN_MASK) == 0);
2797 else
2798 {
2799 front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (brk) & MALLOC_ALIGN_MASK;
2800 if (front_misalign > 0)
2801 {
2802 /*
2803 Skip over some bytes to arrive at an aligned position.
2804 We don't need to specially mark these wasted front bytes.
2805 They will never be accessed anyway because
2806 prev_inuse of av->top (and any chunk created from its start)
2807 is always true after initialization.
2808 */
2809
2810 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2811 }
2812 }
2813
2814 /* Find out current end of memory */
2815 if (snd_brk == (char *) (MORECORE_FAILURE))
2816 {
2817 snd_brk = (char *) (MORECORE (0));
2818 }
2819 }
2820
2821 /* Adjust top based on results of second sbrk */
2822 if (snd_brk != (char *) (MORECORE_FAILURE))
2823 {
2824 av->top = (mchunkptr) aligned_brk;
2825 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2826 av->system_mem += correction;
2827
2828 /*
2829 If not the first time through, we either have a
2830 gap due to foreign sbrk or a non-contiguous region. Insert a
2831 double fencepost at old_top to prevent consolidation with space
2832 we don't own. These fenceposts are artificial chunks that are
2833 marked as inuse and are in any case too small to use. We need
2834 two to make sizes and alignments work out.
2835 */
2836
2837 if (old_size != 0)
2838 {
2839 /*
2840 Shrink old_top to insert fenceposts, keeping size a
2841 multiple of MALLOC_ALIGNMENT. We know there is at least
2842 enough space in old_top to do this.
2843 */
2844 old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
2845 set_head (old_top, old_size | PREV_INUSE);
2846
2847 /*
2848 Note that the following assignments completely overwrite
2849 old_top when old_size was previously MINSIZE. This is
2850 intentional. We need the fencepost, even if old_top otherwise gets
2851 lost.
2852 */
2853 set_head (chunk_at_offset (old_top, old_size),
2854 CHUNK_HDR_SZ | PREV_INUSE);
2855 set_head (chunk_at_offset (old_top,
2856 old_size + CHUNK_HDR_SZ),
2857 CHUNK_HDR_SZ | PREV_INUSE);
2858
2859 /* If possible, release the rest. */
2860 if (old_size >= MINSIZE)
2861 {
2862 _int_free (av, old_top, 1);
2863 }
2864 }
2865 }
2866 }
2867 }
2868 } /* if (av != &main_arena) */
2869
2870 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2871 av->max_system_mem = av->system_mem;
2872 check_malloc_state (av);
2873
2874 /* finally, do the allocation */
2875 p = av->top;
2876 size = chunksize (p);
2877
2878 /* check that one of the above allocation paths succeeded */
2879 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2880 {
2881 remainder_size = size - nb;
2882 remainder = chunk_at_offset (p, nb);
2883 av->top = remainder;
2884 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2885 set_head (remainder, remainder_size | PREV_INUSE);
2886 check_malloced_chunk (av, p, nb);
2887 return chunk2mem (p);
2888 }
2889
2890 /* catch all failure paths */
2891 __set_errno (ENOMEM);
2892 return 0;
2893 }
2894
2895
2896 /*
2897 systrim is an inverse of sorts to sysmalloc. It gives memory back
2898 to the system (via negative arguments to sbrk) if there is unused
2899 memory at the `high' end of the malloc pool. It is called
2900 automatically by free() when top space exceeds the trim
2901 threshold. It is also called by the public malloc_trim routine. It
2902 returns 1 if it actually released any memory, else 0.
2903 */
2904
2905 static int
2906 systrim (size_t pad, mstate av)
2907 {
2908 long top_size; /* Amount of top-most memory */
2909 long extra; /* Amount to release */
2910 long released; /* Amount actually released */
2911 char *current_brk; /* address returned by pre-check sbrk call */
2912 char *new_brk; /* address returned by post-check sbrk call */
2913 size_t pagesize;
2914 long top_area;
2915
2916 pagesize = GLRO (dl_pagesize);
2917 top_size = chunksize (av->top);
2918
2919 top_area = top_size - MINSIZE - 1;
2920 if (top_area <= pad)
2921 return 0;
2922
2923 /* Release in pagesize units and round down to the nearest page. */
2924 extra = ALIGN_DOWN(top_area - pad, pagesize);
2925
2926 if (extra == 0)
2927 return 0;
2928
2929 /*
2930 Only proceed if end of memory is where we last set it.
2931 This avoids problems if there were foreign sbrk calls.
2932 */
2933 current_brk = (char *) (MORECORE (0));
2934 if (current_brk == (char *) (av->top) + top_size)
2935 {
2936 /*
2937 Attempt to release memory. We ignore MORECORE return value,
2938 and instead call again to find out where new end of memory is.
2939 This avoids problems if first call releases less than we asked,
2940 or if failure somehow altered the brk value. (We could still
2941 encounter problems if it altered brk in some very bad way,
2942 but the only thing we can do is adjust anyway, which will cause
2943 some downstream failure.)
2944 */
2945
2946 MORECORE (-extra);
2947 /* Call the `morecore' hook if necessary. */
2948 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2949 if (__builtin_expect (hook != NULL, 0))
2950 (*hook)();
2951 new_brk = (char *) (MORECORE (0));
2952
2953 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2954
2955 if (new_brk != (char *) MORECORE_FAILURE)
2956 {
2957 released = (long) (current_brk - new_brk);
2958
2959 if (released != 0)
2960 {
2961 /* Success. Adjust top. */
2962 av->system_mem -= released;
2963 set_head (av->top, (top_size - released) | PREV_INUSE);
2964 check_malloc_state (av);
2965 return 1;
2966 }
2967 }
2968 }
2969 return 0;
2970 }
2971
2972 static void
2973 munmap_chunk (mchunkptr p)
2974 {
2975 size_t pagesize = GLRO (dl_pagesize);
2976 INTERNAL_SIZE_T size = chunksize (p);
2977
2978 assert (chunk_is_mmapped (p));
2979
2980 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
2981 main arena. We never free this memory. */
2982 if (DUMPED_MAIN_ARENA_CHUNK (p))
2983 return;
2984
2985 uintptr_t mem = (uintptr_t) chunk2rawmem (p);
2986 uintptr_t block = (uintptr_t) p - prev_size (p);
2987 size_t total_size = prev_size (p) + size;
2988 /* Unfortunately we have to do the compiler's job by hand here. Normally
2989 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2990 page size. But gcc does not recognize the optimization possibility
2991 (at the moment, at least) so we combine the two values into one before
2992 the bit test. */
2993 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2994 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
2995 malloc_printerr ("munmap_chunk(): invalid pointer");
2996
2997 atomic_decrement (&mp_.n_mmaps);
2998 atomic_add (&mp_.mmapped_mem, -total_size);
2999
3000 /* If munmap fails, the process's virtual memory address space is in
3001 bad shape. Just leave the block hanging around; the process will
3002 terminate shortly anyway since not much can be done. */
3003 __munmap ((char *) block, total_size);
3004 }
3005
3006 #if HAVE_MREMAP
3007
3008 static mchunkptr
3009 mremap_chunk (mchunkptr p, size_t new_size)
3010 {
3011 size_t pagesize = GLRO (dl_pagesize);
3012 INTERNAL_SIZE_T offset = prev_size (p);
3013 INTERNAL_SIZE_T size = chunksize (p);
3014 char *cp;
3015
3016 assert (chunk_is_mmapped (p));
3017
3018 uintptr_t block = (uintptr_t) p - offset;
3019 uintptr_t mem = (uintptr_t) chunk2mem(p);
3020 size_t total_size = offset + size;
3021 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3022 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3023 malloc_printerr("mremap_chunk(): invalid pointer");
3024
3025 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3026 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
3027
3028 /* No need to remap if the number of pages does not change. */
3029 if (total_size == new_size)
3030 return p;
3031
3032 cp = (char *) __mremap ((char *) block, total_size, new_size,
3033 MREMAP_MAYMOVE);
3034
3035 if (cp == MAP_FAILED)
3036 return 0;
3037
3038 p = (mchunkptr) (cp + offset);
3039
3040 assert (aligned_OK (chunk2rawmem (p)));
3041
3042 assert (prev_size (p) == offset);
3043 set_head (p, (new_size - offset) | IS_MMAPPED);
3044
3045 INTERNAL_SIZE_T new;
3046 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
3047 + new_size - size - offset;
3048 atomic_max (&mp_.max_mmapped_mem, new);
3049 return p;
3050 }
3051 #endif /* HAVE_MREMAP */
3052
3053 /*------------------------ Public wrappers. --------------------------------*/
3054
3055 #if USE_TCACHE
3056
3057 /* We overlay this structure on the user-data portion of a chunk when
3058 the chunk is stored in the per-thread cache. */
3059 typedef struct tcache_entry
3060 {
3061 struct tcache_entry *next;
3062 /* This field exists to detect double frees. */
3063 struct tcache_perthread_struct *key;
3064 } tcache_entry;
3065
3066 /* There is one of these for each thread, which contains the
3067 per-thread cache (hence "tcache_perthread_struct"). Keeping
3068 overall size low is mildly important. Note that COUNTS and ENTRIES
3069 are redundant (we could have just counted the linked list each
3070 time); this is done for performance reasons.
3071 typedef struct tcache_perthread_struct
3072 {
3073 uint16_t counts[TCACHE_MAX_BINS];
3074 tcache_entry *entries[TCACHE_MAX_BINS];
3075 } tcache_perthread_struct;
3076
3077 static __thread bool tcache_shutting_down = false;
3078 static __thread tcache_perthread_struct *tcache = NULL;
3079
3080 /* Caller must ensure that we know tc_idx is valid and there's room
3081 for more chunks. */
3082 static __always_inline void
3083 tcache_put (mchunkptr chunk, size_t tc_idx)
3084 {
3085 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
3086
3087 /* Mark this chunk as "in the tcache" so the test in _int_free will
3088 detect a double free. */
3089 e->key = tcache;
3090
3091 e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
3092 tcache->entries[tc_idx] = e;
3093 ++(tcache->counts[tc_idx]);
3094 }
3095
3096 /* Caller must ensure that we know tc_idx is valid and there are
3097 chunks available to remove. */
3098 static __always_inline void *
3099 tcache_get (size_t tc_idx)
3100 {
3101 tcache_entry *e = tcache->entries[tc_idx];
3102 if (__glibc_unlikely (!aligned_OK (e)))
3103 malloc_printerr ("malloc(): unaligned tcache chunk detected");
3104 tcache->entries[tc_idx] = REVEAL_PTR (e->next);
3105 --(tcache->counts[tc_idx]);
3106 e->key = NULL;
3107 return (void *) e;
3108 }
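/* Illustrative sketch (not part of the glibc sources): the PROTECT_PTR /
   REVEAL_PTR pair used above mangles the stored "next" pointer with bits
   of the address of the location holding it, so that a forged list entry
   written without knowledge of its own address does not decode to a
   usable pointer.  The shift amount below is an assumption chosen for
   illustration.  */
#if 0
#include <stdint.h>

static void *
example_protect_ptr (void **slot, void *ptr)
{
  /* Mix the page-granular bits of the slot address into the pointer.  */
  return (void *) (((uintptr_t) slot >> 12) ^ (uintptr_t) ptr);
}

static void *
example_reveal_ptr (void **slot, void *mangled)
{
  /* XORing with the same key recovers the original pointer.  */
  return (void *) (((uintptr_t) slot >> 12) ^ (uintptr_t) mangled);
}
#endif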
3109
3110 static void
3111 tcache_thread_shutdown (void)
3112 {
3113 int i;
3114 tcache_perthread_struct *tcache_tmp = tcache;
3115
3116 if (!tcache)
3117 return;
3118
3119 /* Disable the tcache and prevent it from being reinitialized. */
3120 tcache = NULL;
3121 tcache_shutting_down = true;
3122
3123 /* Free all of the entries and the tcache itself back to the arena
3124 heap for coalescing. */
3125 for (i = 0; i < TCACHE_MAX_BINS; ++i)
3126 {
3127 while (tcache_tmp->entries[i])
3128 {
3129 tcache_entry *e = tcache_tmp->entries[i];
3130 if (__glibc_unlikely (!aligned_OK (e)))
3131 malloc_printerr ("tcache_thread_shutdown(): "
3132 "unaligned tcache chunk detected");
3133 tcache_tmp->entries[i] = REVEAL_PTR (e->next);
3134 __libc_free (e);
3135 }
3136 }
3137
3138 __libc_free (tcache_tmp);
3139 }
3140
3141 static void
3142 tcache_init(void)
3143 {
3144 mstate ar_ptr;
3145 void *victim = 0;
3146 const size_t bytes = sizeof (tcache_perthread_struct);
3147
3148 if (tcache_shutting_down)
3149 return;
3150
3151 arena_get (ar_ptr, bytes);
3152 victim = _int_malloc (ar_ptr, bytes);
3153 if (!victim && ar_ptr != NULL)
3154 {
3155 ar_ptr = arena_get_retry (ar_ptr, bytes);
3156 victim = _int_malloc (ar_ptr, bytes);
3157 }
3158
3159
3160 if (ar_ptr != NULL)
3161 __libc_lock_unlock (ar_ptr->mutex);
3162
3163 /* In a low memory situation, we may not be able to allocate memory,
3164 in which case we just keep trying later. However, we
3165 typically do this very early, so either there is sufficient
3166 memory, or there isn't enough memory to do non-trivial
3167 allocations anyway. */
3168 if (victim)
3169 {
3170 tcache = (tcache_perthread_struct *) victim;
3171 memset (tcache, 0, sizeof (tcache_perthread_struct));
3172 }
3173
3174 }
3175
3176 # define MAYBE_INIT_TCACHE() \
3177 if (__glibc_unlikely (tcache == NULL)) \
3178 tcache_init();
3179
3180 #else /* !USE_TCACHE */
3181 # define MAYBE_INIT_TCACHE()
3182
3183 static void
3184 tcache_thread_shutdown (void)
3185 {
3186 /* Nothing to do if there is no thread cache. */
3187 }
3188
3189 #endif /* !USE_TCACHE */
3190
3191 void *
3192 __libc_malloc (size_t bytes)
3193 {
3194 mstate ar_ptr;
3195 void *victim;
3196
3197 _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
3198 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3199
3200 void *(*hook) (size_t, const void *)
3201 = atomic_forced_read (__malloc_hook);
3202 if (__builtin_expect (hook != NULL, 0))
3203 return (*hook)(bytes, RETURN_ADDRESS (0));
3204 #if USE_TCACHE
3205 /* _int_free also calls request2size; be careful not to pad twice. */
3206 size_t tbytes;
3207 if (!checked_request2size (bytes, &tbytes))
3208 {
3209 __set_errno (ENOMEM);
3210 return NULL;
3211 }
3212 size_t tc_idx = csize2tidx (tbytes);
3213
3214 MAYBE_INIT_TCACHE ();
3215
3216 DIAG_PUSH_NEEDS_COMMENT;
3217 if (tc_idx < mp_.tcache_bins
3218 && tcache
3219 && tcache->counts[tc_idx] > 0)
3220 {
3221 victim = tcache_get (tc_idx);
3222 return TAG_NEW_USABLE (victim);
3223 }
3224 DIAG_POP_NEEDS_COMMENT;
3225 #endif
3226
3227 if (SINGLE_THREAD_P)
3228 {
3229 victim = TAG_NEW_USABLE (_int_malloc (&main_arena, bytes));
3230 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3231 &main_arena == arena_for_chunk (mem2chunk (victim)));
3232 return victim;
3233 }
3234
3235 arena_get (ar_ptr, bytes);
3236
3237 victim = _int_malloc (ar_ptr, bytes);
3238 /* Retry with another arena only if we were able to find a usable arena
3239 before. */
3240 if (!victim && ar_ptr != NULL)
3241 {
3242 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3243 ar_ptr = arena_get_retry (ar_ptr, bytes);
3244 victim = _int_malloc (ar_ptr, bytes);
3245 }
3246
3247 if (ar_ptr != NULL)
3248 __libc_lock_unlock (ar_ptr->mutex);
3249
3250 victim = TAG_NEW_USABLE (victim);
3251
3252 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3253 ar_ptr == arena_for_chunk (mem2chunk (victim)));
3254 return victim;
3255 }
3256 libc_hidden_def (__libc_malloc)
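/* Illustrative sketch (not part of the glibc sources): the hook dispatch
   at the top of __libc_malloc means a debugging facility can interpose on
   allocations by storing a function with the signature read above into
   __malloc_hook.  A minimal, not thread-safe sketch of such a hook
   follows; the hook variables are a deprecated interface and this is
   illustration only, not a recommended technique.  */
#if 0
#include <stdio.h>
#include <malloc.h>

static void *(*example_old_hook) (size_t, const void *);

static void *
example_malloc_hook (size_t size, const void *caller)
{
  void *result;
  __malloc_hook = example_old_hook;     /* avoid recursing into ourselves */
  result = malloc (size);
  fprintf (stderr, "malloc (%zu) from %p returns %p\n", size, caller, result);
  example_old_hook = __malloc_hook;
  __malloc_hook = example_malloc_hook;  /* re-install the hook */
  return result;
}
#endif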
3257
3258 void
3259 __libc_free (void *mem)
3260 {
3261 mstate ar_ptr;
3262 mchunkptr p; /* chunk corresponding to mem */
3263
3264 void (*hook) (void *, const void *)
3265 = atomic_forced_read (__free_hook);
3266 if (__builtin_expect (hook != NULL, 0))
3267 {
3268 (*hook)(mem, RETURN_ADDRESS (0));
3269 return;
3270 }
3271
3272 if (mem == 0) /* free(0) has no effect */
3273 return;
3274
3275 #ifdef USE_MTAG
3276 /* Quickly check that the freed pointer matches the tag for the memory.
3277 This gives a useful double-free detection. */
3278 *(volatile char *)mem;
3279 #endif
3280
3281 int err = errno;
3282
3283 p = mem2chunk (mem);
3284
3285 /* Mark the chunk as belonging to the library again. */
3286 (void)TAG_REGION (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
3287
3288 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3289 {
3290 /* See if the dynamic brk/mmap threshold needs adjusting.
3291 Dumped fake mmapped chunks do not affect the threshold. */
3292 if (!mp_.no_dyn_threshold
3293 && chunksize_nomask (p) > mp_.mmap_threshold
3294 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
3295 && !DUMPED_MAIN_ARENA_CHUNK (p))
3296 {
3297 mp_.mmap_threshold = chunksize (p);
3298 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3299 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3300 mp_.mmap_threshold, mp_.trim_threshold);
3301 }
3302 munmap_chunk (p);
3303 }
3304 else
3305 {
3306 MAYBE_INIT_TCACHE ();
3307
3308 ar_ptr = arena_for_chunk (p);
3309 _int_free (ar_ptr, p, 0);
3310 }
3311
3312 __set_errno (err);
3313 }
3314 libc_hidden_def (__libc_free)
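/* Illustrative sketch (not part of the glibc sources): the dynamic
   threshold update performed in __libc_free above.  Freeing an mmapped
   chunk larger than the current threshold (but below the allowed maximum)
   raises the threshold to that chunk's size, so future requests of a
   similar size are carved out of the heap rather than mmapped; the trim
   threshold follows at twice that value.  The struct and constant here
   are simplified stand-ins, not the real malloc_par.  */
#if 0
#include <stddef.h>
#include <stdbool.h>

struct example_malloc_par { size_t mmap_threshold; size_t trim_threshold; };
enum { EXAMPLE_MMAP_THRESHOLD_MAX = 32 * 1024 * 1024 };

static void
example_adjust_thresholds (struct example_malloc_par *mp, size_t freed_size,
                           bool dyn_threshold_disabled)
{
  if (!dyn_threshold_disabled
      && freed_size > mp->mmap_threshold
      && freed_size <= EXAMPLE_MMAP_THRESHOLD_MAX)
    {
      mp->mmap_threshold = freed_size;
      mp->trim_threshold = 2 * mp->mmap_threshold;
    }
}
#endif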
3315
3316 void *
3317 __libc_realloc (void *oldmem, size_t bytes)
3318 {
3319 mstate ar_ptr;
3320 INTERNAL_SIZE_T nb; /* padded request size */
3321
3322 void *newp; /* chunk to return */
3323
3324 void *(*hook) (void *, size_t, const void *) =
3325 atomic_forced_read (__realloc_hook);
3326 if (__builtin_expect (hook != NULL, 0))
3327 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3328
3329 #if REALLOC_ZERO_BYTES_FREES
3330 if (bytes == 0 && oldmem != NULL)
3331 {
3332 __libc_free (oldmem); return 0;
3333 }
3334 #endif
3335
3336 /* realloc of a null pointer is supposed to be the same as malloc */
3337 if (oldmem == 0)
3338 return __libc_malloc (bytes);
3339
3340 #ifdef USE_MTAG
3341 /* Perform a quick check to ensure that the pointer's tag matches the
3342 memory's tag. */
3343 *(volatile char*) oldmem;
3344 #endif
3345
3346 /* chunk corresponding to oldmem */
3347 const mchunkptr oldp = mem2chunk (oldmem);
3348 /* its size */
3349 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
3350
3351 if (chunk_is_mmapped (oldp))
3352 ar_ptr = NULL;
3353 else
3354 {
3355 MAYBE_INIT_TCACHE ();
3356 ar_ptr = arena_for_chunk (oldp);
3357 }
3358
3359 /* A little security check which won't hurt performance: the allocator
3360 never wraps around at the end of the address space. Therefore
3361 we can exclude some size values which might appear here by
3362 accident or by "design" from some intruder. We need to bypass
3363 this check for dumped fake mmap chunks from the old main arena
3364 because the new malloc may provide additional alignment. */
3365 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3366 || __builtin_expect (misaligned_chunk (oldp), 0))
3367 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
3368 malloc_printerr ("realloc(): invalid pointer");
3369
3370 if (!checked_request2size (bytes, &nb))
3371 {
3372 __set_errno (ENOMEM);
3373 return NULL;
3374 }
3375
3376 if (chunk_is_mmapped (oldp))
3377 {
3378 /* If this is a faked mmapped chunk from the dumped main arena,
3379 always make a copy (and do not free the old chunk). */
3380 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3381 {
3382 /* Must alloc, copy, free. */
3383 void *newmem = __libc_malloc (bytes);
3384 if (newmem == 0)
3385 return NULL;
3386 /* Copy as many bytes as are available from the old chunk
3387 and fit into the new size. NB: The overhead for faked
3388 mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
3389 regular mmapped chunks. */
3390 if (bytes > oldsize - SIZE_SZ)
3391 bytes = oldsize - SIZE_SZ;
3392 memcpy (newmem, oldmem, bytes);
3393 return newmem;
3394 }
3395
3396 void *newmem;
3397
3398 #if HAVE_MREMAP
3399 newp = mremap_chunk (oldp, nb);
3400 if (newp)
3401 {
3402 void *newmem = chunk2rawmem (newp);
3403 /* Give the new block a different tag. This helps to ensure
3404 that stale handles to the previous mapping are not
3405 reused. There's a performance hit for both us and the
3406 caller for doing this, so we might want to
3407 reconsider. */
3408 return TAG_NEW_USABLE (newmem);
3409 }
3410 #endif
3411 /* Note the extra SIZE_SZ overhead. */
3412 if (oldsize - SIZE_SZ >= nb)
3413 return oldmem; /* do nothing */
3414
3415 /* Must alloc, copy, free. */
3416 newmem = __libc_malloc (bytes);
3417 if (newmem == 0)
3418 return 0; /* propagate failure */
3419
3420 memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
3421 munmap_chunk (oldp);
3422 return newmem;
3423 }
3424
3425 if (SINGLE_THREAD_P)
3426 {
3427 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3428 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3429 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3430
3431 return newp;
3432 }
3433
3434 __libc_lock_lock (ar_ptr->mutex);
3435
3436 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3437
3438 __libc_lock_unlock (ar_ptr->mutex);
3439 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3440 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3441
3442 if (newp == NULL)
3443 {
3444 /* Try harder to allocate memory in other arenas. */
3445 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3446 newp = __libc_malloc (bytes);
3447 if (newp != NULL)
3448 {
3449 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3450 _int_free (ar_ptr, oldp, 0);
3451 }
3452 }
3453
3454 return newp;
3455 }
3456 libc_hidden_def (__libc_realloc)
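/* Illustrative sketch (not part of the glibc sources): the caller-visible
   contract implemented by __libc_realloc above.  realloc (NULL, n) acts
   like malloc (n), old contents are preserved up to the old size, and on
   failure the original block is left intact, which is why the result must
   be assigned to a temporary first.  */
#if 0
#include <stdlib.h>

static int
example_grow_buffer (char **buf, size_t *cap, size_t want)
{
  if (want <= *cap)
    return 0;
  char *bigger = realloc (*buf, want);  /* *buf may initially be NULL */
  if (bigger == NULL)
    return -1;                          /* *buf is still valid here */
  *buf = bigger;
  *cap = want;
  return 0;
}
#endif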
3457
3458 void *
3459 __libc_memalign (size_t alignment, size_t bytes)
3460 {
3461 void *address = RETURN_ADDRESS (0);
3462 return _mid_memalign (alignment, bytes, address);
3463 }
3464
3465 static void *
3466 _mid_memalign (size_t alignment, size_t bytes, void *address)
3467 {
3468 mstate ar_ptr;
3469 void *p;
3470
3471 void *(*hook) (size_t, size_t, const void *) =
3472 atomic_forced_read (__memalign_hook);
3473 if (__builtin_expect (hook != NULL, 0))
3474 return (*hook)(alignment, bytes, address);
3475
3476 /* If we need less alignment than we give anyway, just relay to malloc. */
3477 if (alignment <= MALLOC_ALIGNMENT)
3478 return __libc_malloc (bytes);
3479
3480 /* Otherwise, ensure that it is at least a minimum chunk size */
3481 if (alignment < MINSIZE)
3482 alignment = MINSIZE;
3483
3484 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3485 power of 2 and will cause overflow in the check below. */
3486 if (alignment > SIZE_MAX / 2 + 1)
3487 {
3488 __set_errno (EINVAL);
3489 return 0;
3490 }
3491
3492
3493 /* Make sure alignment is power of 2. */
3494 if (!powerof2 (alignment))
3495 {
3496 size_t a = MALLOC_ALIGNMENT * 2;
3497 while (a < alignment)
3498 a <<= 1;
3499 alignment = a;
3500 }
3501
3502 if (SINGLE_THREAD_P)
3503 {
3504 p = _int_memalign (&main_arena, alignment, bytes);
3505 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3506 &main_arena == arena_for_chunk (mem2chunk (p)));
3507 return TAG_NEW_USABLE (p);
3508 }
3509
3510 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3511
3512 p = _int_memalign (ar_ptr, alignment, bytes);
3513 if (!p && ar_ptr != NULL)
3514 {
3515 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3516 ar_ptr = arena_get_retry (ar_ptr, bytes);
3517 p = _int_memalign (ar_ptr, alignment, bytes);
3518 }
3519
3520 if (ar_ptr != NULL)
3521 __libc_lock_unlock (ar_ptr->mutex);
3522
3523 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3524 ar_ptr == arena_for_chunk (mem2chunk (p)));
3525 return TAG_NEW_USABLE (p);
3526 }
3527 /* For ISO C11. */
3528 weak_alias (__libc_memalign, aligned_alloc)
3529 libc_hidden_def (__libc_memalign)
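/* Illustrative sketch (not part of the glibc sources): the rounding done
   in _mid_memalign above when the requested alignment is not a power of
   two.  The starting value stands in for MALLOC_ALIGNMENT * 2 and is an
   assumption for illustration.  */
#if 0
#include <stddef.h>

static size_t
example_round_up_alignment (size_t alignment)
{
  size_t a = 32;                /* stand-in for MALLOC_ALIGNMENT * 2 */
  while (a < alignment)
    a <<= 1;
  return a;
}
#endif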
3530
3531 void *
3532 __libc_valloc (size_t bytes)
3533 {
3534 void *p;
3535
3536 if (__malloc_initialized < 0)
3537 ptmalloc_init ();
3538
3539 void *address = RETURN_ADDRESS (0);
3540 size_t pagesize = GLRO (dl_pagesize);
3541 p = _mid_memalign (pagesize, bytes, address);
3542 return TAG_NEW_USABLE (p);
3543 }
3544
3545 void *
3546 __libc_pvalloc (size_t bytes)
3547 {
3548 void *p;
3549
3550 if (__malloc_initialized < 0)
3551 ptmalloc_init ();
3552
3553 void *address = RETURN_ADDRESS (0);
3554 size_t pagesize = GLRO (dl_pagesize);
3555 size_t rounded_bytes;
3556 /* ALIGN_UP with overflow check. */
3557 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3558 pagesize - 1,
3559 &rounded_bytes)))
3560 {
3561 __set_errno (ENOMEM);
3562 return 0;
3563 }
3564 rounded_bytes = rounded_bytes & -(pagesize - 1);
3565
3566 p = _mid_memalign (pagesize, rounded_bytes, address);
3567 return TAG_NEW_USABLE (p);
3568 }
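/* Illustrative sketch (not part of the glibc sources): rounding a request
   up to a whole number of pages with an overflow check, in the spirit of
   __libc_pvalloc above.  Assumes the page size is a power of two; the
   canonical ALIGN_UP mask for that case is ~(pagesize - 1).  */
#if 0
#include <stddef.h>
#include <stdbool.h>

static bool
example_round_to_pages (size_t bytes, size_t pagesize, size_t *out)
{
  size_t padded;
  if (__builtin_add_overflow (bytes, pagesize - 1, &padded))
    return false;               /* request too large */
  *out = padded & ~(pagesize - 1);
  return true;
}
#endif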
3569
3570 void *
3571 __libc_calloc (size_t n, size_t elem_size)
3572 {
3573 mstate av;
3574 mchunkptr oldtop;
3575 INTERNAL_SIZE_T sz, oldtopsize;
3576 void *mem;
3577 #ifndef USE_MTAG
3578 unsigned long clearsize;
3579 unsigned long nclears;
3580 INTERNAL_SIZE_T *d;
3581 #endif
3582 ptrdiff_t bytes;
3583
3584 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
3585 {
3586 __set_errno (ENOMEM);
3587 return NULL;
3588 }
3589
3590 sz = bytes;
3591
3592 void *(*hook) (size_t, const void *) =
3593 atomic_forced_read (__malloc_hook);
3594 if (__builtin_expect (hook != NULL, 0))
3595 {
3596 mem = (*hook)(sz, RETURN_ADDRESS (0));
3597 if (mem == 0)
3598 return 0;
3599
3600 return memset (mem, 0, sz);
3601 }
3602
3603 MAYBE_INIT_TCACHE ();
3604
3605 if (SINGLE_THREAD_P)
3606 av = &main_arena;
3607 else
3608 arena_get (av, sz);
3609
3610 if (av)
3611 {
3612 /* Check if we hand out the top chunk, in which case there may be no
3613 need to clear. */
3614 #if MORECORE_CLEARS
3615 oldtop = top (av);
3616 oldtopsize = chunksize (top (av));
3617 # if MORECORE_CLEARS < 2
3618 /* Only newly allocated memory is guaranteed to be cleared. */
3619 if (av == &main_arena &&
3620 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3621 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3622 # endif
3623 if (av != &main_arena)
3624 {
3625 heap_info *heap = heap_for_ptr (oldtop);
3626 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3627 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3628 }
3629 #endif
3630 }
3631 else
3632 {
3633 /* No usable arenas. */
3634 oldtop = 0;
3635 oldtopsize = 0;
3636 }
3637 mem = _int_malloc (av, sz);
3638
3639 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3640 av == arena_for_chunk (mem2chunk (mem)));
3641
3642 if (!SINGLE_THREAD_P)
3643 {
3644 if (mem == 0 && av != NULL)
3645 {
3646 LIBC_PROBE (memory_calloc_retry, 1, sz);
3647 av = arena_get_retry (av, sz);
3648 mem = _int_malloc (av, sz);
3649 }
3650
3651 if (av != NULL)
3652 __libc_lock_unlock (av->mutex);
3653 }
3654
3655 /* Allocation failed even after a retry. */
3656 if (mem == 0)
3657 return 0;
3658
3659 mchunkptr p = mem2chunk (mem);
3660 /* If we are using memory tagging, then we need to set the tags
3661 regardless of MORECORE_CLEARS, so we zero the whole block while
3662 doing so. */
3663 #ifdef USE_MTAG
3664 return TAG_NEW_MEMSET (mem, 0, CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
3665 #else
3666 INTERNAL_SIZE_T csz = chunksize (p);
3667
3668 /* Two optional cases in which clearing is not necessary */
3669 if (chunk_is_mmapped (p))
3670 {
3671 if (__builtin_expect (perturb_byte, 0))
3672 return memset (mem, 0, sz);
3673
3674 return mem;
3675 }
3676
3677 #if MORECORE_CLEARS
3678 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3679 {
3680 /* clear only the bytes from non-freshly-sbrked memory */
3681 csz = oldtopsize;
3682 }
3683 #endif
3684
3685 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that the
3686 contents have an odd number of INTERNAL_SIZE_T-sized words;
3687 minimally 3. */
3688 d = (INTERNAL_SIZE_T *) mem;
3689 clearsize = csz - SIZE_SZ;
3690 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3691 assert (nclears >= 3);
3692
3693 if (nclears > 9)
3694 return memset (d, 0, clearsize);
3695
3696 else
3697 {
3698 *(d + 0) = 0;
3699 *(d + 1) = 0;
3700 *(d + 2) = 0;
3701 if (nclears > 4)
3702 {
3703 *(d + 3) = 0;
3704 *(d + 4) = 0;
3705 if (nclears > 6)
3706 {
3707 *(d + 5) = 0;
3708 *(d + 6) = 0;
3709 if (nclears > 8)
3710 {
3711 *(d + 7) = 0;
3712 *(d + 8) = 0;
3713 }
3714 }
3715 }
3716 }
3717
3718 return mem;
3719 #endif
3720 }
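/* Illustrative sketch (not part of the glibc sources): the overflow check
   that __libc_calloc performs before allocating, shown as a standalone
   helper.  A calloc-style interface must refuse element counts whose
   product does not fit in size_t rather than silently wrapping.  */
#if 0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

static void *
example_checked_calloc (size_t n, size_t elem_size)
{
  size_t total;
  if (__builtin_mul_overflow (n, elem_size, &total))
    return NULL;                /* n * elem_size would wrap around */
  void *mem = malloc (total);
  return mem != NULL ? memset (mem, 0, total) : NULL;
}
#endif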
3721
3722 /*
3723 ------------------------------ malloc ------------------------------
3724 */
3725
3726 static void *
3727 _int_malloc (mstate av, size_t bytes)
3728 {
3729 INTERNAL_SIZE_T nb; /* normalized request size */
3730 unsigned int idx; /* associated bin index */
3731 mbinptr bin; /* associated bin */
3732
3733 mchunkptr victim; /* inspected/selected chunk */
3734 INTERNAL_SIZE_T size; /* its size */
3735 int victim_index; /* its bin index */
3736
3737 mchunkptr remainder; /* remainder from a split */
3738 unsigned long remainder_size; /* its size */
3739
3740 unsigned int block; /* bit map traverser */
3741 unsigned int bit; /* bit map traverser */
3742 unsigned int map; /* current word of binmap */
3743
3744 mchunkptr fwd; /* misc temp for linking */
3745 mchunkptr bck; /* misc temp for linking */
3746
3747 #if USE_TCACHE
3748 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3749 #endif
3750
3751 /*
3752 Convert request size to internal form by adding SIZE_SZ bytes
3753 overhead plus possibly more to obtain necessary alignment and/or
3754 to obtain a size of at least MINSIZE, the smallest allocatable
3755 size. Also, checked_request2size returns false for request sizes
3756 that are so large that they wrap around zero when padded and
3757 aligned.
3758 */
3759
3760 if (!checked_request2size (bytes, &nb))
3761 {
3762 __set_errno (ENOMEM);
3763 return NULL;
3764 }
3765
3766 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3767 mmap. */
3768 if (__glibc_unlikely (av == NULL))
3769 {
3770 void *p = sysmalloc (nb, av);
3771 if (p != NULL)
3772 alloc_perturb (p, bytes);
3773 return p;
3774 }
3775
3776 /*
3777 If the size qualifies as a fastbin, first check corresponding bin.
3778 This code is safe to execute even if av is not yet initialized, so we
3779 can try it without checking, which saves some time on this fast path.
3780 */
3781
3782 #define REMOVE_FB(fb, victim, pp) \
3783 do \
3784 { \
3785 victim = pp; \
3786 if (victim == NULL) \
3787 break; \
3788 pp = REVEAL_PTR (victim->fd); \
3789 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
3790 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
3791 } \
3792 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
3793 != victim); \
3794
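/* Illustrative aside (not part of the glibc sources, preprocessed away):
   REMOVE_FB above is a lock-free pop from a singly linked list, retried
   until the head has not changed between the read and the
   compare-and-exchange.  The same idea in portable C11 atomics, without
   the pointer mangling, looks roughly like this; a production version
   would also have to consider the ABA problem.  */
#if 0
#include <stdatomic.h>

struct example_node { struct example_node *next; };

static struct example_node *
example_lockfree_pop (_Atomic (struct example_node *) *head)
{
  struct example_node *old = atomic_load (head);
  while (old != NULL
         && !atomic_compare_exchange_weak (head, &old, old->next))
    ;                           /* OLD is refreshed on failure; retry */
  return old;
}
#endif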
3795 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3796 {
3797 idx = fastbin_index (nb);
3798 mfastbinptr *fb = &fastbin (av, idx);
3799 mchunkptr pp;
3800 victim = *fb;
3801
3802 if (victim != NULL)
3803 {
3804 if (__glibc_unlikely (misaligned_chunk (victim)))
3805 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3806
3807 if (SINGLE_THREAD_P)
3808 *fb = REVEAL_PTR (victim->fd);
3809 else
3810 REMOVE_FB (fb, pp, victim);
3811 if (__glibc_likely (victim != NULL))
3812 {
3813 size_t victim_idx = fastbin_index (chunksize (victim));
3814 if (__builtin_expect (victim_idx != idx, 0))
3815 malloc_printerr ("malloc(): memory corruption (fast)");
3816 check_remalloced_chunk (av, victim, nb);
3817 #if USE_TCACHE
3818 /* While we're here, if we see other chunks of the same size,
3819 stash them in the tcache. */
3820 size_t tc_idx = csize2tidx (nb);
3821 if (tcache && tc_idx < mp_.tcache_bins)
3822 {
3823 mchunkptr tc_victim;
3824
3825 /* While bin not empty and tcache not full, copy chunks. */
3826 while (tcache->counts[tc_idx] < mp_.tcache_count
3827 && (tc_victim = *fb) != NULL)
3828 {
3829 if (__glibc_unlikely (misaligned_chunk (tc_victim)))
3830 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
3831 if (SINGLE_THREAD_P)
3832 *fb = REVEAL_PTR (tc_victim->fd);
3833 else
3834 {
3835 REMOVE_FB (fb, pp, tc_victim);
3836 if (__glibc_unlikely (tc_victim == NULL))
3837 break;
3838 }
3839 tcache_put (tc_victim, tc_idx);
3840 }
3841 }
3842 #endif
3843 void *p = chunk2mem (victim);
3844 alloc_perturb (p, bytes);
3845 return p;
3846 }
3847 }
3848 }
3849
3850 /*
3851 If a small request, check regular bin. Since these "smallbins"
3852 hold one size each, no searching within bins is necessary.
3853 (For a large request, we need to wait until unsorted chunks are
3854 processed to find best fit. But for small ones, fits are exact
3855 anyway, so we can check now, which is faster.)
3856 */
3857
3858 if (in_smallbin_range (nb))
3859 {
3860 idx = smallbin_index (nb);
3861 bin = bin_at (av, idx);
3862
3863 if ((victim = last (bin)) != bin)
3864 {
3865 bck = victim->bk;
3866 if (__glibc_unlikely (bck->fd != victim))
3867 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3868 set_inuse_bit_at_offset (victim, nb);
3869 bin->bk = bck;
3870 bck->fd = bin;
3871
3872 if (av != &main_arena)
3873 set_non_main_arena (victim);
3874 check_malloced_chunk (av, victim, nb);
3875 #if USE_TCACHE
3876 /* While we're here, if we see other chunks of the same size,
3877 stash them in the tcache. */
3878 size_t tc_idx = csize2tidx (nb);
3879 if (tcache && tc_idx < mp_.tcache_bins)
3880 {
3881 mchunkptr tc_victim;
3882
3883 /* While bin not empty and tcache not full, copy chunks over. */
3884 while (tcache->counts[tc_idx] < mp_.tcache_count
3885 && (tc_victim = last (bin)) != bin)
3886 {
3887 if (tc_victim != 0)
3888 {
3889 bck = tc_victim->bk;
3890 set_inuse_bit_at_offset (tc_victim, nb);
3891 if (av != &main_arena)
3892 set_non_main_arena (tc_victim);
3893 bin->bk = bck;
3894 bck->fd = bin;
3895
3896 tcache_put (tc_victim, tc_idx);
3897 }
3898 }
3899 }
3900 #endif
3901 void *p = chunk2mem (victim);
3902 alloc_perturb (p, bytes);
3903 return p;
3904 }
3905 }
3906
3907 /*
3908 If this is a large request, consolidate fastbins before continuing.
3909 While it might look excessive to kill all fastbins before
3910 even seeing if there is space available, this avoids
3911 fragmentation problems normally associated with fastbins.
3912 Also, in practice, programs tend to have runs of either small or
3913 large requests, but less often mixtures, so consolidation is not
3914 invoked all that often in most programs. And the programs in which
3915 it is invoked frequently are those that would otherwise tend to fragment.
3916 */
3917
3918 else
3919 {
3920 idx = largebin_index (nb);
3921 if (atomic_load_relaxed (&av->have_fastchunks))
3922 malloc_consolidate (av);
3923 }
3924
3925 /*
3926 Process recently freed or remaindered chunks, taking one only if
3927 it is an exact fit or, for a small request, if the chunk is the remainder
3928 from the most recent non-exact fit. Place other traversed chunks in
3929 bins. Note that this step is the only place in any routine where
3930 chunks are placed in bins.
3931
3932 The outer loop here is needed because we might not realize until
3933 near the end of malloc that we should have consolidated, so must
3934 do so and retry. This happens at most once, and only when we would
3935 otherwise need to expand memory to service a "small" request.
3936 */
3937
3938 #if USE_TCACHE
3939 INTERNAL_SIZE_T tcache_nb = 0;
3940 size_t tc_idx = csize2tidx (nb);
3941 if (tcache && tc_idx < mp_.tcache_bins)
3942 tcache_nb = nb;
3943 int return_cached = 0;
3944
3945 tcache_unsorted_count = 0;
3946 #endif
3947
3948 for (;; )
3949 {
3950 int iters = 0;
3951 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3952 {
3953 bck = victim->bk;
3954 size = chunksize (victim);
3955 mchunkptr next = chunk_at_offset (victim, size);
3956
3957 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
3958 || __glibc_unlikely (size > av->system_mem))
3959 malloc_printerr ("malloc(): invalid size (unsorted)");
3960 if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
3961 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
3962 malloc_printerr ("malloc(): invalid next size (unsorted)");
3963 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
3964 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
3965 if (__glibc_unlikely (bck->fd != victim)
3966 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
3967 malloc_printerr ("malloc(): unsorted double linked list corrupted");
3968 if (__glibc_unlikely (prev_inuse (next)))
3969 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
3970
3971 /*
3972 If a small request, try to use last remainder if it is the
3973 only chunk in unsorted bin. This helps promote locality for
3974 runs of consecutive small requests. This is the only
3975 exception to best-fit, and applies only when there is
3976 no exact fit for a small chunk.
3977 */
3978
3979 if (in_smallbin_range (nb) &&
3980 bck == unsorted_chunks (av) &&
3981 victim == av->last_remainder &&
3982 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3983 {
3984 /* split and reattach remainder */
3985 remainder_size = size - nb;
3986 remainder = chunk_at_offset (victim, nb);
3987 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3988 av->last_remainder = remainder;
3989 remainder->bk = remainder->fd = unsorted_chunks (av);
3990 if (!in_smallbin_range (remainder_size))
3991 {
3992 remainder->fd_nextsize = NULL;
3993 remainder->bk_nextsize = NULL;
3994 }
3995
3996 set_head (victim, nb | PREV_INUSE |
3997 (av != &main_arena ? NON_MAIN_ARENA : 0));
3998 set_head (remainder, remainder_size | PREV_INUSE);
3999 set_foot (remainder, remainder_size);
4000
4001 check_malloced_chunk (av, victim, nb);
4002 void *p = chunk2mem (victim);
4003 alloc_perturb (p, bytes);
4004 return p;
4005 }
4006
4007 /* remove from unsorted list */
4008 if (__glibc_unlikely (bck->fd != victim))
4009 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
4010 unsorted_chunks (av)->bk = bck;
4011 bck->fd = unsorted_chunks (av);
4012
4013 /* Take now instead of binning if exact fit */
4014
4015 if (size == nb)
4016 {
4017 set_inuse_bit_at_offset (victim, size);
4018 if (av != &main_arena)
4019 set_non_main_arena (victim);
4020 #if USE_TCACHE
4021 /* Fill cache first, return to user only if cache fills.
4022 We may return one of these chunks later. */
4023 if (tcache_nb
4024 && tcache->counts[tc_idx] < mp_.tcache_count)
4025 {
4026 tcache_put (victim, tc_idx);
4027 return_cached = 1;
4028 continue;
4029 }
4030 else
4031 {
4032 #endif
4033 check_malloced_chunk (av, victim, nb);
4034 void *p = chunk2mem (victim);
4035 alloc_perturb (p, bytes);
4036 return p;
4037 #if USE_TCACHE
4038 }
4039 #endif
4040 }
4041
4042 /* place chunk in bin */
4043
4044 if (in_smallbin_range (size))
4045 {
4046 victim_index = smallbin_index (size);
4047 bck = bin_at (av, victim_index);
4048 fwd = bck->fd;
4049 }
4050 else
4051 {
4052 victim_index = largebin_index (size);
4053 bck = bin_at (av, victim_index);
4054 fwd = bck->fd;
4055
4056 /* maintain large bins in sorted order */
4057 if (fwd != bck)
4058 {
4059 /* Or with inuse bit to speed comparisons */
4060 size |= PREV_INUSE;
4061 /* if smaller than smallest, bypass loop below */
4062 assert (chunk_main_arena (bck->bk));
4063 if ((unsigned long) (size)
4064 < (unsigned long) chunksize_nomask (bck->bk))
4065 {
4066 fwd = bck;
4067 bck = bck->bk;
4068
4069 victim->fd_nextsize = fwd->fd;
4070 victim->bk_nextsize = fwd->fd->bk_nextsize;
4071 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4072 }
4073 else
4074 {
4075 assert (chunk_main_arena (fwd));
4076 while ((unsigned long) size < chunksize_nomask (fwd))
4077 {
4078 fwd = fwd->fd_nextsize;
4079 assert (chunk_main_arena (fwd));
4080 }
4081
4082 if ((unsigned long) size
4083 == (unsigned long) chunksize_nomask (fwd))
4084 /* Always insert in the second position. */
4085 fwd = fwd->fd;
4086 else
4087 {
4088 victim->fd_nextsize = fwd;
4089 victim->bk_nextsize = fwd->bk_nextsize;
4090 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
4091 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4092 fwd->bk_nextsize = victim;
4093 victim->bk_nextsize->fd_nextsize = victim;
4094 }
4095 bck = fwd->bk;
4096 if (bck->fd != fwd)
4097 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4098 }
4099 }
4100 else
4101 victim->fd_nextsize = victim->bk_nextsize = victim;
4102 }
4103
4104 mark_bin (av, victim_index);
4105 victim->bk = bck;
4106 victim->fd = fwd;
4107 fwd->bk = victim;
4108 bck->fd = victim;
4109
4110 #if USE_TCACHE
4111 /* If we've processed as many chunks as we're allowed while
4112 filling the cache, return one of the cached ones. */
4113 ++tcache_unsorted_count;
4114 if (return_cached
4115 && mp_.tcache_unsorted_limit > 0
4116 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
4117 {
4118 return tcache_get (tc_idx);
4119 }
4120 #endif
4121
4122 #define MAX_ITERS 10000
4123 if (++iters >= MAX_ITERS)
4124 break;
4125 }
4126
4127 #if USE_TCACHE
4128 /* If all the small chunks we found ended up cached, return one now. */
4129 if (return_cached)
4130 {
4131 return tcache_get (tc_idx);
4132 }
4133 #endif
4134
4135 /*
4136 If a large request, scan through the chunks of current bin in
4137 sorted order to find smallest that fits. Use the skip list for this.
4138 */
4139
4140 if (!in_smallbin_range (nb))
4141 {
4142 bin = bin_at (av, idx);
4143
4144 /* skip scan if empty or largest chunk is too small */
4145 if ((victim = first (bin)) != bin
4146 && (unsigned long) chunksize_nomask (victim)
4147 >= (unsigned long) (nb))
4148 {
4149 victim = victim->bk_nextsize;
4150 while (((unsigned long) (size = chunksize (victim)) <
4151 (unsigned long) (nb)))
4152 victim = victim->bk_nextsize;
4153
4154 /* Avoid removing the first entry for a size so that the skip
4155 list does not have to be rerouted. */
4156 if (victim != last (bin)
4157 && chunksize_nomask (victim)
4158 == chunksize_nomask (victim->fd))
4159 victim = victim->fd;
4160
4161 remainder_size = size - nb;
4162 unlink_chunk (av, victim);
4163
4164 /* Exhaust */
4165 if (remainder_size < MINSIZE)
4166 {
4167 set_inuse_bit_at_offset (victim, size);
4168 if (av != &main_arena)
4169 set_non_main_arena (victim);
4170 }
4171 /* Split */
4172 else
4173 {
4174 remainder = chunk_at_offset (victim, nb);
4175 /* We cannot assume the unsorted list is empty and therefore
4176 have to perform a complete insert here. */
4177 bck = unsorted_chunks (av);
4178 fwd = bck->fd;
4179 if (__glibc_unlikely (fwd->bk != bck))
4180 malloc_printerr ("malloc(): corrupted unsorted chunks");
4181 remainder->bk = bck;
4182 remainder->fd = fwd;
4183 bck->fd = remainder;
4184 fwd->bk = remainder;
4185 if (!in_smallbin_range (remainder_size))
4186 {
4187 remainder->fd_nextsize = NULL;
4188 remainder->bk_nextsize = NULL;
4189 }
4190 set_head (victim, nb | PREV_INUSE |
4191 (av != &main_arena ? NON_MAIN_ARENA : 0));
4192 set_head (remainder, remainder_size | PREV_INUSE);
4193 set_foot (remainder, remainder_size);
4194 }
4195 check_malloced_chunk (av, victim, nb);
4196 void *p = chunk2mem (victim);
4197 alloc_perturb (p, bytes);
4198 return p;
4199 }
4200 }
4201
4202 /*
4203 Search for a chunk by scanning bins, starting with next largest
4204 bin. This search is strictly by best-fit; i.e., the smallest
4205 (with ties going to approximately the least recently used) chunk
4206 that fits is selected.
4207
4208 The bitmap avoids needing to check that most blocks are nonempty.
4209 The particular case of skipping all bins during warm-up phases
4210 when no chunks have been returned yet is faster than it might look.
4211 */
4212
4213 ++idx;
4214 bin = bin_at (av, idx);
4215 block = idx2block (idx);
4216 map = av->binmap[block];
4217 bit = idx2bit (idx);
4218
4219 for (;; )
4220 {
4221 /* Skip rest of block if there are no more set bits in this block. */
4222 if (bit > map || bit == 0)
4223 {
4224 do
4225 {
4226 if (++block >= BINMAPSIZE) /* out of bins */
4227 goto use_top;
4228 }
4229 while ((map = av->binmap[block]) == 0);
4230
4231 bin = bin_at (av, (block << BINMAPSHIFT));
4232 bit = 1;
4233 }
4234
4235 /* Advance to bin with set bit. There must be one. */
4236 while ((bit & map) == 0)
4237 {
4238 bin = next_bin (bin);
4239 bit <<= 1;
4240 assert (bit != 0);
4241 }
4242
4243 /* Inspect the bin. It is likely to be non-empty */
4244 victim = last (bin);
4245
4246 /* If a false alarm (empty bin), clear the bit. */
4247 if (victim == bin)
4248 {
4249 av->binmap[block] = map &= ~bit; /* Write through */
4250 bin = next_bin (bin);
4251 bit <<= 1;
4252 }
4253
4254 else
4255 {
4256 size = chunksize (victim);
4257
4258 /* We know the first chunk in this bin is big enough to use. */
4259 assert ((unsigned long) (size) >= (unsigned long) (nb));
4260
4261 remainder_size = size - nb;
4262
4263 /* unlink */
4264 unlink_chunk (av, victim);
4265
4266 /* Exhaust */
4267 if (remainder_size < MINSIZE)
4268 {
4269 set_inuse_bit_at_offset (victim, size);
4270 if (av != &main_arena)
4271 set_non_main_arena (victim);
4272 }
4273
4274 /* Split */
4275 else
4276 {
4277 remainder = chunk_at_offset (victim, nb);
4278
4279 /* We cannot assume the unsorted list is empty and therefore
4280 have to perform a complete insert here. */
4281 bck = unsorted_chunks (av);
4282 fwd = bck->fd;
4283 if (__glibc_unlikely (fwd->bk != bck))
4284 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4285 remainder->bk = bck;
4286 remainder->fd = fwd;
4287 bck->fd = remainder;
4288 fwd->bk = remainder;
4289
4290 /* advertise as last remainder */
4291 if (in_smallbin_range (nb))
4292 av->last_remainder = remainder;
4293 if (!in_smallbin_range (remainder_size))
4294 {
4295 remainder->fd_nextsize = NULL;
4296 remainder->bk_nextsize = NULL;
4297 }
4298 set_head (victim, nb | PREV_INUSE |
4299 (av != &main_arena ? NON_MAIN_ARENA : 0));
4300 set_head (remainder, remainder_size | PREV_INUSE);
4301 set_foot (remainder, remainder_size);
4302 }
4303 check_malloced_chunk (av, victim, nb);
4304 void *p = chunk2mem (victim);
4305 alloc_perturb (p, bytes);
4306 return p;
4307 }
4308 }
4309
4310 use_top:
4311 /*
4312 If large enough, split off the chunk bordering the end of memory
4313 (held in av->top). Note that this is in accord with the best-fit
4314 search rule. In effect, av->top is treated as larger (and thus
4315 less well fitting) than any other available chunk since it can
4316 be extended to be as large as necessary (up to system
4317 limitations).
4318
4319 We require that av->top always exists (i.e., has size >=
4320 MINSIZE) after initialization, so if it would otherwise be
4321 exhausted by current request, it is replenished. (The main
4322 reason for ensuring it exists is that we may need MINSIZE space
4323 to put in fenceposts in sysmalloc.)
4324 */
4325
4326 victim = av->top;
4327 size = chunksize (victim);
4328
4329 if (__glibc_unlikely (size > av->system_mem))
4330 malloc_printerr ("malloc(): corrupted top size");
4331
4332 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4333 {
4334 remainder_size = size - nb;
4335 remainder = chunk_at_offset (victim, nb);
4336 av->top = remainder;
4337 set_head (victim, nb | PREV_INUSE |
4338 (av != &main_arena ? NON_MAIN_ARENA : 0));
4339 set_head (remainder, remainder_size | PREV_INUSE);
4340
4341 check_malloced_chunk (av, victim, nb);
4342 void *p = chunk2mem (victim);
4343 alloc_perturb (p, bytes);
4344 return p;
4345 }
4346
4347 /* When we are using atomic ops to free fast chunks we can get
4348 here for all block sizes. */
4349 else if (atomic_load_relaxed (&av->have_fastchunks))
4350 {
4351 malloc_consolidate (av);
4352 /* restore original bin index */
4353 if (in_smallbin_range (nb))
4354 idx = smallbin_index (nb);
4355 else
4356 idx = largebin_index (nb);
4357 }
4358
4359 /*
4360 Otherwise, relay to handle system-dependent cases
4361 */
4362 else
4363 {
4364 void *p = sysmalloc (nb, av);
4365 if (p != NULL)
4366 alloc_perturb (p, bytes);
4367 return p;
4368 }
4369 }
4370 }
4371
4372 /*
4373 ------------------------------ free ------------------------------
4374 */
4375
4376 static void
4377 _int_free (mstate av, mchunkptr p, int have_lock)
4378 {
4379 INTERNAL_SIZE_T size; /* its size */
4380 mfastbinptr *fb; /* associated fastbin */
4381 mchunkptr nextchunk; /* next contiguous chunk */
4382 INTERNAL_SIZE_T nextsize; /* its size */
4383 int nextinuse; /* true if nextchunk is used */
4384 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4385 mchunkptr bck; /* misc temp for linking */
4386 mchunkptr fwd; /* misc temp for linking */
4387
4388 size = chunksize (p);
4389
4390 /* A little security check which won't hurt performance: the
4391 allocator never wraps around at the end of the address space.
4392 Therefore we can exclude some size values which might appear
4393 here by accident or by "design" from some intruder. */
4394 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4395 || __builtin_expect (misaligned_chunk (p), 0))
4396 malloc_printerr ("free(): invalid pointer");
4397 /* We know that each chunk is at least MINSIZE bytes in size and a
4398 multiple of MALLOC_ALIGNMENT. */
4399 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4400 malloc_printerr ("free(): invalid size");
4401
4402 check_inuse_chunk(av, p);
4403
4404 #if USE_TCACHE
4405 {
4406 size_t tc_idx = csize2tidx (size);
4407 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4408 {
4409 /* Check to see if it's already in the tcache. */
4410 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4411
4412 /* This test succeeds on double free. However, we don't 100%
4413 trust it (it also matches random payload data at a 1 in
4414 2^<size_t> chance), so verify it's not an unlikely
4415 coincidence before aborting. */
4416 if (__glibc_unlikely (e->key == tcache))
4417 {
4418 tcache_entry *tmp;
4419 size_t cnt = 0;
4420 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4421 for (tmp = tcache->entries[tc_idx];
4422 tmp;
4423 tmp = REVEAL_PTR (tmp->next), ++cnt)
4424 {
4425 if (cnt >= mp_.tcache_count)
4426 malloc_printerr ("free(): too many chunks detected in tcache");
4427 if (__glibc_unlikely (!aligned_OK (tmp)))
4428 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4429 if (tmp == e)
4430 malloc_printerr ("free(): double free detected in tcache 2");
4431 /* If we get here, it was a coincidence. We've wasted a
4432 few cycles, but don't abort. */
4433 }
4434 }
4435
4436 if (tcache->counts[tc_idx] < mp_.tcache_count)
4437 {
4438 tcache_put (p, tc_idx);
4439 return;
4440 }
4441 }
4442 }
4443 #endif
4444
4445 /*
4446 If eligible, place chunk on a fastbin so it can be found
4447 and used quickly in malloc.
4448 */
4449
4450 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4451
4452 #if TRIM_FASTBINS
4453 /*
4454 If TRIM_FASTBINS set, don't place chunks
4455 bordering top into fastbins
4456 */
4457 && (chunk_at_offset(p, size) != av->top)
4458 #endif
4459 ) {
4460
4461 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4462 <= CHUNK_HDR_SZ, 0)
4463 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4464 >= av->system_mem, 0))
4465 {
4466 bool fail = true;
4467 /* We might not have a lock at this point and concurrent modifications
4468 of system_mem might result in a false positive. Redo the test after
4469 getting the lock. */
4470 if (!have_lock)
4471 {
4472 __libc_lock_lock (av->mutex);
4473 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
4474 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4475 __libc_lock_unlock (av->mutex);
4476 }
4477
4478 if (fail)
4479 malloc_printerr ("free(): invalid next size (fast)");
4480 }
4481
4482 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4483
4484 atomic_store_relaxed (&av->have_fastchunks, true);
4485 unsigned int idx = fastbin_index(size);
4486 fb = &fastbin (av, idx);
4487
4488 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4489 mchunkptr old = *fb, old2;
4490
4491 if (SINGLE_THREAD_P)
4492 {
4493 /* Check that the top of the bin is not the record we are going to
4494 add (i.e., double free). */
4495 if (__builtin_expect (old == p, 0))
4496 malloc_printerr ("double free or corruption (fasttop)");
4497 p->fd = PROTECT_PTR (&p->fd, old);
4498 *fb = p;
4499 }
4500 else
4501 do
4502 {
4503 /* Check that the top of the bin is not the record we are going to
4504 add (i.e., double free). */
4505 if (__builtin_expect (old == p, 0))
4506 malloc_printerr ("double free or corruption (fasttop)");
4507 old2 = old;
4508 p->fd = PROTECT_PTR (&p->fd, old);
4509 }
4510 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4511 != old2);
4512
4513 /* Check that size of fastbin chunk at the top is the same as
4514 size of the chunk that we are adding. We can dereference OLD
4515 only if we have the lock, otherwise it might have already been
4516 allocated again. */
4517 if (have_lock && old != NULL
4518 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
4519 malloc_printerr ("invalid fastbin entry (free)");
4520 }
4521
4522 /*
4523 Consolidate other non-mmapped chunks as they arrive.
4524 */
4525
4526 else if (!chunk_is_mmapped(p)) {
4527
4528 /* If we're single-threaded, don't lock the arena. */
4529 if (SINGLE_THREAD_P)
4530 have_lock = true;
4531
4532 if (!have_lock)
4533 __libc_lock_lock (av->mutex);
4534
4535 nextchunk = chunk_at_offset(p, size);
4536
4537 /* Lightweight tests: check whether the block is already the
4538 top block. */
4539 if (__glibc_unlikely (p == av->top))
4540 malloc_printerr ("double free or corruption (top)");
4541 /* Or whether the next chunk is beyond the boundaries of the arena. */
4542 if (__builtin_expect (contiguous (av)
4543 && (char *) nextchunk
4544 >= ((char *) av->top + chunksize(av->top)), 0))
4545 malloc_printerr ("double free or corruption (out)");
4546 /* Or whether the block is actually not marked used. */
4547 if (__glibc_unlikely (!prev_inuse(nextchunk)))
4548 malloc_printerr ("double free or corruption (!prev)");
4549
4550 nextsize = chunksize(nextchunk);
4551 if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
4552 || __builtin_expect (nextsize >= av->system_mem, 0))
4553 malloc_printerr ("free(): invalid next size (normal)");
4554
4555 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4556
4557 /* consolidate backward */
4558 if (!prev_inuse(p)) {
4559 prevsize = prev_size (p);
4560 size += prevsize;
4561 p = chunk_at_offset(p, -((long) prevsize));
4562 if (__glibc_unlikely (chunksize(p) != prevsize))
4563 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4564 unlink_chunk (av, p);
4565 }
4566
4567 if (nextchunk != av->top) {
4568 /* get and clear inuse bit */
4569 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4570
4571 /* consolidate forward */
4572 if (!nextinuse) {
4573 unlink_chunk (av, nextchunk);
4574 size += nextsize;
4575 } else
4576 clear_inuse_bit_at_offset(nextchunk, 0);
4577
4578 /*
4579 Place the chunk in unsorted chunk list. Chunks are
4580 not placed into regular bins until after they have
4581 been given one chance to be used in malloc.
4582 */
4583
4584 bck = unsorted_chunks(av);
4585 fwd = bck->fd;
4586 if (__glibc_unlikely (fwd->bk != bck))
4587 malloc_printerr ("free(): corrupted unsorted chunks");
4588 p->fd = fwd;
4589 p->bk = bck;
4590 if (!in_smallbin_range(size))
4591 {
4592 p->fd_nextsize = NULL;
4593 p->bk_nextsize = NULL;
4594 }
4595 bck->fd = p;
4596 fwd->bk = p;
4597
4598 set_head(p, size | PREV_INUSE);
4599 set_foot(p, size);
4600
4601 check_free_chunk(av, p);
4602 }
4603
4604 /*
4605 If the chunk borders the current high end of memory,
4606 consolidate into top
4607 */
4608
4609 else {
4610 size += nextsize;
4611 set_head(p, size | PREV_INUSE);
4612 av->top = p;
4613 check_chunk(av, p);
4614 }
4615
4616 /*
4617 If freeing a large space, consolidate possibly-surrounding
4618 chunks. Then, if the total unused topmost memory exceeds trim
4619 threshold, ask malloc_trim to reduce top.
4620
4621 Unless max_fast is 0, we don't know if there are fastbins
4622 bordering top, so we cannot tell for sure whether threshold
4623 has been reached unless fastbins are consolidated. But we
4624 don't want to consolidate on each free. As a compromise,
4625 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4626 is reached.
4627 */
4628
4629 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4630 if (atomic_load_relaxed (&av->have_fastchunks))
4631 malloc_consolidate(av);
4632
4633 if (av == &main_arena) {
4634 #ifndef MORECORE_CANNOT_TRIM
4635 if ((unsigned long)(chunksize(av->top)) >=
4636 (unsigned long)(mp_.trim_threshold))
4637 systrim(mp_.top_pad, av);
4638 #endif
4639 } else {
4640 /* Always try heap_trim(), even if the top chunk is not
4641 large, because the corresponding heap might go away. */
4642 heap_info *heap = heap_for_ptr(top(av));
4643
4644 assert(heap->ar_ptr == av);
4645 heap_trim(heap, mp_.top_pad);
4646 }
4647 }
4648
4649 if (!have_lock)
4650 __libc_lock_unlock (av->mutex);
4651 }
4652 /*
4653 If the chunk was allocated via mmap, release via munmap().
4654 */
4655
4656 else {
4657 munmap_chunk (p);
4658 }
4659 }
4660
4661 /*
4662 ------------------------- malloc_consolidate -------------------------
4663
4664 malloc_consolidate is a specialized version of free() that tears
4665 down chunks held in fastbins. Free itself cannot be used for this
4666 purpose since, among other things, it might place chunks back onto
4667 fastbins. So, instead, we need to use a minor variant of the same
4668 code.
4669 */
4670
4671 static void malloc_consolidate(mstate av)
4672 {
4673 mfastbinptr* fb; /* current fastbin being consolidated */
4674 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4675 mchunkptr p; /* current chunk being consolidated */
4676 mchunkptr nextp; /* next chunk to consolidate */
4677 mchunkptr unsorted_bin; /* bin header */
4678 mchunkptr first_unsorted; /* chunk to link to */
4679
4680 /* These have same use as in free() */
4681 mchunkptr nextchunk;
4682 INTERNAL_SIZE_T size;
4683 INTERNAL_SIZE_T nextsize;
4684 INTERNAL_SIZE_T prevsize;
4685 int nextinuse;
4686
4687 atomic_store_relaxed (&av->have_fastchunks, false);
4688
4689 unsorted_bin = unsorted_chunks(av);
4690
4691 /*
4692 Remove each chunk from its fast bin and consolidate it, then place
4693 it in the unsorted bin. Among other reasons for doing this,
4694 placing chunks in the unsorted bin avoids needing to calculate actual bins
4695 until malloc is sure that chunks aren't immediately going to be
4696 reused anyway.
4697 */
4698
4699 maxfb = &fastbin (av, NFASTBINS - 1);
4700 fb = &fastbin (av, 0);
4701 do {
4702 p = atomic_exchange_acq (fb, NULL);
4703 if (p != 0) {
4704 do {
4705 {
4706 if (__glibc_unlikely (misaligned_chunk (p)))
4707 malloc_printerr ("malloc_consolidate(): "
4708 "unaligned fastbin chunk detected");
4709
4710 unsigned int idx = fastbin_index (chunksize (p));
4711 if ((&fastbin (av, idx)) != fb)
4712 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4713 }
4714
4715 check_inuse_chunk(av, p);
4716 nextp = REVEAL_PTR (p->fd);
4717
4718 /* Slightly streamlined version of consolidation code in free() */
4719 size = chunksize (p);
4720 nextchunk = chunk_at_offset(p, size);
4721 nextsize = chunksize(nextchunk);
4722
4723 if (!prev_inuse(p)) {
4724 prevsize = prev_size (p);
4725 size += prevsize;
4726 p = chunk_at_offset(p, -((long) prevsize));
4727 if (__glibc_unlikely (chunksize(p) != prevsize))
4728 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4729 unlink_chunk (av, p);
4730 }
4731
4732 if (nextchunk != av->top) {
4733 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4734
4735 if (!nextinuse) {
4736 size += nextsize;
4737 unlink_chunk (av, nextchunk);
4738 } else
4739 clear_inuse_bit_at_offset(nextchunk, 0);
4740
4741 first_unsorted = unsorted_bin->fd;
4742 unsorted_bin->fd = p;
4743 first_unsorted->bk = p;
4744
4745 if (!in_smallbin_range (size)) {
4746 p->fd_nextsize = NULL;
4747 p->bk_nextsize = NULL;
4748 }
4749
4750 set_head(p, size | PREV_INUSE);
4751 p->bk = unsorted_bin;
4752 p->fd = first_unsorted;
4753 set_foot(p, size);
4754 }
4755
4756 else {
4757 size += nextsize;
4758 set_head(p, size | PREV_INUSE);
4759 av->top = p;
4760 }
4761
4762 } while ( (p = nextp) != 0);
4763
4764 }
4765 } while (fb++ != maxfb);
4766 }
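/* Illustrative aside (not part of the glibc sources): the loop above
   detaches each fastbin in one atomic exchange and then walks the
   detached list privately, so concurrent frees pushing onto the same bin
   are never lost.  Roughly, in C11 atomics:  */
#if 0
#include <stdatomic.h>
#include <stddef.h>

struct example_node { struct example_node *next; };

static void
example_drain (_Atomic (struct example_node *) *bin,
               void (*consume) (struct example_node *))
{
  /* Take ownership of the whole list in one step.  */
  struct example_node *p = atomic_exchange (bin, NULL);
  while (p != NULL)
    {
      struct example_node *next = p->next;  /* read before consuming P */
      consume (p);
      p = next;
    }
}
#endif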
4767
4768 /*
4769 ------------------------------ realloc ------------------------------
4770 */
4771
4772 void*
4773 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4774 INTERNAL_SIZE_T nb)
4775 {
4776 mchunkptr newp; /* chunk to return */
4777 INTERNAL_SIZE_T newsize; /* its size */
4778 void* newmem; /* corresponding user mem */
4779
4780 mchunkptr next; /* next contiguous chunk after oldp */
4781
4782 mchunkptr remainder; /* extra space at end of newp */
4783 unsigned long remainder_size; /* its size */
4784
4785 /* oldmem size */
4786 if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
4787 || __builtin_expect (oldsize >= av->system_mem, 0))
4788 malloc_printerr ("realloc(): invalid old size");
4789
4790 check_inuse_chunk (av, oldp);
4791
4792 /* All callers already filter out mmap'ed chunks. */
4793 assert (!chunk_is_mmapped (oldp));
4794
4795 next = chunk_at_offset (oldp, oldsize);
4796 INTERNAL_SIZE_T nextsize = chunksize (next);
4797 if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
4798 || __builtin_expect (nextsize >= av->system_mem, 0))
4799 malloc_printerr ("realloc(): invalid next size");
4800
4801 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4802 {
4803 /* already big enough; split below */
4804 newp = oldp;
4805 newsize = oldsize;
4806 }
4807
4808 else
4809 {
4810 /* Try to expand forward into top */
4811 if (next == av->top &&
4812 (unsigned long) (newsize = oldsize + nextsize) >=
4813 (unsigned long) (nb + MINSIZE))
4814 {
4815 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4816 av->top = chunk_at_offset (oldp, nb);
4817 set_head (av->top, (newsize - nb) | PREV_INUSE);
4818 check_inuse_chunk (av, oldp);
4819 return TAG_NEW_USABLE (chunk2rawmem (oldp));
4820 }
4821
4822 /* Try to expand forward into next chunk; split off remainder below */
4823 else if (next != av->top &&
4824 !inuse (next) &&
4825 (unsigned long) (newsize = oldsize + nextsize) >=
4826 (unsigned long) (nb))
4827 {
4828 newp = oldp;
4829 unlink_chunk (av, next);
4830 }
4831
4832 /* allocate, copy, free */
4833 else
4834 {
4835 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4836 if (newmem == 0)
4837 return 0; /* propagate failure */
4838
4839 newp = mem2chunk (newmem);
4840 newsize = chunksize (newp);
4841
4842 /*
4843 Avoid copy if newp is next chunk after oldp.
4844 */
4845 if (newp == next)
4846 {
4847 newsize += oldsize;
4848 newp = oldp;
4849 }
4850 else
4851 {
4852 void *oldmem = chunk2mem (oldp);
4853 newmem = TAG_NEW_USABLE (newmem);
4854 memcpy (newmem, oldmem,
4855 CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ);
4856 (void) TAG_REGION (chunk2rawmem (oldp), oldsize);
4857 _int_free (av, oldp, 1);
4858 check_inuse_chunk (av, newp);
4859 return chunk2mem (newp);
4860 }
4861 }
4862 }
4863
4864 /* If possible, free extra space in old or extended chunk */
4865
4866 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4867
4868 remainder_size = newsize - nb;
4869
4870 if (remainder_size < MINSIZE) /* not enough extra to split off */
4871 {
4872 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4873 set_inuse_bit_at_offset (newp, newsize);
4874 }
4875 else /* split remainder */
4876 {
4877 remainder = chunk_at_offset (newp, nb);
4878 /* Clear any user-space tags before writing the header. */
4879 remainder = TAG_REGION (remainder, remainder_size);
4880 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4881 set_head (remainder, remainder_size | PREV_INUSE |
4882 (av != &main_arena ? NON_MAIN_ARENA : 0));
4883 /* Mark remainder as inuse so free() won't complain */
4884 set_inuse_bit_at_offset (remainder, remainder_size);
4885 _int_free (av, remainder, 1);
4886 }
4887
4888 check_inuse_chunk (av, newp);
4889 return TAG_NEW_USABLE (chunk2rawmem (newp));
4890 }
4891
4892 /*
4893 ------------------------------ memalign ------------------------------
4894 */
4895
4896 static void *
4897 _int_memalign (mstate av, size_t alignment, size_t bytes)
4898 {
4899 INTERNAL_SIZE_T nb; /* padded request size */
4900 char *m; /* memory returned by malloc call */
4901 mchunkptr p; /* corresponding chunk */
4902 char *brk; /* alignment point within p */
4903 mchunkptr newp; /* chunk to return */
4904 INTERNAL_SIZE_T newsize; /* its size */
4905 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4906 mchunkptr remainder; /* spare room at end to split off */
4907 unsigned long remainder_size; /* its size */
4908 INTERNAL_SIZE_T size;
4909
4910
4911
4912 if (!checked_request2size (bytes, &nb))
4913 {
4914 __set_errno (ENOMEM);
4915 return NULL;
4916 }
4917
4918 /*
4919 Strategy: find a spot within that chunk that meets the alignment
4920 request, and then possibly free the leading and trailing space.
4921 */
4922
4923 /* Call malloc with worst case padding to hit alignment. */
4924
4925 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4926
4927 if (m == 0)
4928 return 0; /* propagate failure */
4929
4930 p = mem2chunk (m);
4931
4932 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4933
4934 { /*
4935 Find an aligned spot inside chunk. Since we need to give back
4936 leading space in a chunk of at least MINSIZE, if the first
4937 calculation places us at a spot with less than MINSIZE leader,
4938 we can move to the next aligned spot -- we've allocated enough
4939 total room so that this is always possible.
4940 */
4941 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4942 - ((signed long) alignment));
4943 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4944 brk += alignment;
4945
4946 newp = (mchunkptr) brk;
4947 leadsize = brk - (char *) (p);
4948 newsize = chunksize (p) - leadsize;
4949
4950 /* For mmapped chunks, just adjust offset */
4951 if (chunk_is_mmapped (p))
4952 {
4953 set_prev_size (newp, prev_size (p) + leadsize);
4954 set_head (newp, newsize | IS_MMAPPED);
4955 return chunk2mem (newp);
4956 }
4957
4958 /* Otherwise, give back leader, use the rest */
4959 set_head (newp, newsize | PREV_INUSE |
4960 (av != &main_arena ? NON_MAIN_ARENA : 0));
4961 set_inuse_bit_at_offset (newp, newsize);
4962 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4963 _int_free (av, p, 1);
4964 p = newp;
4965
4966 assert (newsize >= nb &&
4967 (((unsigned long) (chunk2rawmem (p))) % alignment) == 0);
4968 }
4969
4970 /* Also give back spare room at the end */
4971 if (!chunk_is_mmapped (p))
4972 {
4973 size = chunksize (p);
4974 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4975 {
4976 remainder_size = size - nb;
4977 remainder = chunk_at_offset (p, nb);
4978 set_head (remainder, remainder_size | PREV_INUSE |
4979 (av != &main_arena ? NON_MAIN_ARENA : 0));
4980 set_head_size (p, nb);
4981 _int_free (av, remainder, 1);
4982 }
4983 }
4984
4985 check_inuse_chunk (av, p);
4986 return chunk2mem (p);
4987 }
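/* Illustrative sketch (not part of the glibc sources): the address
   arithmetic at the heart of _int_memalign above.  Given an over-sized
   allocation, round the user pointer up to the requested alignment
   (assumed to be a power of two); the leading and trailing slack is what
   the real code gives back to the arena.  */
#if 0
#include <stddef.h>
#include <stdint.h>

static void *
example_align_up_ptr (void *mem, size_t alignment)
{
  uintptr_t addr = (uintptr_t) mem;
  uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1);
  return (void *) aligned;
}
#endif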
4988
4989
4990 /*
4991 ------------------------------ malloc_trim ------------------------------
4992 */
4993
4994 static int
4995 mtrim (mstate av, size_t pad)
4996 {
4997 /* Ensure all blocks are consolidated. */
4998 malloc_consolidate (av);
4999
5000 const size_t ps = GLRO (dl_pagesize);
5001 int psindex = bin_index (ps);
5002 const size_t psm1 = ps - 1;
5003
5004 int result = 0;
5005 for (int i = 1; i < NBINS; ++i)
5006 if (i == 1 || i >= psindex)
5007 {
5008 mbinptr bin = bin_at (av, i);
5009
5010 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5011 {
5012 INTERNAL_SIZE_T size = chunksize (p);
5013
5014 if (size > psm1 + sizeof (struct malloc_chunk))
5015 {
5016 /* See whether the chunk contains at least one unused page. */
5017 char *paligned_mem = (char *) (((uintptr_t) p
5018 + sizeof (struct malloc_chunk)
5019 + psm1) & ~psm1);
5020
5021 assert ((char *) chunk2rawmem (p) + 2 * CHUNK_HDR_SZ
5022 <= paligned_mem);
5023 assert ((char *) p + size > paligned_mem);
5024
5025 /* This is the size we could potentially free. */
5026 size -= paligned_mem - (char *) p;
5027
5028 if (size > psm1)
5029 {
5030 #if MALLOC_DEBUG
5031 /* When debugging we simulate destroying the memory
5032 content. */
5033 memset (paligned_mem, 0x89, size & ~psm1);
5034 #endif
5035 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5036
5037 result = 1;
5038 }
5039 }
5040 }
5041 }
5042
5043 #ifndef MORECORE_CANNOT_TRIM
5044 return result | (av == &main_arena ? systrim (pad, av) : 0);
5045
5046 #else
5047 return result;
5048 #endif
5049 }
5050
5051
5052 int
5053 __malloc_trim (size_t s)
5054 {
5055 int result = 0;
5056
5057 if (__malloc_initialized < 0)
5058 ptmalloc_init ();
5059
5060 mstate ar_ptr = &main_arena;
5061 do
5062 {
5063 __libc_lock_lock (ar_ptr->mutex);
5064 result |= mtrim (ar_ptr, s);
5065 __libc_lock_unlock (ar_ptr->mutex);
5066
5067 ar_ptr = ar_ptr->next;
5068 }
5069 while (ar_ptr != &main_arena);
5070
5071 return result;
5072 }
5073
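/*
   Usage sketch (not part of this file): after freeing a large amount of
   memory, an application can ask the allocator to return unused pages to
   the system via __malloc_trim above (exported as malloc_trim).  A return
   value of 1 means at least some memory was released.

   #include <malloc.h>

   static void
   example_shrink_heap (void)
   {
     // Keep at most 128 KiB of padding at the top of the main heap.
     int released = malloc_trim (128 * 1024);
     (void) released;
   }
*/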
5074
5075 /*
5076 ------------------------- malloc_usable_size -------------------------
5077 */
5078
5079 static size_t
5080 musable (void *mem)
5081 {
5082 mchunkptr p;
5083 if (mem != 0)
5084 {
5085 size_t result = 0;
5086
5087 p = mem2chunk (mem);
5088
5089 if (__builtin_expect (using_malloc_checking == 1, 0))
5090 return malloc_check_get_size (p);
5091
5092 if (chunk_is_mmapped (p))
5093 {
5094 if (DUMPED_MAIN_ARENA_CHUNK (p))
5095 result = chunksize (p) - SIZE_SZ;
5096 else
5097 result = chunksize (p) - CHUNK_HDR_SZ;
5098 }
5099 else if (inuse (p))
5100 result = chunksize (p) - SIZE_SZ;
5101
5102 #ifdef USE_MTAG
5103 /* The usable space may be reduced if memory tagging is needed,
5104 since we cannot share the user-space data with malloc's internal
5105 data structure. */
5106 result &= __mtag_granule_mask;
5107 #endif
5108 return result;
5109 }
5110 return 0;
5111 }
5112
5113
5114 size_t
5115 __malloc_usable_size (void *m)
5116 {
5117 size_t result;
5118
5119 result = musable (m);
5120 return result;
5121 }
5122
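/*
   Usage sketch (not part of this file): __malloc_usable_size above is
   exported as malloc_usable_size and reports how many bytes of a block
   are actually usable, which may exceed the requested size because of
   the chunk rounding described earlier.

   #include <malloc.h>
   #include <stdio.h>
   #include <stdlib.h>

   static void
   example_usable_size (void)
   {
     void *p = malloc (5);
     if (p != NULL)
       {
         // Typically prints a value >= 5 (e.g. 24 on common 64-bit builds).
         printf ("usable bytes: %zu\n", malloc_usable_size (p));
         free (p);
       }
   }
*/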
5123 /*
5124 ------------------------------ mallinfo ------------------------------
5125 Accumulate malloc statistics for arena AV into M.
5126 */
5127
5128 static void
5129 int_mallinfo (mstate av, struct mallinfo2 *m)
5130 {
5131 size_t i;
5132 mbinptr b;
5133 mchunkptr p;
5134 INTERNAL_SIZE_T avail;
5135 INTERNAL_SIZE_T fastavail;
5136 int nblocks;
5137 int nfastblocks;
5138
5139 check_malloc_state (av);
5140
5141 /* Account for top */
5142 avail = chunksize (av->top);
5143 nblocks = 1; /* top always exists */
5144
5145 /* traverse fastbins */
5146 nfastblocks = 0;
5147 fastavail = 0;
5148
5149 for (i = 0; i < NFASTBINS; ++i)
5150 {
5151 for (p = fastbin (av, i);
5152 p != 0;
5153 p = REVEAL_PTR (p->fd))
5154 {
5155 if (__glibc_unlikely (misaligned_chunk (p)))
5156 malloc_printerr ("int_mallinfo(): "
5157 "unaligned fastbin chunk detected");
5158 ++nfastblocks;
5159 fastavail += chunksize (p);
5160 }
5161 }
5162
5163 avail += fastavail;
5164
5165 /* traverse regular bins */
5166 for (i = 1; i < NBINS; ++i)
5167 {
5168 b = bin_at (av, i);
5169 for (p = last (b); p != b; p = p->bk)
5170 {
5171 ++nblocks;
5172 avail += chunksize (p);
5173 }
5174 }
5175
5176 m->smblks += nfastblocks;
5177 m->ordblks += nblocks;
5178 m->fordblks += avail;
5179 m->uordblks += av->system_mem - avail;
5180 m->arena += av->system_mem;
5181 m->fsmblks += fastavail;
5182 if (av == &main_arena)
5183 {
5184 m->hblks = mp_.n_mmaps;
5185 m->hblkhd = mp_.mmapped_mem;
5186 m->usmblks = 0;
5187 m->keepcost = chunksize (av->top);
5188 }
5189 }
5190
5191
5192 struct mallinfo2
5193 __libc_mallinfo2 (void)
5194 {
5195 struct mallinfo2 m;
5196 mstate ar_ptr;
5197
5198 if (__malloc_initialized < 0)
5199 ptmalloc_init ();
5200
5201 memset (&m, 0, sizeof (m));
5202 ar_ptr = &main_arena;
5203 do
5204 {
5205 __libc_lock_lock (ar_ptr->mutex);
5206 int_mallinfo (ar_ptr, &m);
5207 __libc_lock_unlock (ar_ptr->mutex);
5208
5209 ar_ptr = ar_ptr->next;
5210 }
5211 while (ar_ptr != &main_arena);
5212
5213 return m;
5214 }
5215 libc_hidden_def (__libc_mallinfo2)
5216
5217 struct mallinfo
5218 __libc_mallinfo (void)
5219 {
5220 struct mallinfo m;
5221 struct mallinfo2 m2 = __libc_mallinfo2 ();
5222
5223 m.arena = m2.arena;
5224 m.ordblks = m2.ordblks;
5225 m.smblks = m2.smblks;
5226 m.hblks = m2.hblks;
5227 m.hblkhd = m2.hblkhd;
5228 m.usmblks = m2.usmblks;
5229 m.fsmblks = m2.fsmblks;
5230 m.uordblks = m2.uordblks;
5231 m.fordblks = m2.fordblks;
5232 m.keepcost = m2.keepcost;
5233
5234 return m;
5235 }
5236
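/*
   Usage sketch (not part of this file): __libc_mallinfo2 above is exported
   as mallinfo2, the non-truncating replacement for mallinfo whose fields
   are size_t rather than int.  Assumes a glibc recent enough to declare
   mallinfo2 in <malloc.h>.

   #include <malloc.h>
   #include <stdio.h>

   static void
   example_mallinfo (void)
   {
     struct mallinfo2 mi = mallinfo2 ();
     printf ("arena bytes:  %zu\n", mi.arena);
     printf ("in-use bytes: %zu\n", mi.uordblks);
     printf ("free bytes:   %zu\n", mi.fordblks);
   }
*/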
5237
5238 /*
5239 ------------------------------ malloc_stats ------------------------------
5240 */
5241
5242 void
5243 __malloc_stats (void)
5244 {
5245 int i;
5246 mstate ar_ptr;
5247 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5248
5249 if (__malloc_initialized < 0)
5250 ptmalloc_init ();
5251 _IO_flockfile (stderr);
5252 int old_flags2 = stderr->_flags2;
5253 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5254 for (i = 0, ar_ptr = &main_arena;; i++)
5255 {
5256 struct mallinfo2 mi;
5257
5258 memset (&mi, 0, sizeof (mi));
5259 __libc_lock_lock (ar_ptr->mutex);
5260 int_mallinfo (ar_ptr, &mi);
5261 fprintf (stderr, "Arena %d:\n", i);
5262 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5263 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5264 #if MALLOC_DEBUG > 1
5265 if (i > 0)
5266 dump_heap (heap_for_ptr (top (ar_ptr)));
5267 #endif
5268 system_b += mi.arena;
5269 in_use_b += mi.uordblks;
5270 __libc_lock_unlock (ar_ptr->mutex);
5271 ar_ptr = ar_ptr->next;
5272 if (ar_ptr == &main_arena)
5273 break;
5274 }
5275 fprintf (stderr, "Total (incl. mmap):\n");
5276 fprintf (stderr, "system bytes = %10u\n", system_b);
5277 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5278 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5279 fprintf (stderr, "max mmap bytes = %10lu\n",
5280 (unsigned long) mp_.max_mmapped_mem);
5281 stderr->_flags2 = old_flags2;
5282 _IO_funlockfile (stderr);
5283 }
5284
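/*
   Usage sketch (not part of this file): __malloc_stats above is exported
   as malloc_stats; it has no return value and writes the per-arena and
   total figures directly to stderr.

   #include <malloc.h>

   static void
   example_print_stats (void)
   {
     malloc_stats ();   // "Arena N:", system bytes, in use bytes, mmap totals
   }
*/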
5285
5286 /*
5287 ------------------------------ mallopt ------------------------------
5288 */
5289 static __always_inline int
5290 do_set_trim_threshold (size_t value)
5291 {
5292 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5293 mp_.no_dyn_threshold);
5294 mp_.trim_threshold = value;
5295 mp_.no_dyn_threshold = 1;
5296 return 1;
5297 }
5298
5299 static __always_inline int
5300 do_set_top_pad (size_t value)
5301 {
5302 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5303 mp_.no_dyn_threshold);
5304 mp_.top_pad = value;
5305 mp_.no_dyn_threshold = 1;
5306 return 1;
5307 }
5308
5309 static __always_inline int
5310 do_set_mmap_threshold (size_t value)
5311 {
5312 /* Forbid setting the threshold too high. */
5313 if (value <= HEAP_MAX_SIZE / 2)
5314 {
5315 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5316 mp_.no_dyn_threshold);
5317 mp_.mmap_threshold = value;
5318 mp_.no_dyn_threshold = 1;
5319 return 1;
5320 }
5321 return 0;
5322 }
5323
5324 static __always_inline int
5325 do_set_mmaps_max (int32_t value)
5326 {
5327 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5328 mp_.no_dyn_threshold);
5329 mp_.n_mmaps_max = value;
5330 mp_.no_dyn_threshold = 1;
5331 return 1;
5332 }
5333
5334 static __always_inline int
5335 do_set_mallopt_check (int32_t value)
5336 {
5337 return 1;
5338 }
5339
5340 static __always_inline int
5341 do_set_perturb_byte (int32_t value)
5342 {
5343 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5344 perturb_byte = value;
5345 return 1;
5346 }
5347
5348 static __always_inline int
5349 do_set_arena_test (size_t value)
5350 {
5351 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5352 mp_.arena_test = value;
5353 return 1;
5354 }
5355
5356 static __always_inline int
5357 do_set_arena_max (size_t value)
5358 {
5359 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5360 mp_.arena_max = value;
5361 return 1;
5362 }
5363
5364 #if USE_TCACHE
5365 static __always_inline int
5366 do_set_tcache_max (size_t value)
5367 {
5368 if (value <= MAX_TCACHE_SIZE)
5369 {
5370 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5371 mp_.tcache_max_bytes = value;
5372 mp_.tcache_bins = csize2tidx (request2size (value)) + 1;
5373 return 1;
5374 }
5375 return 0;
5376 }
5377
5378 static __always_inline int
5379 do_set_tcache_count (size_t value)
5380 {
5381 if (value <= MAX_TCACHE_COUNT)
5382 {
5383 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5384 mp_.tcache_count = value;
5385 return 1;
5386 }
5387 return 0;
5388 }
5389
5390 static __always_inline int
5391 do_set_tcache_unsorted_limit (size_t value)
5392 {
5393 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5394 mp_.tcache_unsorted_limit = value;
5395 return 1;
5396 }
5397 #endif
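/*
   Note (not part of this file): the do_set_tcache_* helpers above are not
   reachable through mallopt (the switch below has no tcache cases); they
   are driven by the tunables code in arena.c.  A hedged shell-level
   illustration, assuming the usual glibc.malloc.* tunable names:

   GLIBC_TUNABLES=glibc.malloc.tcache_count=0:glibc.malloc.tcache_max=1024 ./app
*/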
5398
5399 static inline int
5400 __always_inline
5401 do_set_mxfast (size_t value)
5402 {
5403 if (value <= MAX_FAST_SIZE)
5404 {
5405 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5406 set_max_fast (value);
5407 return 1;
5408 }
5409 return 0;
5410 }
5411
5412 int
5413 __libc_mallopt (int param_number, int value)
5414 {
5415 mstate av = &main_arena;
5416 int res = 1;
5417
5418 if (__malloc_initialized < 0)
5419 ptmalloc_init ();
5420 __libc_lock_lock (av->mutex);
5421
5422 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5423
5424 /* We must consolidate main arena before changing max_fast
5425 (see definition of set_max_fast). */
5426 malloc_consolidate (av);
5427
5428 /* Many of these helper functions take a size_t. We do not worry
5429 about overflow here, because negative int values will wrap to
5430 very large size_t values and the helpers have sufficient range
5431 checking for such conversions. Many of these helpers are also
5432 used by the tunables macros in arena.c. */
5433
5434 switch (param_number)
5435 {
5436 case M_MXFAST:
5437 res = do_set_mxfast (value);
5438 break;
5439
5440 case M_TRIM_THRESHOLD:
5441 res = do_set_trim_threshold (value);
5442 break;
5443
5444 case M_TOP_PAD:
5445 res = do_set_top_pad (value);
5446 break;
5447
5448 case M_MMAP_THRESHOLD:
5449 res = do_set_mmap_threshold (value);
5450 break;
5451
5452 case M_MMAP_MAX:
5453 res = do_set_mmaps_max (value);
5454 break;
5455
5456 case M_CHECK_ACTION:
5457 res = do_set_mallopt_check (value);
5458 break;
5459
5460 case M_PERTURB:
5461 res = do_set_perturb_byte (value);
5462 break;
5463
5464 case M_ARENA_TEST:
5465 if (value > 0)
5466 res = do_set_arena_test (value);
5467 break;
5468
5469 case M_ARENA_MAX:
5470 if (value > 0)
5471 res = do_set_arena_max (value);
5472 break;
5473 }
5474 __libc_lock_unlock (av->mutex);
5475 return res;
5476 }
5477 libc_hidden_def (__libc_mallopt)
5478
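/*
   Usage sketch (not part of this file): __libc_mallopt above is exported
   as mallopt.  It returns 1 on success and 0 if the value is rejected
   (for example an M_MXFAST above MAX_FAST_SIZE).

   #include <malloc.h>

   static void
   example_tune (void)
   {
     // Serve requests of 256 KiB and larger directly through mmap.
     mallopt (M_MMAP_THRESHOLD, 256 * 1024);

     // Fill freed memory with 0x55 and fresh allocations with its complement.
     mallopt (M_PERTURB, 0x55);
   }
*/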
5479
5480 /*
5481 -------------------- Alternative MORECORE functions --------------------
5482 */
5483
5484
5485 /*
5486 General Requirements for MORECORE.
5487
5488 The MORECORE function must have the following properties:
5489
5490 If MORECORE_CONTIGUOUS is false:
5491
5492 * MORECORE must allocate in multiples of pagesize. It will
5493 only be called with arguments that are multiples of pagesize.
5494
5495 * MORECORE(0) must return an address that is at least
5496 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5497
5498 else (i.e. if MORECORE_CONTIGUOUS is true):
5499
5500 * Consecutive calls to MORECORE with positive arguments
5501 return increasing addresses, indicating that space has been
5502 contiguously extended.
5503
5504 * MORECORE need not allocate in multiples of pagesize.
5505 Calls to MORECORE need not have args of multiples of pagesize.
5506
5507 * MORECORE need not page-align.
5508
5509 In either case:
5510
5511 * MORECORE may allocate more memory than requested. (Or even less,
5512 but this will generally result in a malloc failure.)
5513
5514 * MORECORE must not allocate memory when given argument zero, but
5515 instead return one past the end address of memory from previous
5516 nonzero call. This malloc does NOT call MORECORE(0)
5517 until at least one call with positive arguments is made, so
5518 the initial value returned is not important.
5519
5520 * Even though consecutive calls to MORECORE need not return contiguous
5521 addresses, it must be OK for malloc'ed chunks to span multiple
5522 regions in those cases where they do happen to be contiguous.
5523
5524 * MORECORE need not handle negative arguments -- it may instead
5525 just return MORECORE_FAILURE when given negative arguments.
5526 Negative arguments are always multiples of pagesize. MORECORE
5527 must not misinterpret negative args as large positive unsigned
5528 args. You can suppress all such calls from even occurring by defining
5529 MORECORE_CANNOT_TRIM.
5530
5531 There is some variation across systems about the type of the
5532 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5533 actually be size_t, because sbrk supports negative args, so it is
5534 normally the signed type of the same width as size_t (sometimes
5535 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5536 matter though. Internally, we use "long" as arguments, which should
5537 work across all reasonable possibilities.
5538
5539 Additionally, if MORECORE ever returns failure for a positive
5540 request, then mmap is used as a noncontiguous system allocator. This
5541 is a useful backup strategy for systems with holes in address spaces
5542 -- in this case sbrk cannot contiguously expand the heap, but mmap
5543 may be able to map noncontiguous space.
5544
5545 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5546 a function that always returns MORECORE_FAILURE.
5547
5548 If you are using this malloc with something other than sbrk (or its
5549 emulation) to supply memory regions, you probably want to set
5550 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5551 allocator kindly contributed for pre-OSX macOS. It uses virtually
5552 but not necessarily physically contiguous non-paged memory (locked
5553 in, present and won't get swapped out). You can use it by
5554 uncommenting this section, adding some #includes, and setting up the
5555 appropriate defines above:
5556
5557 *#define MORECORE osMoreCore
5558 *#define MORECORE_CONTIGUOUS 0
5559
5560 There is also a shutdown routine that should somehow be called for
5561 cleanup upon program exit.
5562
5563 *#define MAX_POOL_ENTRIES 100
5564 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5565 static int next_os_pool;
5566 void *our_os_pools[MAX_POOL_ENTRIES];
5567
5568 void *osMoreCore(int size)
5569 {
5570 void *ptr = 0;
5571 static void *sbrk_top = 0;
5572
5573 if (size > 0)
5574 {
5575 if (size < MINIMUM_MORECORE_SIZE)
5576 size = MINIMUM_MORECORE_SIZE;
5577 if (CurrentExecutionLevel() == kTaskLevel)
5578 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5579 if (ptr == 0)
5580 {
5581 return (void *) MORECORE_FAILURE;
5582 }
5583 // save ptrs so they can be freed during cleanup
5584 our_os_pools[next_os_pool] = ptr;
5585 next_os_pool++;
5586 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5587 sbrk_top = (char *) ptr + size;
5588 return ptr;
5589 }
5590 else if (size < 0)
5591 {
5592 // we don't currently support shrink behavior
5593 return (void *) MORECORE_FAILURE;
5594 }
5595 else
5596 {
5597 return sbrk_top;
5598 }
5599 }
5600
5601 // cleanup any allocated memory pools
5602 // called as last thing before shutting down driver
5603
5604 void osCleanupMem(void)
5605 {
5606 void **ptr;
5607
5608 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5609 if (*ptr)
5610 {
5611 PoolDeallocate(*ptr);
5612 *ptr = 0;
5613 }
5614 }
5615
5616 */
5617
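/*
   A minimal sketch of the "always fail" MORECORE mentioned above, which
   forces every system allocation through mmap.  The function name is
   arbitrary; the argument type follows the discussion above (a signed
   type of the same width as size_t).

   *#define MORECORE fail_morecore
   *#define MORECORE_CONTIGUOUS 0

   static void *
   fail_morecore (ptrdiff_t increment)
   {
     // Never extends the heap, so the allocator falls back to mmap.
     return (void *) MORECORE_FAILURE;
   }
*/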
5618
5619 /* Helper code. */
5620
5621 extern char **__libc_argv attribute_hidden;
5622
5623 static void
5624 malloc_printerr (const char *str)
5625 {
5626 __libc_message (do_abort, "%s\n", str);
5627 __builtin_unreachable ();
5628 }
5629
5630 /* We need a wrapper function for one of the additions of POSIX. */
5631 int
5632 __posix_memalign (void **memptr, size_t alignment, size_t size)
5633 {
5634 void *mem;
5635
5636 /* Test whether the ALIGNMENT argument is valid. It must be a power
5637 of two multiple of sizeof (void *). */
5638 if (alignment % sizeof (void *) != 0
5639 || !powerof2 (alignment / sizeof (void *))
5640 || alignment == 0)
5641 return EINVAL;
5642
5643
5644 void *address = RETURN_ADDRESS (0);
5645 mem = _mid_memalign (alignment, size, address);
5646
5647 if (mem != NULL)
5648 {
5649 *memptr = mem;
5650 return 0;
5651 }
5652
5653 return ENOMEM;
5654 }
5655 weak_alias (__posix_memalign, posix_memalign)
5656
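/*
   Usage sketch (not part of this file): unlike memalign, posix_memalign
   stores the result through MEMPTR and reports errors by return value
   (0, EINVAL or ENOMEM) rather than through errno.

   #include <stdlib.h>

   static void
   example_posix_memalign (void)
   {
     void *buf;
     // 64 is a power of two and a multiple of sizeof (void *).
     if (posix_memalign (&buf, 64, 1024) == 0)
       free (buf);
   }
*/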
5657
5658 int
5659 __malloc_info (int options, FILE *fp)
5660 {
5661 /* For now, at least. */
5662 if (options != 0)
5663 return EINVAL;
5664
5665 int n = 0;
5666 size_t total_nblocks = 0;
5667 size_t total_nfastblocks = 0;
5668 size_t total_avail = 0;
5669 size_t total_fastavail = 0;
5670 size_t total_system = 0;
5671 size_t total_max_system = 0;
5672 size_t total_aspace = 0;
5673 size_t total_aspace_mprotect = 0;
5674
5675
5676
5677 if (__malloc_initialized < 0)
5678 ptmalloc_init ();
5679
5680 fputs ("<malloc version=\"1\">\n", fp);
5681
5682 /* Iterate over all arenas currently in use. */
5683 mstate ar_ptr = &main_arena;
5684 do
5685 {
5686 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5687
5688 size_t nblocks = 0;
5689 size_t nfastblocks = 0;
5690 size_t avail = 0;
5691 size_t fastavail = 0;
5692 struct
5693 {
5694 size_t from;
5695 size_t to;
5696 size_t total;
5697 size_t count;
5698 } sizes[NFASTBINS + NBINS - 1];
5699 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5700
5701 __libc_lock_lock (ar_ptr->mutex);
5702
5703 /* Account for top chunk. The top-most available chunk is
5704 treated specially and is never in any bin. See "initial_top"
5705 comments. */
5706 avail = chunksize (ar_ptr->top);
5707 nblocks = 1; /* Top always exists. */
5708
5709 for (size_t i = 0; i < NFASTBINS; ++i)
5710 {
5711 mchunkptr p = fastbin (ar_ptr, i);
5712 if (p != NULL)
5713 {
5714 size_t nthissize = 0;
5715 size_t thissize = chunksize (p);
5716
5717 while (p != NULL)
5718 {
5719 if (__glibc_unlikely (misaligned_chunk (p)))
5720 malloc_printerr ("__malloc_info(): "
5721 "unaligned fastbin chunk detected");
5722 ++nthissize;
5723 p = REVEAL_PTR (p->fd);
5724 }
5725
5726 fastavail += nthissize * thissize;
5727 nfastblocks += nthissize;
5728 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5729 sizes[i].to = thissize;
5730 sizes[i].count = nthissize;
5731 }
5732 else
5733 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5734
5735 sizes[i].total = sizes[i].count * sizes[i].to;
5736 }
5737
5738
5739 mbinptr bin;
5740 struct malloc_chunk *r;
5741
5742 for (size_t i = 1; i < NBINS; ++i)
5743 {
5744 bin = bin_at (ar_ptr, i);
5745 r = bin->fd;
5746 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5747 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5748 = sizes[NFASTBINS - 1 + i].count = 0;
5749
5750 if (r != NULL)
5751 while (r != bin)
5752 {
5753 size_t r_size = chunksize_nomask (r);
5754 ++sizes[NFASTBINS - 1 + i].count;
5755 sizes[NFASTBINS - 1 + i].total += r_size;
5756 sizes[NFASTBINS - 1 + i].from
5757 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
5758 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5759 r_size);
5760
5761 r = r->fd;
5762 }
5763
5764 if (sizes[NFASTBINS - 1 + i].count == 0)
5765 sizes[NFASTBINS - 1 + i].from = 0;
5766 nblocks += sizes[NFASTBINS - 1 + i].count;
5767 avail += sizes[NFASTBINS - 1 + i].total;
5768 }
5769
5770 size_t heap_size = 0;
5771 size_t heap_mprotect_size = 0;
5772 size_t heap_count = 0;
5773 if (ar_ptr != &main_arena)
5774 {
5775 /* Iterate over the arena heaps from back to front. */
5776 heap_info *heap = heap_for_ptr (top (ar_ptr));
5777 do
5778 {
5779 heap_size += heap->size;
5780 heap_mprotect_size += heap->mprotect_size;
5781 heap = heap->prev;
5782 ++heap_count;
5783 }
5784 while (heap != NULL);
5785 }
5786
5787 __libc_lock_unlock (ar_ptr->mutex);
5788
5789 total_nfastblocks += nfastblocks;
5790 total_fastavail += fastavail;
5791
5792 total_nblocks += nblocks;
5793 total_avail += avail;
5794
5795 for (size_t i = 0; i < nsizes; ++i)
5796 if (sizes[i].count != 0 && i != NFASTBINS)
5797 fprintf (fp, "\
5798 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5799 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5800
5801 if (sizes[NFASTBINS].count != 0)
5802 fprintf (fp, "\
5803 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5804 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5805 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5806
5807 total_system += ar_ptr->system_mem;
5808 total_max_system += ar_ptr->max_system_mem;
5809
5810 fprintf (fp,
5811 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5812 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5813 "<system type=\"current\" size=\"%zu\"/>\n"
5814 "<system type=\"max\" size=\"%zu\"/>\n",
5815 nfastblocks, fastavail, nblocks, avail,
5816 ar_ptr->system_mem, ar_ptr->max_system_mem);
5817
5818 if (ar_ptr != &main_arena)
5819 {
5820 fprintf (fp,
5821 "<aspace type=\"total\" size=\"%zu\"/>\n"
5822 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5823 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5824 heap_size, heap_mprotect_size, heap_count);
5825 total_aspace += heap_size;
5826 total_aspace_mprotect += heap_mprotect_size;
5827 }
5828 else
5829 {
5830 fprintf (fp,
5831 "<aspace type=\"total\" size=\"%zu\"/>\n"
5832 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5833 ar_ptr->system_mem, ar_ptr->system_mem);
5834 total_aspace += ar_ptr->system_mem;
5835 total_aspace_mprotect += ar_ptr->system_mem;
5836 }
5837
5838 fputs ("</heap>\n", fp);
5839 ar_ptr = ar_ptr->next;
5840 }
5841 while (ar_ptr != &main_arena);
5842
5843 fprintf (fp,
5844 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5845 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5846 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5847 "<system type=\"current\" size=\"%zu\"/>\n"
5848 "<system type=\"max\" size=\"%zu\"/>\n"
5849 "<aspace type=\"total\" size=\"%zu\"/>\n"
5850 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5851 "</malloc>\n",
5852 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
5853 mp_.n_mmaps, mp_.mmapped_mem,
5854 total_system, total_max_system,
5855 total_aspace, total_aspace_mprotect);
5856
5857 return 0;
5858 }
5859 weak_alias (__malloc_info, malloc_info)
5860
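/*
   Usage sketch (not part of this file): __malloc_info above is exported
   as malloc_info and writes the XML report to the given stream; OPTIONS
   must currently be zero.

   #include <malloc.h>
   #include <stdio.h>

   static void
   example_malloc_info (void)
   {
     if (malloc_info (0, stdout) != 0)
       fputs ("malloc_info failed\n", stderr);
   }
*/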
5861
5862 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5863 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5864 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5865 strong_alias (__libc_memalign, __memalign)
5866 weak_alias (__libc_memalign, memalign)
5867 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5868 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5869 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5870 strong_alias (__libc_mallinfo, __mallinfo)
5871 weak_alias (__libc_mallinfo, mallinfo)
5872 strong_alias (__libc_mallinfo2, __mallinfo2)
5873 weak_alias (__libc_mallinfo2, mallinfo2)
5874 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5875
5876 weak_alias (__malloc_stats, malloc_stats)
5877 weak_alias (__malloc_usable_size, malloc_usable_size)
5878 weak_alias (__malloc_trim, malloc_trim)
5879
5880 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
5881 compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
5882 #endif
5883
5884 /* ------------------------------------------------------------
5885 History:
5886
5887 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5888
5889 */
5890 /*
5891 * Local variables:
5892 * c-basic-offset: 2
5893 * End:
5894 */