1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2021 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <https://www.gnu.org/licenses/>. */
20
21 /*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
29 * Version ptmalloc2-20011215
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32
33 * Quickstart
34
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
43
44 * Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 By default, you may already be using a C library containing a malloc
66 that is based on some version of this malloc (for example on
67 Linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71 * Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
76 free(void* p);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
86 pvalloc(size_t n);
87 malloc_trim(size_t pad);
88 malloc_usable_size(void* p);
89 malloc_stats();
90
91 * Vital statistics:
92
93 Supported pointer representation: 4 or 8 bytes
94 Supported size_t representation: 4 or 8 bytes
95 Note that size_t is allowed to be 4 bytes even if pointers are 8.
96 You can adjust this by defining INTERNAL_SIZE_T
97
98 Alignment: 2 * sizeof(size_t) (default)
99 (i.e., 8 byte alignment with 4byte size_t). This suffices for
100 nearly all current machines and C compilers. However, you can
101 define MALLOC_ALIGNMENT to be wider than this if necessary.
102
103 Minimum overhead per allocated chunk: 4 or 8 bytes
104 Each malloced chunk has a hidden word of overhead holding size
105 and status information.
106
107 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
108 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
109
110 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
111 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
112 needed; 4 (8) for a trailing size field and 8 (16) bytes for
113 free list pointers. Thus, the minimum allocatable size is
114 16/24/32 bytes.
115
116 Even a request for zero bytes (i.e., malloc(0)) returns a
117 pointer to something of the minimum allocatable size.
118
119 The maximum overhead wastage (i.e., number of extra bytes
120 allocated beyond what was requested in malloc) is less than or equal
121 to the minimum size, except for requests >= mmap_threshold that
122 are serviced via mmap(), where the worst case wastage is 2 *
123 sizeof(size_t) bytes plus the remainder from a system page (the
124 minimal mmap unit); typically 4096 or 8192 bytes.
125
126 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
127 8-byte size_t: 2^64 minus about two pages
128
129 It is assumed that (possibly signed) size_t values suffice to
130 represent chunk sizes. `Possibly signed' is due to the fact
131 that `size_t' may be defined on a system as either a signed or
132 an unsigned type. The ISO C standard says that it must be
133 unsigned, but a few systems are known not to adhere to this.
134 Additionally, even when size_t is unsigned, sbrk (which is by
135 default used to obtain memory from system) accepts signed
136 arguments, and may not be able to handle size_t-wide arguments
137 with negative sign bit. Generally, values that would
138 appear as negative after accounting for overhead and alignment
139 are supported only via mmap(), which does not have this
140 limitation.
141
142 Requests for sizes outside the allowed range will perform an optional
143 failure action and then return null. (Requests may also
144 fail because a system is out of memory.)
145
146 Thread-safety: thread-safe
147
148 Compliance: I believe it is compliant with the 1997 Single Unix Specification.
149 Also SVID/XPG, ANSI C, and probably others as well.
150
151 * Synopsis of compile-time options:
152
153 People have reported using previous versions of this malloc on all
154 versions of Unix, sometimes by tweaking some of the defines
155 below. It has been tested most extensively on Solaris and Linux.
156 People also report using it in stand-alone embedded systems.
157
158 The implementation is in straight, hand-tuned ANSI C. It is not
159 at all modular. (Sorry!) It uses a lot of macros. To be at all
160 usable, this code should be compiled using an optimizing compiler
161 (for example gcc -O3) that can simplify expressions and control
162 paths. (FAQ: some macros import variables as arguments rather than
163 declare locals because people reported that some debuggers
164 otherwise get confused.)
165
166 OPTION DEFAULT VALUE
167
168 Compilation Environment options:
169
170 HAVE_MREMAP 0
171
172 Changing default word sizes:
173
174 INTERNAL_SIZE_T size_t
175
176 Configuration and functionality options:
177
178 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
179 USE_MALLOC_LOCK NOT defined
180 MALLOC_DEBUG NOT defined
181 REALLOC_ZERO_BYTES_FREES 1
182 TRIM_FASTBINS 0
183
184 Options for customizing MORECORE:
185
186 MORECORE sbrk
187 MORECORE_FAILURE -1
188 MORECORE_CONTIGUOUS 1
189 MORECORE_CANNOT_TRIM NOT defined
190 MORECORE_CLEARS 1
191 MMAP_AS_MORECORE_SIZE (1024 * 1024)
192
193 Tuning options that are also dynamically changeable via mallopt:
194
195 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
196 DEFAULT_TRIM_THRESHOLD 128 * 1024
197 DEFAULT_TOP_PAD 0
198 DEFAULT_MMAP_THRESHOLD 128 * 1024
199 DEFAULT_MMAP_MAX 65536
200
201 There are several other #defined constants and macros that you
202 probably don't want to touch unless you are extending or adapting malloc. */
203
204 /*
205 void* is the pointer type that malloc should say it returns
206 */
207
208 #ifndef void
209 #define void void
210 #endif /*void*/
211
212 #include <stddef.h> /* for size_t */
213 #include <stdlib.h> /* for getenv(), abort() */
214 #include <unistd.h> /* for __libc_enable_secure */
215
216 #include <atomic.h>
217 #include <_itoa.h>
218 #include <bits/wordsize.h>
219 #include <sys/sysinfo.h>
220
221 #include <ldsodefs.h>
222
223 #include <unistd.h>
224 #include <stdio.h> /* needed for malloc_stats */
225 #include <errno.h>
226 #include <assert.h>
227
228 #include <shlib-compat.h>
229
230 /* For uintptr_t. */
231 #include <stdint.h>
232
233 /* For va_arg, va_start, va_end. */
234 #include <stdarg.h>
235
236 /* For MIN, MAX, powerof2. */
237 #include <sys/param.h>
238
239 /* For ALIGN_UP et. al. */
240 #include <libc-pointer-arith.h>
241
242 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
243 #include <libc-diag.h>
244
245 /* For memory tagging. */
246 #include <libc-mtag.h>
247
248 #include <malloc/malloc-internal.h>
249
250 /* For SINGLE_THREAD_P. */
251 #include <sysdep-cancel.h>
252
253 #include <libc-internal.h>
254
255 /* For tcache double-free check. */
256 #include <random-bits.h>
257 #include <sys/random.h>
258
259 /*
260 Debugging:
261
262 Because freed chunks may be overwritten with bookkeeping fields, this
263 malloc will often die when freed memory is overwritten by user
264 programs. This can be very effective (albeit in an annoying way)
265 in helping track down dangling pointers.
266
267 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
268 enabled that will catch more memory errors. You probably won't be
269 able to make much sense of the actual assertion errors, but they
270 should help you locate incorrectly overwritten memory. The checking
271 is fairly extensive, and will slow down execution
272 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
273 will attempt to check every non-mmapped allocated and free chunk in
274 the course of computing the summaries. (By nature, mmapped regions
275 cannot be checked very much automatically.)
276
277 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
278 this code. The assertions in the check routines spell out in more
279 detail the assumptions and invariants underlying the algorithms.
280
281 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
282 checking that all accesses to malloced memory stay within their
283 bounds. However, there are several add-ons and adaptations of this
284 or other mallocs available that do this.
285 */
286
287 #ifndef MALLOC_DEBUG
288 #define MALLOC_DEBUG 0
289 #endif
290
291 #ifndef NDEBUG
292 # define __assert_fail(assertion, file, line, function) \
293 __malloc_assert(assertion, file, line, function)
294
295 extern const char *__progname;
296
297 static void
298 __malloc_assert (const char *assertion, const char *file, unsigned int line,
299 const char *function)
300 {
301 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
302 __progname, __progname[0] ? ": " : "",
303 file, line,
304 function ? function : "", function ? ": " : "",
305 assertion);
306 fflush (stderr);
307 abort ();
308 }
309 #endif
310
311 #if USE_TCACHE
312 /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
313 # define TCACHE_MAX_BINS 64
314 # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
315
316 /* Only used to pre-fill the tunables. */
317 # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
318
319 /* When "x" is from chunksize(). */
320 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
321 /* When "x" is a user-provided size. */
322 # define usize2tidx(x) csize2tidx (request2size (x))
323
324 /* With rounding and alignment, the bins are...
325 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
326 idx 1 bytes 25..40 or 13..20
327 idx 2 bytes 41..56 or 21..28
328 etc. */
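/* A worked example of the index arithmetic above, assuming a typical
   64-bit configuration (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16,
   MINSIZE == 32); other configurations shift the numbers accordingly.

     tidx2usize (0)  == 0 * 16 + 32 - 8 == 24
     tidx2usize (1)  == 1 * 16 + 32 - 8 == 40
     csize2tidx (32) == (32 - 32 + 15) / 16 == 0
     csize2tidx (48) == (48 - 32 + 15) / 16 == 1

   which matches the "idx 0 bytes 0..24, idx 1 bytes 25..40" table above.  */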
329
330 /* This is another arbitrary limit, which tunables can change. Each
331 tcache bin will hold at most this number of chunks. */
332 # define TCACHE_FILL_COUNT 7
333
334 /* Maximum chunks in tcache bins for tunables. This value must fit the range
335 of tcache->counts[] entries, else they may overflow. */
336 # define MAX_TCACHE_COUNT UINT16_MAX
337 #endif
338
339 /* Safe-Linking:
340 Use randomness from ASLR (mmap_base) to protect single-linked lists
341 of Fast-Bins and TCache. That is, mask the "next" pointers of the
342 lists' chunks, and also perform allocation alignment checks on them.
343 This mechanism reduces the risk of pointer hijacking, as was done with
344 Safe-Unlinking in the double-linked lists of Small-Bins.
345 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
346 larger pages provide less entropy, although the pointer mangling
347 still works. */
348 #define PROTECT_PTR(pos, ptr) \
349 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
350 #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
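/* A minimal usage sketch of the two macros above (the "node" type and the
   variables are hypothetical, not part of the allocator): a singly-linked
   "next" field is stored mangled with PROTECT_PTR and recovered with
   REVEAL_PTR, which works because the field's own address is used as the
   key in both directions.

     struct node { struct node *next; };

     struct node *head = ...;                   current list head
     struct node *n = ...;                      node being linked in
     n->next = PROTECT_PTR (&n->next, head);    store mangled pointer
     struct node *h = REVEAL_PTR (n->next);     h == head again

   Forged or corrupted values are then expected to trip the alignment
   checks performed by the callers before dereferencing.  */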
351
352 /*
353 The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0)
354 when p is nonnull. If the macro is nonzero, the realloc call returns NULL;
355 otherwise, the call returns what malloc (0) would. In either case,
356 p is freed. Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which
357 implements common historical practice.
358
359 ISO C17 says the realloc call has implementation-defined behavior,
360 and it might not even free p.
361 */
362
363 #ifndef REALLOC_ZERO_BYTES_FREES
364 #define REALLOC_ZERO_BYTES_FREES 1
365 #endif
366
367 /*
368 TRIM_FASTBINS controls whether free() of a very small chunk can
369 immediately lead to trimming. Setting to true (1) can reduce memory
370 footprint, but will almost always slow down programs that use a lot
371 of small chunks.
372
373 Define this only if you are willing to give up some speed to more
374 aggressively reduce system-level memory footprint when releasing
375 memory in programs that use many small chunks. You can get
376 essentially the same effect by setting MXFAST to 0, but this can
377 lead to even greater slowdowns in programs using many small chunks.
378 TRIM_FASTBINS is an in-between compile-time option, that disables
379 only those chunks bordering topmost memory from being placed in
380 fastbins.
381 */
382
383 #ifndef TRIM_FASTBINS
384 #define TRIM_FASTBINS 0
385 #endif
386
387
388 /* Definition for getting more memory from the OS. */
389 #define MORECORE (*__morecore)
390 #define MORECORE_FAILURE 0
391 void * __default_morecore (ptrdiff_t);
392 void *(*__morecore)(ptrdiff_t) = __default_morecore;
393
394 /* Memory tagging. */
395
396 /* Some systems support the concept of tagging (sometimes known as
397 coloring) memory locations on a fine grained basis. Each memory
398 location is given a color (normally allocated randomly) and
399 pointers are also colored. When the pointer is dereferenced, the
400 pointer's color is checked against the memory's color and if they
401 differ the access is faulted (sometimes lazily).
402
403 We use this in glibc by maintaining a single color for the malloc
404 data structures that are interleaved with the user data and then
405 assigning separate colors for each block allocation handed out. In
406 this way simple buffer overruns will be rapidly detected. When
407 memory is freed, the memory is recolored back to the glibc default
408 so that simple use-after-free errors can also be detected.
409
410 If memory is reallocated the buffer is recolored even if the
411 address remains the same. This has a performance impact, but
412 guarantees that the old pointer cannot mistakenly be reused (code
413 that compares old against new will see a mismatch and will then
414 need to behave as though realloc moved the data to a new location).
415
416 Internal API for memory tagging support.
417
418 The aim is to keep the code for memory tagging support as close to
419 the normal APIs in glibc as possible, so that if tagging is not
420 enabled in the library, or is disabled at runtime then standard
421 operations can continue to be used. Support macros are used to do
422 this:
423
424 void *tag_new_zero_region (void *ptr, size_t size)
425
426 Allocates a new tag, colors the memory with that tag, zeros the
427 memory and returns a pointer that is correctly colored for that
428 location. The non-tagging version will simply call memset with 0.
429
430 void *tag_region (void *ptr, size_t size)
431
432 Color the region of memory pointed to by PTR and size SIZE with
433 the color of PTR. Returns the original pointer.
434
435 void *tag_new_usable (void *ptr)
436
437 Allocate a new random color and use it to color the user region of
438 a chunk; this may include data from the subsequent chunk's header
439 if tagging is sufficiently fine grained. Returns PTR suitably
440 recolored for accessing the memory there.
441
442 void *tag_at (void *ptr)
443
444 Read the current color of the memory at the address pointed to by
445 PTR (ignoring its current color) and return PTR recolored to that
446 color. PTR must be a valid address in all other respects. When
447 tagging is not enabled, it simply returns the original pointer.
448 */
449
450 #ifdef USE_MTAG
451 static bool mtag_enabled = false;
452 static int mtag_mmap_flags = 0;
453 #else
454 # define mtag_enabled false
455 # define mtag_mmap_flags 0
456 #endif
457
458 static __always_inline void *
459 tag_region (void *ptr, size_t size)
460 {
461 if (__glibc_unlikely (mtag_enabled))
462 return __libc_mtag_tag_region (ptr, size);
463 return ptr;
464 }
465
466 static __always_inline void *
467 tag_new_zero_region (void *ptr, size_t size)
468 {
469 if (__glibc_unlikely (mtag_enabled))
470 return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
471 return memset (ptr, 0, size);
472 }
473
474 /* Defined later. */
475 static void *
476 tag_new_usable (void *ptr);
477
478 static __always_inline void *
479 tag_at (void *ptr)
480 {
481 if (__glibc_unlikely (mtag_enabled))
482 return __libc_mtag_address_get_tag (ptr);
483 return ptr;
484 }
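/* A small sketch of how a caller might combine the helpers above, assuming
   an untagged block BLOCK of SIZE usable bytes (both names hypothetical):

     void *user = tag_new_zero_region (block, size);

   With tagging enabled, USER is zeroed and carries a fresh color; with
   tagging disabled (the common case), this is exactly
   memset (block, 0, size). Likewise tag_at (ptr) simply returns PTR
   unchanged when tagging is disabled.  */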
485
486 #include <string.h>
487
488 /*
489 MORECORE-related declarations. By default, rely on sbrk
490 */
491
492
493 /*
494 MORECORE is the name of the routine to call to obtain more memory
495 from the system. See below for general guidance on writing
496 alternative MORECORE functions, as well as a version for WIN32 and a
497 sample version for pre-OSX macos.
498 */
499
500 #ifndef MORECORE
501 #define MORECORE sbrk
502 #endif
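/* A hedged sketch of what an alternative MORECORE routine looks like (the
   name "my_morecore" is hypothetical): it takes a ptrdiff_t increment and
   returns a pointer to the newly obtained space on success, or
   MORECORE_FAILURE on failure, mirroring the sbrk default above. It would
   be selected by defining MORECORE to its name at build time.

     void *
     my_morecore (ptrdiff_t increment)
     {
       void *result = sbrk (increment);
       if (result == (void *) -1)
         return (void *) MORECORE_FAILURE;
       return result;
     }
*/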
503
504 /*
505 MORECORE_FAILURE is the value returned upon failure of MORECORE
506 as well as mmap. Since it cannot be an otherwise valid memory address,
507 and must reflect values of standard sys calls, you probably ought not
508 try to redefine it.
509 */
510
511 #ifndef MORECORE_FAILURE
512 #define MORECORE_FAILURE (-1)
513 #endif
514
515 /*
516 If MORECORE_CONTIGUOUS is true, take advantage of fact that
517 consecutive calls to MORECORE with positive arguments always return
518 contiguous increasing addresses. This is true of unix sbrk. Even
519 if not defined, when regions happen to be contiguous, malloc will
520 permit allocations spanning regions obtained from different
521 calls. But defining this when applicable enables some stronger
522 consistency checks and space efficiencies.
523 */
524
525 #ifndef MORECORE_CONTIGUOUS
526 #define MORECORE_CONTIGUOUS 1
527 #endif
528
529 /*
530 Define MORECORE_CANNOT_TRIM if your version of MORECORE
531 cannot release space back to the system when given negative
532 arguments. This is generally necessary only if you are using
533 a hand-crafted MORECORE function that cannot handle negative arguments.
534 */
535
536 /* #define MORECORE_CANNOT_TRIM */
537
538 /* MORECORE_CLEARS (default 1)
539 The degree to which the routine mapped to MORECORE zeroes out
540 memory: never (0), only for newly allocated space (1) or always
541 (2). The distinction between (1) and (2) is necessary because on
542 some systems, if the application first decrements and then
543 increments the break value, the contents of the reallocated space
544 are unspecified.
545 */
546
547 #ifndef MORECORE_CLEARS
548 # define MORECORE_CLEARS 1
549 #endif
550
551
552 /*
553 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
554 sbrk fails, and mmap is used as a backup. The value must be a
555 multiple of page size. This backup strategy generally applies only
556 when systems have "holes" in address space, so sbrk cannot perform
557 contiguous expansion, but there is still space available on system.
558 On systems for which this is known to be useful (i.e. most linux
559 kernels), this occurs only when programs allocate huge amounts of
560 memory. Between this, and the fact that mmap regions tend to be
561 limited, the size should be large, to avoid too many mmap calls and
562 thus avoid running out of kernel resources. */
563
564 #ifndef MMAP_AS_MORECORE_SIZE
565 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
566 #endif
567
568 /*
569 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
570 large blocks.
571 */
572
573 #ifndef HAVE_MREMAP
574 #define HAVE_MREMAP 0
575 #endif
576
577 /* We may need to support __malloc_initialize_hook for backwards
578 compatibility. */
579
580 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
581 # define HAVE_MALLOC_INIT_HOOK 1
582 #else
583 # define HAVE_MALLOC_INIT_HOOK 0
584 #endif
585
586
587 /*
588 This version of malloc supports the standard SVID/XPG mallinfo
589 routine that returns a struct containing usage properties and
590 statistics. It should work on any SVID/XPG compliant system that has
591 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
592 install such a thing yourself, cut out the preliminary declarations
593 as described above and below and save them in a malloc.h file. But
594 there's no compelling reason to bother to do this.)
595
596 The main declaration needed is the mallinfo struct that is returned
597 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
598 bunch of fields that are not even meaningful in this version of
599 malloc. These fields are instead filled by mallinfo() with
600 other numbers that might be of interest.
601 */
602
603
604 /* ---------- description of public routines ------------ */
605
606 /*
607 malloc(size_t n)
608 Returns a pointer to a newly allocated chunk of at least n bytes, or null
609 if no space is available. Additionally, on failure, errno is
610 set to ENOMEM on ANSI C systems.
611
612 If n is zero, malloc returns a minimum-sized chunk. (The minimum
613 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
614 systems.) On most systems, size_t is an unsigned type, so calls
615 with negative arguments are interpreted as requests for huge amounts
616 of space, which will often fail. The maximum supported value of n
617 differs across systems, but is in all cases less than the maximum
618 representable value of a size_t.
619 */
620 void* __libc_malloc(size_t);
621 libc_hidden_proto (__libc_malloc)
622
623 /*
624 free(void* p)
625 Releases the chunk of memory pointed to by p, that had been previously
626 allocated using malloc or a related routine such as realloc.
627 It has no effect if p is null. It can have arbitrary (i.e., bad!)
628 effects if p has already been freed.
629
630 Unless disabled (using mallopt), freeing very large spaces will,
631 when possible, automatically trigger operations that give
632 back unused memory to the system, thus reducing program footprint.
633 */
634 void __libc_free(void*);
635 libc_hidden_proto (__libc_free)
636
637 /*
638 calloc(size_t n_elements, size_t element_size);
639 Returns a pointer to n_elements * element_size bytes, with all locations
640 set to zero.
641 */
642 void* __libc_calloc(size_t, size_t);
643
644 /*
645 realloc(void* p, size_t n)
646 Returns a pointer to a chunk of size n that contains the same data
647 as does chunk p up to the minimum of (n, p's size) bytes, or null
648 if no space is available.
649
650 The returned pointer may or may not be the same as p. The algorithm
651 prefers extending p when possible, otherwise it employs the
652 equivalent of a malloc-copy-free sequence.
653
654 If p is null, realloc is equivalent to malloc.
655
656 If space is not available, realloc returns null, errno is set (if on
657 ANSI) and p is NOT freed.
658
659 If n is for fewer bytes than already held by p, the newly unused
660 space is lopped off and freed if possible. Unless the #define
661 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
662 zero (re)allocates a minimum-sized chunk.
663
664 Large chunks that were internally obtained via mmap will always be
665 grown using malloc-copy-free sequences unless the system supports
666 MREMAP (currently only linux).
667
668 The old unix realloc convention of allowing the last-free'd chunk
669 to be used as an argument to realloc is not supported.
670 */
671 void* __libc_realloc(void*, size_t);
672 libc_hidden_proto (__libc_realloc)
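/* A short usage sketch of the failure behavior described above: because a
   failed realloc leaves P allocated, the result must be stored in a
   temporary before overwriting the original pointer (names hypothetical):

     #include <stdlib.h>

     char *p = malloc (100);
     ...
     char *q = realloc (p, 200);    realloc (NULL, n) would act as malloc
     if (q == NULL)
       {
         ... p is still valid and must eventually be freed ...
       }
     else
       p = q;
*/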
673
674 /*
675 memalign(size_t alignment, size_t n);
676 Returns a pointer to a newly allocated chunk of n bytes, aligned
677 in accord with the alignment argument.
678
679 The alignment argument should be a power of two. If the argument is
680 not a power of two, the nearest greater power is used.
681 8-byte alignment is guaranteed by normal malloc calls, so don't
682 bother calling memalign with an argument of 8 or less.
683
684 Overreliance on memalign is a sure way to fragment space.
685 */
686 void* __libc_memalign(size_t, size_t);
687 libc_hidden_proto (__libc_memalign)
688
689 /*
690 valloc(size_t n);
691 Equivalent to memalign(pagesize, n), where pagesize is the page
692 size of the system. If the pagesize is unknown, 4096 is used.
693 */
694 void* __libc_valloc(size_t);
695
696
697
698 /*
699 mallopt(int parameter_number, int parameter_value)
700 Sets tunable parameters. The format is to provide a
701 (parameter-number, parameter-value) pair. mallopt then sets the
702 corresponding parameter to the argument value if it can (i.e., so
703 long as the value is meaningful), and returns 1 if successful else
704 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
705 normally defined in malloc.h. Only one of these (M_MXFAST) is used
706 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
707 so setting them has no effect. But this malloc also supports four
708 other options in mallopt. See below for details. Briefly, supported
709 parameters are as follows (listed defaults are for "typical"
710 configurations).
711
712 Symbol param # default allowed param values
713 M_MXFAST 1 64 0-80 (0 disables fastbins)
714 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
715 M_TOP_PAD -2 0 any
716 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
717 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
718 */
719 int __libc_mallopt(int, int);
720 libc_hidden_proto (__libc_mallopt)
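/* A brief usage sketch of the mallopt interface documented above, using
   the standard <malloc.h> parameter names:

     #include <malloc.h>

     if (mallopt (M_MXFAST, 0) != 1)                   0 disables fastbins
       ... setting was rejected ...
     if (mallopt (M_MMAP_THRESHOLD, 256 * 1024) != 1)
       ... setting was rejected ...

   Each call returns 1 on success and 0 if the value was not accepted.  */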
721
722
723 /*
724 mallinfo()
725 Returns (by copy) a struct containing various summary statistics:
726
727 arena: current total non-mmapped bytes allocated from system
728 ordblks: the number of free chunks
729 smblks: the number of fastbin blocks (i.e., small chunks that
730 have been freed but not yet reused or consolidated)
731 hblks: current number of mmapped regions
732 hblkhd: total bytes held in mmapped regions
733 usmblks: always 0
734 fsmblks: total bytes held in fastbin blocks
735 uordblks: current total allocated space (normal or mmapped)
736 fordblks: total free space
737 keepcost: the maximum number of bytes that could ideally be released
738 back to system via malloc_trim. ("ideally" means that
739 it ignores page restrictions etc.)
740
741 Because these fields are ints, but internal bookkeeping may
742 be kept as longs, the reported values may wrap around zero and
743 thus be inaccurate.
744 */
745 struct mallinfo2 __libc_mallinfo2(void);
746 libc_hidden_proto (__libc_mallinfo2)
747
748 struct mallinfo __libc_mallinfo(void);
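/* A small sketch of reading the statistics described above through the
   public mallinfo2 interface (assumed to be the <malloc.h> alias of the
   declaration above; the printf formatting is purely illustrative):

     #include <malloc.h>
     #include <stdio.h>

     struct mallinfo2 mi = mallinfo2 ();
     printf ("arena: %zu bytes, free chunks: %zu, mmapped: %zu bytes\n",
             mi.arena, mi.ordblks, mi.hblkhd);
     printf ("in use: %zu bytes, free: %zu bytes, trimmable: %zu bytes\n",
             mi.uordblks, mi.fordblks, mi.keepcost);
*/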
749
750
751 /*
752 pvalloc(size_t n);
753 Equivalent to valloc(minimum-page-that-holds(n)), that is,
754 round up n to nearest pagesize.
755 */
756 void* __libc_pvalloc(size_t);
757
758 /*
759 malloc_trim(size_t pad);
760
761 If possible, gives memory back to the system (via negative
762 arguments to sbrk) if there is unused memory at the `high' end of
763 the malloc pool. You can call this after freeing large blocks of
764 memory to potentially reduce the system-level memory requirements
765 of a program. However, it cannot guarantee to reduce memory. Under
766 some allocation patterns, some large free blocks of memory will be
767 locked between two used chunks, so they cannot be given back to
768 the system.
769
770 The `pad' argument to malloc_trim represents the amount of free
771 trailing space to leave untrimmed. If this argument is zero,
772 only the minimum amount of memory to maintain internal data
773 structures will be left (one page or less). Non-zero arguments
774 can be supplied to maintain enough trailing space to service
775 future expected allocations without having to re-obtain memory
776 from the system.
777
778 Malloc_trim returns 1 if it actually released any memory, else 0.
779 On systems that do not support "negative sbrks", it will always
780 return 0.
781 */
782 int __malloc_trim(size_t);
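/* A brief usage sketch of the trimming interface described above
   (illustrative only):

     #include <malloc.h>

     ... free large blocks ...
     if (malloc_trim (0) == 1)
       ... some memory was actually returned to the system ...
     else
       ... nothing could be released ...
*/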
783
784 /*
785 malloc_usable_size(void* p);
786
787 Returns the number of bytes you can actually use in
788 an allocated chunk, which may be more than you requested (although
789 often not) due to alignment and minimum size constraints.
790 You can use this many bytes without worrying about
791 overwriting other allocated objects. This is not a particularly great
792 programming practice. malloc_usable_size can be more useful in
793 debugging and assertions, for example:
794
795 p = malloc(n);
796 assert(malloc_usable_size(p) >= 256);
797
798 */
799 size_t __malloc_usable_size(void*);
800
801 /*
802 malloc_stats();
803 Prints on stderr the amount of space obtained from the system (both
804 via sbrk and mmap), the maximum amount (which may be more than
805 current if malloc_trim and/or munmap got called), and the current
806 number of bytes allocated via malloc (or realloc, etc) but not yet
807 freed. Note that this is the number of bytes allocated, not the
808 number requested. It will be larger than the number requested
809 because of alignment and bookkeeping overhead. Because it includes
810 alignment wastage as being in use, this figure may be greater than
811 zero even when no user-level chunks are allocated.
812
813 The reported current and maximum system memory can be inaccurate if
814 a program makes other calls to system memory allocation functions
815 (normally sbrk) outside of malloc.
816
817 malloc_stats prints only the most commonly interesting statistics.
818 More information can be obtained by calling mallinfo.
819
820 */
821 void __malloc_stats(void);
822
823 /*
824 posix_memalign(void **memptr, size_t alignment, size_t size);
825
826 POSIX wrapper like memalign(), checking for validity of size.
827 */
828 int __posix_memalign(void **, size_t, size_t);
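/* A short usage sketch of the POSIX interface documented above; unlike
   memalign, the result is returned through MEMPTR and the return value is
   an error number (0 on success):

     #include <stdlib.h>

     void *buf;
     int err = posix_memalign (&buf, 64, 4096);    64-byte aligned, 4 KiB
     if (err != 0)
       ... handle EINVAL or ENOMEM ...
     else
       {
         ... use buf ...
         free (buf);
       }
*/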
829
830 /* mallopt tuning options */
831
832 /*
833 M_MXFAST is the maximum request size used for "fastbins", special bins
834 that hold returned chunks without consolidating their spaces. This
835 enables future requests for chunks of the same size to be handled
836 very quickly, but can increase fragmentation, and thus increase the
837 overall memory footprint of a program.
838
839 This malloc manages fastbins very conservatively yet still
840 efficiently, so fragmentation is rarely a problem for values less
841 than or equal to the default. The maximum supported value of MXFAST
842 is 80. You wouldn't want it any higher than this anyway. Fastbins
843 are designed especially for use with many small structs, objects or
844 strings -- the default handles structs/objects/arrays with sizes up
845 to 8 4byte fields, or small strings representing words, tokens,
846 etc. Using fastbins for larger objects normally worsens
847 fragmentation without improving speed.
848
849 M_MXFAST is set in REQUEST size units. It is internally used in
850 chunksize units, which adds padding and alignment. You can reduce
851 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
852 algorithm to be a closer approximation of fifo-best-fit in all cases,
853 not just for larger requests, but will generally cause it to be
854 slower.
855 */
856
857
858 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
859 #ifndef M_MXFAST
860 #define M_MXFAST 1
861 #endif
862
863 #ifndef DEFAULT_MXFAST
864 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
865 #endif
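/* Worked out, the default above matches the table in the header comment:
   with SIZE_SZ == 4 (32-bit) it is 64 * 4 / 4 == 64 bytes, and with
   SIZE_SZ == 8 (64-bit) it is 64 * 8 / 4 == 128 bytes.  */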
866
867
868 /*
869 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
870 to keep before releasing via malloc_trim in free().
871
872 Automatic trimming is mainly useful in long-lived programs.
873 Because trimming via sbrk can be slow on some systems, and can
874 sometimes be wasteful (in cases where programs immediately
875 afterward allocate more large chunks) the value should be high
876 enough so that your overall system performance would improve by
877 releasing this much memory.
878
879 The trim threshold and the mmap control parameters (see below)
880 can be traded off with one another. Trimming and mmapping are
881 two different ways of releasing unused memory back to the
882 system. Between these two, it is often possible to keep
883 system-level demands of a long-lived program down to a bare
884 minimum. For example, in one test suite of sessions measuring
885 the XF86 X server on Linux, using a trim threshold of 128K and a
886 mmap threshold of 192K led to near-minimal long term resource
887 consumption.
888
889 If you are using this malloc in a long-lived program, it should
890 pay to experiment with these values. As a rough guide, you
891 might set it to a value close to the average size of a process
892 (program) running on your system. Releasing this much memory
893 would allow such a process to run in memory. Generally, it's
894 worth it to tune for trimming rather than memory mapping when a
895 program undergoes phases where several large chunks are
896 allocated and released in ways that can reuse each other's
897 storage, perhaps mixed with phases where there are no such
898 chunks at all. And in well-behaved long-lived programs,
899 controlling release of large blocks via trimming versus mapping
900 is usually faster.
901
902 However, in most programs, these parameters serve mainly as
903 protection against the system-level effects of carrying around
904 massive amounts of unneeded memory. Since frequent calls to
905 sbrk, mmap, and munmap otherwise degrade performance, the default
906 parameters are set to relatively high values that serve only as
907 safeguards.
908
909 The trim value must be greater than page size to have any useful
910 effect. To disable trimming completely, you can set it to
911 (unsigned long)(-1)
912
913 Trim settings interact with fastbin (MXFAST) settings: Unless
914 TRIM_FASTBINS is defined, automatic trimming never takes place upon
915 freeing a chunk with size less than or equal to MXFAST. Trimming is
916 instead delayed until subsequent freeing of larger chunks. However,
917 you can still force an attempted trim by calling malloc_trim.
918
919 Also, trimming is not generally possible in cases where
920 the main arena is obtained via mmap.
921
922 Note that the trick some people use of mallocing a huge space and
923 then freeing it at program startup, in an attempt to reserve system
924 memory, doesn't have the intended effect under automatic trimming,
925 since that memory will immediately be returned to the system.
926 */
927
928 #define M_TRIM_THRESHOLD -1
929
930 #ifndef DEFAULT_TRIM_THRESHOLD
931 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
932 #endif
933
934 /*
935 M_TOP_PAD is the amount of extra `padding' space to allocate or
936 retain whenever sbrk is called. It is used in two ways internally:
937
938 * When sbrk is called to extend the top of the arena to satisfy
939 a new malloc request, this much padding is added to the sbrk
940 request.
941
942 * When malloc_trim is called automatically from free(),
943 it is used as the `pad' argument.
944
945 In both cases, the actual amount of padding is rounded
946 so that the end of the arena is always a system page boundary.
947
948 The main reason for using padding is to avoid calling sbrk so
949 often. Having even a small pad greatly reduces the likelihood
950 that nearly every malloc request during program start-up (or
951 after trimming) will invoke sbrk, which needlessly wastes
952 time.
953
954 Automatic rounding-up to page-size units is normally sufficient
955 to avoid measurable overhead, so the default is 0. However, in
956 systems where sbrk is relatively slow, it can pay to increase
957 this value, at the expense of carrying around more memory than
958 the program needs.
959 */
960
961 #define M_TOP_PAD -2
962
963 #ifndef DEFAULT_TOP_PAD
964 #define DEFAULT_TOP_PAD (0)
965 #endif
966
967 /*
968 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
969 adjusted MMAP_THRESHOLD.
970 */
971
972 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
973 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
974 #endif
975
976 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
977 /* For 32-bit platforms we cannot increase the maximum mmap
978 threshold much because it is also the minimum value for the
979 maximum heap size and its alignment. Going above 512k (i.e., 1M
980 for new heaps) wastes too much address space. */
981 # if __WORDSIZE == 32
982 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
983 # else
984 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
985 # endif
986 #endif
987
988 /*
989 M_MMAP_THRESHOLD is the request size threshold for using mmap()
990 to service a request. Requests of at least this size that cannot
991 be allocated using already-existing space will be serviced via mmap.
992 (If enough normal freed space already exists it is used instead.)
993
994 Using mmap segregates relatively large chunks of memory so that
995 they can be individually obtained and released from the host
996 system. A request serviced through mmap is never reused by any
997 other request (at least not directly; the system may just so
998 happen to remap successive requests to the same locations).
999
1000 Segregating space in this way has the benefits that:
1001
1002 1. Mmapped space can ALWAYS be individually released back
1003 to the system, which helps keep the system level memory
1004 demands of a long-lived program low.
1005 2. Mapped memory can never become `locked' between
1006 other chunks, as can happen with normally allocated chunks, which
1007 means that even trimming via malloc_trim would not release them.
1008 3. On some systems with "holes" in address spaces, mmap can obtain
1009 memory that sbrk cannot.
1010
1011 However, it has the disadvantages that:
1012
1013 1. The space cannot be reclaimed, consolidated, and then
1014 used to service later requests, as happens with normal chunks.
1015 2. It can lead to more wastage because of mmap page alignment
1016 requirements
1017 3. It causes malloc performance to be more dependent on host
1018 system memory management support routines which may vary in
1019 implementation quality and may impose arbitrary
1020 limitations. Generally, servicing a request via normal
1021 malloc steps is faster than going through a system's mmap.
1022
1023 The advantages of mmap nearly always outweigh disadvantages for
1024 "large" chunks, but the value of "large" varies across systems. The
1025 default is an empirically derived value that works well in most
1026 systems.
1027
1028
1029 Update in 2006:
1030 The above was written in 2001. Since then the world has changed a lot.
1031 Memory got bigger. Applications got bigger. The virtual address space
1032 layout in 32 bit linux changed.
1033
1034 In the new situation, brk() and mmap space is shared and there are no
1035 artificial limits on brk size imposed by the kernel. What is more,
1036 applications have started using transient allocations larger than the
1037 128Kb that was imagined in 2001.
1038
1039 The price for mmap is also high now; each time glibc mmaps from the
1040 kernel, the kernel is forced to zero out the memory it gives to the
1041 application. Zeroing memory is expensive and eats a lot of cache and
1042 memory bandwidth. This has nothing to do with the efficiency of the
1043 virtual memory system; when doing mmap, the kernel simply has no choice but
1044 to zero.
1045
1046 In 2001, the kernel had a maximum size for brk() which was about 800
1047 megabytes on 32 bit x86, at that point brk() would hit the first
1048 mmapped shared libraries and couldn't expand anymore. With current 2.6
1049 kernels, the VA space layout is different and brk() and mmap
1050 both can span the entire heap at will.
1051
1052 Rather than using a static threshold for the brk/mmap tradeoff,
1053 we are now using a simple dynamic one. The goal is still to avoid
1054 fragmentation. The old goals we kept are
1055 1) try to get the long lived large allocations to use mmap()
1056 2) really large allocations should always use mmap()
1057 and we're adding now:
1058 3) transient allocations should use brk() to avoid forcing the kernel
1059 to zero memory over and over again
1060
1061 The implementation works with a sliding threshold, which is by default
1062 limited to go between 128Kb and 32Mb (64Mb for 64-bit machines) and starts
1063 out at 128Kb as per the 2001 default.
1064
1065 This allows us to satisfy requirement 1) under the assumption that long
1066 lived allocations are made early in the process' lifespan, before it has
1067 started doing dynamic allocations of the same size (which will
1068 increase the threshold).
1069
1070 The upper bound on the threshold satisfies requirement 2).
1071
1072 The threshold goes up in value when the application frees memory that was
1073 allocated with the mmap allocator. The idea is that once the application
1074 starts freeing memory of a certain size, it's highly probable that this is
1075 a size the application uses for transient allocations. This estimator
1076 is there to satisfy the new third requirement.
1077
1078 */
1079
1080 #define M_MMAP_THRESHOLD -3
1081
1082 #ifndef DEFAULT_MMAP_THRESHOLD
1083 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1084 #endif
1085
1086 /*
1087 M_MMAP_MAX is the maximum number of requests to simultaneously
1088 service using mmap. This parameter exists because
1089 some systems have a limited number of internal tables for
1090 use by mmap, and using more than a few of them may degrade
1091 performance.
1092
1093 The default is set to a value that serves only as a safeguard.
1094 Setting to 0 disables use of mmap for servicing large requests.
1095 */
1096
1097 #define M_MMAP_MAX -4
1098
1099 #ifndef DEFAULT_MMAP_MAX
1100 #define DEFAULT_MMAP_MAX (65536)
1101 #endif
1102
1103 #include <malloc.h>
1104
1105 #ifndef RETURN_ADDRESS
1106 #define RETURN_ADDRESS(X_) (NULL)
1107 #endif
1108
1109 /* Forward declarations. */
1110 struct malloc_chunk;
1111 typedef struct malloc_chunk* mchunkptr;
1112
1113 /* Internal routines. */
1114
1115 static void* _int_malloc(mstate, size_t);
1116 static void _int_free(mstate, mchunkptr, int);
1117 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1118 INTERNAL_SIZE_T);
1119 static void* _int_memalign(mstate, size_t, size_t);
1120 static void* _mid_memalign(size_t, size_t, void *);
1121
1122 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
1123
1124 static void* mem2mem_check(void *p, size_t sz);
1125 static void top_check(void);
1126 static void munmap_chunk(mchunkptr p);
1127 #if HAVE_MREMAP
1128 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
1129 #endif
1130
1131 static void* malloc_check(size_t sz, const void *caller);
1132 static void free_check(void* mem, const void *caller);
1133 static void* realloc_check(void* oldmem, size_t bytes,
1134 const void *caller);
1135 static void* memalign_check(size_t alignment, size_t bytes,
1136 const void *caller);
1137
1138 /* ------------------ MMAP support ------------------ */
1139
1140
1141 #include <fcntl.h>
1142 #include <sys/mman.h>
1143
1144 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1145 # define MAP_ANONYMOUS MAP_ANON
1146 #endif
1147
1148 #ifndef MAP_NORESERVE
1149 # define MAP_NORESERVE 0
1150 #endif
1151
1152 #define MMAP(addr, size, prot, flags) \
1153 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1154
1155
1156 /*
1157 ----------------------- Chunk representations -----------------------
1158 */
1159
1160
1161 /*
1162 This struct declaration is misleading (but accurate and necessary).
1163 It declares a "view" into memory allowing access to necessary
1164 fields at known offsets from a given base. See explanation below.
1165 */
1166
1167 struct malloc_chunk {
1168
1169 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1170 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1171
1172 struct malloc_chunk* fd; /* double links -- used only if free. */
1173 struct malloc_chunk* bk;
1174
1175 /* Only used for large blocks: pointer to next larger size. */
1176 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1177 struct malloc_chunk* bk_nextsize;
1178 };
1179
1180
1181 /*
1182 malloc_chunk details:
1183
1184 (The following includes lightly edited explanations by Colin Plumb.)
1185
1186 Chunks of memory are maintained using a `boundary tag' method as
1187 described in e.g., Knuth or Standish. (See the paper by Paul
1188 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1189 survey of such techniques.) Sizes of free chunks are stored both
1190 in the front of each chunk and at the end. This makes
1191 consolidating fragmented chunks into bigger chunks very fast. The
1192 size fields also hold bits representing whether chunks are free or
1193 in use.
1194
1195 An allocated chunk looks like this:
1196
1197
1198 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1199 | Size of previous chunk, if unallocated (P clear) |
1200 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1201 | Size of chunk, in bytes |A|M|P|
1202 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1203 | User data starts here... .
1204 . .
1205 . (malloc_usable_size() bytes) .
1206 . |
1207 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1208 | (size of chunk, but used for application data) |
1209 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1210 | Size of next chunk, in bytes |A|0|1|
1211 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1212
1213 Where "chunk" is the front of the chunk for the purpose of most of
1214 the malloc code, but "mem" is the pointer that is returned to the
1215 user. "Nextchunk" is the beginning of the next contiguous chunk.
1216
1217 Chunks always begin on even word boundaries, so the mem portion
1218 (which is returned to the user) is also on an even word boundary, and
1219 thus at least double-word aligned.
1220
1221 Free chunks are stored in circular doubly-linked lists, and look like this:
1222
1223 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1224 | Size of previous chunk, if unallocated (P clear) |
1225 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1226 `head:' | Size of chunk, in bytes |A|0|P|
1227 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1228 | Forward pointer to next chunk in list |
1229 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1230 | Back pointer to previous chunk in list |
1231 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1232 | Unused space (may be 0 bytes long) .
1233 . .
1234 . |
1235 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1236 `foot:' | Size of chunk, in bytes |
1237 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1238 | Size of next chunk, in bytes |A|0|0|
1239 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1240
1241 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1242 chunk size (which is always a multiple of two words), is an in-use
1243 bit for the *previous* chunk. If that bit is *clear*, then the
1244 word before the current chunk size contains the previous chunk
1245 size, and can be used to find the front of the previous chunk.
1246 The very first chunk allocated always has this bit set,
1247 preventing access to non-existent (or non-owned) memory. If
1248 prev_inuse is set for any given chunk, then you CANNOT determine
1249 the size of the previous chunk, and might even get a memory
1250 addressing fault when trying to do so.
1251
1252 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1253 main arena, described by the main_arena variable. When additional
1254 threads are spawned, each thread receives its own arena (up to a
1255 configurable limit, after which arenas are reused for multiple
1256 threads), and the chunks in these arenas have the A bit set. To
1257 find the arena for a chunk on such a non-main arena, heap_for_ptr
1258 performs a bit mask operation and indirection through the ar_ptr
1259 member of the per-heap header heap_info (see arena.c).
1260
1261 Note that the `foot' of the current chunk is actually represented
1262 as the prev_size of the NEXT chunk. This makes it easier to
1263 deal with alignments etc but can be very confusing when trying
1264 to extend or adapt this code.
1265
1266 The three exceptions to all this are:
1267
1268 1. The special chunk `top' doesn't bother using the
1269 trailing size field since there is no next contiguous chunk
1270 that would have to index off it. After initialization, `top'
1271 is forced to always exist. If it would become less than
1272 MINSIZE bytes long, it is replenished.
1273
1274 2. Chunks allocated via mmap, which have the second-lowest-order
1275 bit M (IS_MMAPPED) set in their size fields. Because they are
1276 allocated one-by-one, each must contain its own trailing size
1277 field. If the M bit is set, the other bits are ignored
1278 (because mmapped chunks are neither in an arena, nor adjacent
1279 to a freed chunk). The M bit is also used for chunks which
1280 originally came from a dumped heap via malloc_set_state in
1281 hooks.c.
1282
1283 3. Chunks in fastbins are treated as allocated chunks from the
1284 point of view of the chunk allocator. They are consolidated
1285 with their neighbors only in bulk, in malloc_consolidate.
1286 */
1287
1288 /*
1289 ---------- Size and alignment checks and conversions ----------
1290 */
1291
1292 /* Conversion from malloc headers to user pointers, and back. When
1293 using memory tagging the user data and the malloc data structure
1294 headers have distinct tags. Converting fully from one to the other
1295 involves extracting the tag at the other address and creating a
1296 suitable pointer using it. That can be quite expensive. There are
1297 cases when the pointers are not dereferenced (for example only used
1298 for alignment check) so the tags are not relevant, and there are
1299 cases when user data is not tagged distinctly from malloc headers
1300 (user data is untagged because tagging is done late in malloc and
1301 early in free). User memory tagging across internal interfaces:
1302
1303 sysmalloc: Returns untagged memory.
1304 _int_malloc: Returns untagged memory.
1305 _int_free: Takes untagged memory.
1306 _int_memalign: Returns untagged memory.
1308 _mid_memalign: Returns tagged memory.
1309 _int_realloc: Takes and returns tagged memory.
1310 */
1311
1312 /* The chunk header is two SIZE_SZ elements, but this is used widely, so
1313 we define it here for clarity later. */
1314 #define CHUNK_HDR_SZ (2 * SIZE_SZ)
1315
1316 /* Convert a chunk address to a user mem pointer without correcting
1317 the tag. */
1318 #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
1319
1320 /* Convert a chunk address to a user mem pointer and extract the right tag. */
1321 #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ))
1322
1323 /* Convert a user mem pointer to a chunk address and extract the right tag. */
1324 #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
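/* Concretely, with 8-byte size fields the header is CHUNK_HDR_SZ == 16
   bytes, so for a chunk at address C the user pointer is C + 16, and
   mem2chunk (chunk2mem (C)) == C (modulo re-reading the tag when memory
   tagging is enabled).  */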
1325
1326 /* The smallest possible chunk */
1327 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1328
1329 /* The smallest size we can malloc is an aligned minimal chunk */
1330
1331 #define MINSIZE \
1332 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1333
1334 /* Check if m has acceptable alignment */
1335
1336 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1337
1338 #define misaligned_chunk(p) \
1339 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
1340 & MALLOC_ALIGN_MASK)
1341
1342 /* pad request bytes into a usable size -- internal version */
1343 /* Note: This must be a macro that evaluates to a compile time constant
1344 if passed a literal constant. */
1345 #define request2size(req) \
1346 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1347 MINSIZE : \
1348 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1349
1350 /* Check that REQ does not overflow when padded and aligned and that the
1351 result stays below PTRDIFF_MAX. On success, store the padded request
1352 size (or MINSIZE, if that value would be smaller than MINSIZE) in *SZ
1353 and return true; return false if any of these checks fail. */
1354 static inline bool
1355 checked_request2size (size_t req, size_t *sz) __nonnull (1)
1356 {
1357 if (__glibc_unlikely (req > PTRDIFF_MAX))
1358 return false;
1359
1360 /* When using tagged memory, we cannot share the end of the user
1361 block with the header for the next chunk, so ensure that we
1362 allocate blocks that are rounded up to the granule size. Take
1363 care not to overflow from close to MAX_SIZE_T to a small
1364 number. Ideally, this would be part of request2size(), but that
1365 must be a macro that produces a compile time constant if passed
1366 a constant literal. */
1367 if (__glibc_unlikely (mtag_enabled))
1368 {
1369 /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */
1370 asm ("");
1371
1372 req = (req + (__MTAG_GRANULE_SIZE - 1)) &
1373 ~(size_t)(__MTAG_GRANULE_SIZE - 1);
1374 }
1375
1376 *sz = request2size (req);
1377 return true;
1378 }
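/* A few worked examples of the padding above, assuming a typical 64-bit
   configuration (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, MINSIZE == 32) and
   memory tagging disabled:

     request2size (0)   == 32    below MINSIZE, rounded up to MINSIZE
     request2size (24)  == 32    24 + 8 == 32, already aligned
     request2size (25)  == 48    25 + 8 == 33, rounded up to 48
     request2size (100) == 112   100 + 8 == 108, rounded up to 112

   checked_request2size additionally rejects requests above PTRDIFF_MAX.  */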
1379
1380 /*
1381 --------------- Physical chunk operations ---------------
1382 */
1383
1384
1385 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1386 #define PREV_INUSE 0x1
1387
1388 /* extract inuse bit of previous chunk */
1389 #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1390
1391
1392 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1393 #define IS_MMAPPED 0x2
1394
1395 /* check for mmap()'ed chunk */
1396 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1397
1398
1399 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1400 from a non-main arena. This is only set immediately before handing
1401 the chunk to the user, if necessary. */
1402 #define NON_MAIN_ARENA 0x4
1403
1404 /* Check for chunk from main arena. */
1405 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1406
1407 /* Mark a chunk as not being on the main arena. */
1408 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1409
1410
1411 /*
1412 Bits to mask off when extracting size
1413
1414 Note: IS_MMAPPED is intentionally not masked off from size field in
1415 macros for which mmapped chunks should never be seen. This should
1416 cause helpful core dumps to occur if it is tried by accident by
1417 people extending or adapting this malloc.
1418 */
1419 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1420
1421 /* Get size, ignoring use bits */
1422 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1423
1424 /* Like chunksize, but do not mask SIZE_BITS. */
1425 #define chunksize_nomask(p) ((p)->mchunk_size)
1426
1427 /* Ptr to next physical malloc_chunk. */
1428 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1429
1430 /* Size of the chunk below P. Only valid if !prev_inuse (P). */
1431 #define prev_size(p) ((p)->mchunk_prev_size)
1432
1433 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1434 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1435
1436 /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1437 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1438
1439 /* Treat space at ptr + offset as a chunk */
1440 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1441
1442 /* extract p's inuse bit */
1443 #define inuse(p) \
1444 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1445
1446 /* set/clear chunk as being inuse without otherwise disturbing */
1447 #define set_inuse(p) \
1448 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1449
1450 #define clear_inuse(p) \
1451 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1452
1453
1454 /* check/set/clear inuse bits in known places */
1455 #define inuse_bit_at_offset(p, s) \
1456 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1457
1458 #define set_inuse_bit_at_offset(p, s) \
1459 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1460
1461 #define clear_inuse_bit_at_offset(p, s) \
1462 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1463
1464
1465 /* Set size at head, without disturbing its use bit */
1466 #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1467
1468 /* Set size/use field */
1469 #define set_head(p, s) ((p)->mchunk_size = (s))
1470
1471 /* Set size at footer (only when chunk is not in use) */
1472 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1473
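/* Worked example: a raw size field of 0x115 decodes as
     chunksize (p)        == 0x110
     prev_inuse (p)       != 0   (PREV_INUSE, bit 0, is set)
     chunk_is_mmapped (p) == 0   (IS_MMAPPED, bit 1, is clear)
     chunk_main_arena (p) == 0   (NON_MAIN_ARENA, bit 2, is set)
   Note that inuse (p) reports the PREV_INUSE bit stored in the header of the
   *next* physical chunk, since a chunk's own in-use state is recorded there.  */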
1474 #pragma GCC poison mchunk_size
1475 #pragma GCC poison mchunk_prev_size
1476
1477 /* This is the size of the real usable data in the chunk. Not valid for
1478 dumped heap chunks. */
1479 #define memsize(p) \
1480 (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
1481 chunksize (p) - CHUNK_HDR_SZ : \
1482 chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1483
1484 /* If memory tagging is enabled, the layout changes to accommodate the granule
1485 size; this is wasteful for small allocations, so it is not done by default.
1486 Both the chunk header and the user data have to be granule aligned. */
1487 _Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
1488 "memory tagging is not supported with large granule.");
1489
1490 static __always_inline void *
1491 tag_new_usable (void *ptr)
1492 {
1493 if (__glibc_unlikely (mtag_enabled) && ptr)
1494 {
1495 mchunkptr cp = mem2chunk(ptr);
1496 ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
1497 }
1498 return ptr;
1499 }
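/* Worked example, assuming SIZE_SZ == 8, CHUNK_HDR_SZ == 16 and memory
   tagging disabled: a minimal 32-byte heap chunk yields
     memsize (p) == 32 - 16 + 8 == 24
   usable bytes, because the prev_size field of the next chunk can hold user
   data while this chunk is in use.  For mmapped chunks, and when tagging with
   a granule larger than SIZE_SZ is enabled, that trailing SIZE_SZ is not
   available, so only chunksize (p) - CHUNK_HDR_SZ bytes are usable.  */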
1500
1501 /*
1502 -------------------- Internal data structures --------------------
1503
1504 All internal state is held in an instance of malloc_state defined
1505 below. There are no other static variables, except in two optional
1506 cases:
1507 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1508 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1509 for mmap.
1510
1511 Beware of lots of tricks that minimize the total bookkeeping space
1512 requirements. The result is a little over 1K bytes (for 4-byte
1513 pointers and size_t).
1514 */
1515
1516 /*
1517 Bins
1518
1519 An array of bin headers for free chunks. Each bin is doubly
1520 linked. The bins are approximately proportionally (log) spaced.
1521 There are a lot of these bins (128). This may look excessive, but
1522 works very well in practice. Most bins hold sizes that are
1523 unusual as malloc request sizes, but are more usual for fragments
1524 and consolidated sets of chunks, which is what these bins hold, so
1525 they can be found quickly. All procedures maintain the invariant
1526 that no consolidated chunk physically borders another one, so each
1527 chunk in a list is known to be preceded and followed by either
1528 inuse chunks or the ends of memory.
1529
1530 Chunks in bins are kept in size order, with ties going to the
1531 approximately least recently used chunk. Ordering isn't needed
1532 for the small bins, which all contain the same-sized chunks, but
1533 facilitates best-fit allocation for larger chunks. These lists
1534 are just sequential. Keeping them in order almost never requires
1535 enough traversal to warrant using fancier ordered data
1536 structures.
1537
1538 Chunks of the same size are linked with the most
1539 recently freed at the front, and allocations are taken from the
1540 back. This results in LRU (FIFO) allocation order, which tends
1541 to give each chunk an equal opportunity to be consolidated with
1542 adjacent freed chunks, resulting in larger free chunks and less
1543 fragmentation.
1544
1545 To simplify use in double-linked lists, each bin header acts
1546 as a malloc_chunk. This avoids special-casing for headers.
1547 But to conserve space and improve locality, we allocate
1548 only the fd/bk pointers of bins, and then use repositioning tricks
1549 to treat these as the fields of a malloc_chunk*.
1550 */
1551
1552 typedef struct malloc_chunk *mbinptr;
1553
1554 /* addressing -- note that bin_at(0) does not exist */
1555 #define bin_at(m, i) \
1556 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1557 - offsetof (struct malloc_chunk, fd))
1558
1559 /* analog of ++bin */
1560 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1561
1562 /* Reminders about list directionality within bins */
1563 #define first(b) ((b)->fd)
1564 #define last(b) ((b)->bk)
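/* Layout sketch: bin_at (m, i) does not point at a real chunk; it is
   positioned so that its fd and bk fields overlay m->bins[(i - 1) * 2] and
   m->bins[(i - 1) * 2 + 1].  For example, with
   offsetof (struct malloc_chunk, fd) == 16 on a 64-bit build,
     bin_at (m, 2) == (mbinptr) ((char *) &m->bins[2] - 16)
   so bin_at (m, 2)->fd aliases bins[2] and bin_at (m, 2)->bk aliases bins[3].
   Only the fd/bk fields of a bin header may ever be accessed.  */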
1565
1566 /*
1567 Indexing
1568
1569 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1570 8 bytes apart. Larger bins are approximately logarithmically spaced:
1571
1572 64 bins of size 8
1573 32 bins of size 64
1574 16 bins of size 512
1575 8 bins of size 4096
1576 4 bins of size 32768
1577 2 bins of size 262144
1578 1 bin of size what's left
1579
1580 There is actually a little bit of slop in the numbers in bin_index
1581 for the sake of speed. This makes no difference elsewhere.
1582
1583 The bins top out around 1MB because we expect to service large
1584 requests via mmap.
1585
1586 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1587 a valid chunk size the small bins are bumped up one.
1588 */
1589
1590 #define NBINS 128
1591 #define NSMALLBINS 64
1592 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1593 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
1594 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1595
1596 #define in_smallbin_range(sz) \
1597 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1598
1599 #define smallbin_index(sz) \
1600 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1601 + SMALLBIN_CORRECTION)
1602
1603 #define largebin_index_32(sz) \
1604 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1605 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1606 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1607 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1608 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1609 126)
1610
1611 #define largebin_index_32_big(sz) \
1612 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1613 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1614 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1615 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1616 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1617 126)
1618
1619 // XXX It remains to be seen whether it is good to keep the widths of
1620 // XXX the buckets the same or whether it should be scaled by a factor
1621 // XXX of two as well.
1622 #define largebin_index_64(sz) \
1623 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1624 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1625 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1626 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1627 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1628 126)
1629
1630 #define largebin_index(sz) \
1631 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1632 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1633 : largebin_index_32 (sz))
1634
1635 #define bin_index(sz) \
1636 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
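/* Worked example, assuming SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16
   (so SMALLBIN_WIDTH == 16 and MIN_LARGE_SIZE == 1024):
     bin_index (32)    -> smallbin_index (32)      == 2
     bin_index (1008)  -> smallbin_index (1008)    == 63  (largest small bin)
     bin_index (1024)  -> largebin_index_64 (1024) == 48 + (1024 >> 6) == 64
     bin_index (2048)  -> largebin_index_64 (2048) == 48 + (2048 >> 6) == 80
   Chunk sizes, not request sizes, are used as the argument.  */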
1637
1638 /* Take a chunk off a bin list. */
1639 static void
1640 unlink_chunk (mstate av, mchunkptr p)
1641 {
1642 if (chunksize (p) != prev_size (next_chunk (p)))
1643 malloc_printerr ("corrupted size vs. prev_size");
1644
1645 mchunkptr fd = p->fd;
1646 mchunkptr bk = p->bk;
1647
1648 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1649 malloc_printerr ("corrupted double-linked list");
1650
1651 fd->bk = bk;
1652 bk->fd = fd;
1653 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1654 {
1655 if (p->fd_nextsize->bk_nextsize != p
1656 || p->bk_nextsize->fd_nextsize != p)
1657 malloc_printerr ("corrupted double-linked list (not small)");
1658
1659 if (fd->fd_nextsize == NULL)
1660 {
1661 if (p->fd_nextsize == p)
1662 fd->fd_nextsize = fd->bk_nextsize = fd;
1663 else
1664 {
1665 fd->fd_nextsize = p->fd_nextsize;
1666 fd->bk_nextsize = p->bk_nextsize;
1667 p->fd_nextsize->bk_nextsize = fd;
1668 p->bk_nextsize->fd_nextsize = fd;
1669 }
1670 }
1671 else
1672 {
1673 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1674 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1675 }
1676 }
1677 }
1678
1679 /*
1680 Unsorted chunks
1681
1682 All remainders from chunk splits, as well as all returned chunks,
1683 are first placed in the "unsorted" bin. They are then placed
1684 in regular bins after malloc gives them ONE chance to be used before
1685 binning. So, basically, the unsorted_chunks list acts as a queue,
1686 with chunks being placed on it in free (and malloc_consolidate),
1687 and taken off (to be either used or placed in bins) in malloc.
1688
1689 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1690 does not have to be taken into account in size comparisons.
1691 */
1692
1693 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1694 #define unsorted_chunks(M) (bin_at (M, 1))
1695
1696 /*
1697 Top
1698
1699 The top-most available chunk (i.e., the one bordering the end of
1700 available memory) is treated specially. It is never included in
1701 any bin, is used only if no other chunk is available, and is
1702 released back to the system if it is very large (see
1703 M_TRIM_THRESHOLD). Because top initially
1704 points to its own bin with initial zero size, thus forcing
1705 extension on the first malloc request, we avoid having any special
1706 code in malloc to check whether it even exists yet. But we still
1707 need to do so when getting memory from system, so we make
1708 initial_top treat the bin as a legal but unusable chunk during the
1709 interval between initialization and the first call to
1710 sysmalloc. (This is somewhat delicate, since it relies on
1711 the 2 preceding words to be zero during this interval as well.)
1712 */
1713
1714 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1715 #define initial_top(M) (unsorted_chunks (M))
1716
1717 /*
1718 Binmap
1719
1720 To help compensate for the large number of bins, a one-level index
1721 structure is used for bin-by-bin searching. `binmap' is a
1722 bitvector recording whether bins are definitely empty so they can
1723 be skipped over during traversals. The bits are NOT always
1724 cleared as soon as bins are empty, but instead only
1725 when they are noticed to be empty during traversal in malloc.
1726 */
1727
1728 /* Conservatively use 32 bits per map word, even on a 64-bit system */
1729 #define BINMAPSHIFT 5
1730 #define BITSPERMAP (1U << BINMAPSHIFT)
1731 #define BINMAPSIZE (NBINS / BITSPERMAP)
1732
1733 #define idx2block(i) ((i) >> BINMAPSHIFT)
1734 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1735
1736 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1737 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1738 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
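/* Worked example: for bin index 60,
     idx2block (60) == 60 >> 5         == 1
     idx2bit (60)   == 1U << (60 & 31) == 1U << 28
   so mark_bin (m, 60) sets bit 28 of m->binmap[1].  With NBINS == 128 and
   32 bits per map word, BINMAPSIZE == 4.  */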
1739
1740 /*
1741 Fastbins
1742
1743 An array of lists holding recently freed small chunks. Fastbins
1744 are not doubly linked. It is faster to single-link them, and
1745 since chunks are never removed from the middles of these lists,
1746 double linking is not necessary. Also, unlike regular bins, they
1747 are not even processed in FIFO order (they use faster LIFO) since
1748 ordering doesn't much matter in the transient contexts in which
1749 fastbins are normally used.
1750
1751 Chunks in fastbins keep their inuse bit set, so they cannot
1752 be consolidated with other free chunks. malloc_consolidate
1753 releases all chunks in fastbins and consolidates them with
1754 other free chunks.
1755 */
1756
1757 typedef struct malloc_chunk *mfastbinptr;
1758 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1759
1760 /* offset 2 to use otherwise unindexable first 2 bins */
1761 #define fastbin_index(sz) \
1762 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1763
1764
1765 /* The maximum fastbin request size we support */
1766 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1767
1768 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
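/* Worked example, assuming SIZE_SZ == 8 (64-bit):
     fastbin_index (32)  == (32 >> 4) - 2 == 0   (smallest chunk size)
     MAX_FAST_SIZE       == 80 * 8 / 4    == 160 (a request size)
     request2size (160)  == 176, fastbin_index (176) == 9, so NFASTBINS == 10
   The limit actually used at run time is lower by default; see set_max_fast
   and DEFAULT_MXFAST below.  */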
1769
1770 /*
1771 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1772 that triggers automatic consolidation of possibly-surrounding
1773 fastbin chunks. This is a heuristic, so the exact value should not
1774 matter too much. It is defined at half the default trim threshold as a
1775 compromise heuristic to only attempt consolidation if it is likely
1776 to lead to trimming. However, it is not dynamically tunable, since
1777 consolidation reduces fragmentation surrounding large chunks even
1778 if trimming is not used.
1779 */
1780
1781 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1782
1783 /*
1784 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1785 regions. Otherwise, contiguity is exploited in merging together,
1786 when possible, results from consecutive MORECORE calls.
1787
1788 The initial value comes from MORECORE_CONTIGUOUS, but is
1789 changed dynamically if mmap is ever used as an sbrk substitute.
1790 */
1791
1792 #define NONCONTIGUOUS_BIT (2U)
1793
1794 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1795 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1796 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1797 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1798
1799 /* Maximum size of memory handled in fastbins. */
1800 static INTERNAL_SIZE_T global_max_fast;
1801
1802 /*
1803 Set value of max_fast.
1804 Use an impossibly small value if 0.
1805 Precondition: there are no existing fastbin chunks in the main arena.
1806 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1807 before changing max_fast. Note other arenas will leak their fast bin
1808 entries if max_fast is reduced.
1809 */
1810
1811 #define set_max_fast(s) \
1812 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
1813 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1814
1815 static inline INTERNAL_SIZE_T
1816 get_max_fast (void)
1817 {
1818 /* Tell the GCC optimizers that global_max_fast is never larger
1819 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1820 _int_malloc after constant propagation of the size parameter.
1821 (The code never executes because malloc preserves the
1822 global_max_fast invariant, but the optimizers may not recognize
1823 this.) */
1824 if (global_max_fast > MAX_FAST_SIZE)
1825 __builtin_unreachable ();
1826 return global_max_fast;
1827 }
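/* Worked example, assuming SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16:
     set_max_fast (0)    -> global_max_fast == MIN_CHUNK_SIZE / 2 == 16,
                            which is below any real chunk size, so fastbins
                            are effectively disabled;
     set_max_fast (128)  -> global_max_fast == (128 + 8) & ~15 == 128,
                            the value installed for DEFAULT_MXFAST on 64-bit
                            configurations.  */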
1828
1829 /*
1830 ----------- Internal state representation and initialization -----------
1831 */
1832
1833 /*
1834 have_fastchunks indicates that there are probably some fastbin chunks.
1835 It is set true on entering a chunk into any fastbin, and cleared early in
1836 malloc_consolidate. The value is approximate since it may be set when there
1837 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1838 available. Given its sole purpose is to reduce the number of redundant calls to
1839 malloc_consolidate, it does not affect correctness. As a result we can safely
1840 use relaxed atomic accesses.
1841 */
1842
1843
1844 struct malloc_state
1845 {
1846 /* Serialize access. */
1847 __libc_lock_define (, mutex);
1848
1849 /* Flags (formerly in max_fast). */
1850 int flags;
1851
1852 /* Set if the fastbin chunks contain recently inserted free blocks. */
1853 /* Note this is a bool but not all targets support atomics on booleans. */
1854 int have_fastchunks;
1855
1856 /* Fastbins */
1857 mfastbinptr fastbinsY[NFASTBINS];
1858
1859 /* Base of the topmost chunk -- not otherwise kept in a bin */
1860 mchunkptr top;
1861
1862 /* The remainder from the most recent split of a small request */
1863 mchunkptr last_remainder;
1864
1865 /* Normal bins packed as described above */
1866 mchunkptr bins[NBINS * 2 - 2];
1867
1868 /* Bitmap of bins */
1869 unsigned int binmap[BINMAPSIZE];
1870
1871 /* Linked list */
1872 struct malloc_state *next;
1873
1874 /* Linked list for free arenas. Access to this field is serialized
1875 by free_list_lock in arena.c. */
1876 struct malloc_state *next_free;
1877
1878 /* Number of threads attached to this arena. 0 if the arena is on
1879 the free list. Access to this field is serialized by
1880 free_list_lock in arena.c. */
1881 INTERNAL_SIZE_T attached_threads;
1882
1883 /* Memory allocated from the system in this arena. */
1884 INTERNAL_SIZE_T system_mem;
1885 INTERNAL_SIZE_T max_system_mem;
1886 };
1887
1888 struct malloc_par
1889 {
1890 /* Tunable parameters */
1891 unsigned long trim_threshold;
1892 INTERNAL_SIZE_T top_pad;
1893 INTERNAL_SIZE_T mmap_threshold;
1894 INTERNAL_SIZE_T arena_test;
1895 INTERNAL_SIZE_T arena_max;
1896
1897 /* Memory map support */
1898 int n_mmaps;
1899 int n_mmaps_max;
1900 int max_n_mmaps;
1901 /* the mmap_threshold is dynamic, until the user sets
1902 it manually, at which point we need to disable any
1903 dynamic behavior. */
1904 int no_dyn_threshold;
1905
1906 /* Statistics */
1907 INTERNAL_SIZE_T mmapped_mem;
1908 INTERNAL_SIZE_T max_mmapped_mem;
1909
1910 /* First address handed out by MORECORE/sbrk. */
1911 char *sbrk_base;
1912
1913 #if USE_TCACHE
1914 /* Maximum number of buckets to use. */
1915 size_t tcache_bins;
1916 size_t tcache_max_bytes;
1917 /* Maximum number of chunks in each bucket. */
1918 size_t tcache_count;
1919 /* Maximum number of chunks to remove from the unsorted list that
1920 are not used to prefill the cache. */
1921 size_t tcache_unsorted_limit;
1922 #endif
1923 };
1924
1925 /* There are several instances of this struct ("arenas") in this
1926 malloc. If you are adapting this malloc in a way that does NOT use
1927 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1928 before using. This malloc relies on the property that malloc_state
1929 is initialized to all zeroes (as is true of C statics). */
1930
1931 static struct malloc_state main_arena =
1932 {
1933 .mutex = _LIBC_LOCK_INITIALIZER,
1934 .next = &main_arena,
1935 .attached_threads = 1
1936 };
1937
1938 /* These variables are used for undumping support. Chunks are marked
1939 as using mmap, but we leave them alone if they fall into this
1940 range. NB: The chunk size for these chunks only includes the
1941 initial size field (of SIZE_SZ bytes), there is no trailing size
1942 field (unlike with regular mmapped chunks). */
1943 static mchunkptr dumped_main_arena_start; /* Inclusive. */
1944 static mchunkptr dumped_main_arena_end; /* Exclusive. */
1945
1946 /* True if the pointer falls into the dumped arena. Use this after
1947 chunk_is_mmapped indicates a chunk is mmapped. */
1948 #define DUMPED_MAIN_ARENA_CHUNK(p) \
1949 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1950
1951 /* There is only one instance of the malloc parameters. */
1952
1953 static struct malloc_par mp_ =
1954 {
1955 .top_pad = DEFAULT_TOP_PAD,
1956 .n_mmaps_max = DEFAULT_MMAP_MAX,
1957 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1958 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1959 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1960 .arena_test = NARENAS_FROM_NCORES (1)
1961 #if USE_TCACHE
1962 ,
1963 .tcache_count = TCACHE_FILL_COUNT,
1964 .tcache_bins = TCACHE_MAX_BINS,
1965 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1966 .tcache_unsorted_limit = 0 /* No limit. */
1967 #endif
1968 };
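/* Worked example: NARENAS_FROM_NCORES (n) is n * 2 on 32-bit and n * 8 on
   64-bit builds, so arena_test starts out as 2 or 8 respectively; arena_max
   stays 0 until it is set explicitly, e.g. via mallopt (M_ARENA_MAX, ...) or
   the corresponding tunable.  */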
1969
1970 /*
1971 Initialize a malloc_state struct.
1972
1973 This is called from ptmalloc_init () or from _int_new_arena ()
1974 when creating a new arena.
1975 */
1976
1977 static void
1978 malloc_init_state (mstate av)
1979 {
1980 int i;
1981 mbinptr bin;
1982
1983 /* Establish circular links for normal bins */
1984 for (i = 1; i < NBINS; ++i)
1985 {
1986 bin = bin_at (av, i);
1987 bin->fd = bin->bk = bin;
1988 }
1989
1990 #if MORECORE_CONTIGUOUS
1991 if (av != &main_arena)
1992 #endif
1993 set_noncontiguous (av);
1994 if (av == &main_arena)
1995 set_max_fast (DEFAULT_MXFAST);
1996 atomic_store_relaxed (&av->have_fastchunks, false);
1997
1998 av->top = initial_top (av);
1999 }
2000
2001 /*
2002 Other internal utilities operating on mstates
2003 */
2004
2005 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
2006 static int systrim (size_t, mstate);
2007 static void malloc_consolidate (mstate);
2008
2009
2010 /* -------------- Early definitions for debugging hooks ---------------- */
2011
2012 /* Define and initialize the hook variables. These weak definitions must
2013 appear before any use of the variables in a function (arena.c uses one). */
2014 #ifndef weak_variable
2015 /* In GNU libc we want the hook variables to be weak definitions to
2016 avoid a problem with Emacs. */
2017 # define weak_variable weak_function
2018 #endif
2019
2020 /* Forward declarations. */
2021 static void *malloc_hook_ini (size_t sz,
2022 const void *caller) __THROW;
2023 static void *realloc_hook_ini (void *ptr, size_t sz,
2024 const void *caller) __THROW;
2025 static void *memalign_hook_ini (size_t alignment, size_t sz,
2026 const void *caller) __THROW;
2027
2028 #if HAVE_MALLOC_INIT_HOOK
2029 void (*__malloc_initialize_hook) (void);
2030 compat_symbol (libc, __malloc_initialize_hook,
2031 __malloc_initialize_hook, GLIBC_2_0);
2032 #endif
2033
2034 void weak_variable (*__free_hook) (void *__ptr,
2035 const void *) = NULL;
2036 void *weak_variable (*__malloc_hook)
2037 (size_t __size, const void *) = malloc_hook_ini;
2038 void *weak_variable (*__realloc_hook)
2039 (void *__ptr, size_t __size, const void *)
2040 = realloc_hook_ini;
2041 void *weak_variable (*__memalign_hook)
2042 (size_t __alignment, size_t __size, const void *)
2043 = memalign_hook_ini;
2044 void weak_variable (*__after_morecore_hook) (void) = NULL;
2045
2046 /* This function is called from the arena shutdown hook, to free the
2047 thread cache (if it exists). */
2048 static void tcache_thread_shutdown (void);
2049
2050 /* ------------------ Testing support ----------------------------------*/
2051
2052 static int perturb_byte;
2053
2054 static void
2055 alloc_perturb (char *p, size_t n)
2056 {
2057 if (__glibc_unlikely (perturb_byte))
2058 memset (p, perturb_byte ^ 0xff, n);
2059 }
2060
2061 static void
2062 free_perturb (char *p, size_t n)
2063 {
2064 if (__glibc_unlikely (perturb_byte))
2065 memset (p, perturb_byte, n);
2066 }
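/* Usage sketch (not part of the allocator itself): the perturb byte can be
   set with mallopt or the MALLOC_PERTURB_ environment variable.  With a
   perturb byte of 0xaa, newly allocated bytes read as 0xaa ^ 0xff == 0x55
   until written, and freed memory is overwritten with 0xaa, which helps
   catch use of uninitialized or freed memory.  For example:

     #include <malloc.h>
     #include <stdlib.h>

     int
     main (void)
     {
       mallopt (M_PERTURB, 0xaa);
       unsigned char *p = malloc (16);   // bytes now read back as 0x55
       free (p);                         // allocator fills them with 0xaa
       return 0;
     }
*/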
2067
2068
2069
2070 #include <stap-probe.h>
2071
2072 /* ------------------- Support for multiple arenas -------------------- */
2073 #include "arena.c"
2074
2075 /*
2076 Debugging support
2077
2078 These routines make a number of assertions about the states
2079 of data structures that should be true at all times. If any
2080 are not true, it's very likely that a user program has somehow
2081 trashed memory. (It's also possible that there is a coding error
2082 in malloc, in which case please report it!)
2083 */
2084
2085 #if !MALLOC_DEBUG
2086
2087 # define check_chunk(A, P)
2088 # define check_free_chunk(A, P)
2089 # define check_inuse_chunk(A, P)
2090 # define check_remalloced_chunk(A, P, N)
2091 # define check_malloced_chunk(A, P, N)
2092 # define check_malloc_state(A)
2093
2094 #else
2095
2096 # define check_chunk(A, P) do_check_chunk (A, P)
2097 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
2098 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2099 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2100 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2101 # define check_malloc_state(A) do_check_malloc_state (A)
2102
2103 /*
2104 Properties of all chunks
2105 */
2106
2107 static void
2108 do_check_chunk (mstate av, mchunkptr p)
2109 {
2110 unsigned long sz = chunksize (p);
2111 /* min and max possible addresses assuming contiguous allocation */
2112 char *max_address = (char *) (av->top) + chunksize (av->top);
2113 char *min_address = max_address - av->system_mem;
2114
2115 if (!chunk_is_mmapped (p))
2116 {
2117 /* Has legal address ... */
2118 if (p != av->top)
2119 {
2120 if (contiguous (av))
2121 {
2122 assert (((char *) p) >= min_address);
2123 assert (((char *) p + sz) <= ((char *) (av->top)));
2124 }
2125 }
2126 else
2127 {
2128 /* top size is always at least MINSIZE */
2129 assert ((unsigned long) (sz) >= MINSIZE);
2130 /* top predecessor always marked inuse */
2131 assert (prev_inuse (p));
2132 }
2133 }
2134 else if (!DUMPED_MAIN_ARENA_CHUNK (p))
2135 {
2136 /* address is outside main heap */
2137 if (contiguous (av) && av->top != initial_top (av))
2138 {
2139 assert (((char *) p) < min_address || ((char *) p) >= max_address);
2140 }
2141 /* chunk is page-aligned */
2142 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
2143 /* mem is aligned */
2144 assert (aligned_OK (chunk2mem (p)));
2145 }
2146 }
2147
2148 /*
2149 Properties of free chunks
2150 */
2151
2152 static void
2153 do_check_free_chunk (mstate av, mchunkptr p)
2154 {
2155 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2156 mchunkptr next = chunk_at_offset (p, sz);
2157
2158 do_check_chunk (av, p);
2159
2160 /* Chunk must claim to be free ... */
2161 assert (!inuse (p));
2162 assert (!chunk_is_mmapped (p));
2163
2164 /* Unless a special marker, must have OK fields */
2165 if ((unsigned long) (sz) >= MINSIZE)
2166 {
2167 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2168 assert (aligned_OK (chunk2mem (p)));
2169 /* ... matching footer field */
2170 assert (prev_size (next_chunk (p)) == sz);
2171 /* ... and is fully consolidated */
2172 assert (prev_inuse (p));
2173 assert (next == av->top || inuse (next));
2174
2175 /* ... and has minimally sane links */
2176 assert (p->fd->bk == p);
2177 assert (p->bk->fd == p);
2178 }
2179 else /* markers are always of size SIZE_SZ */
2180 assert (sz == SIZE_SZ);
2181 }
2182
2183 /*
2184 Properties of inuse chunks
2185 */
2186
2187 static void
2188 do_check_inuse_chunk (mstate av, mchunkptr p)
2189 {
2190 mchunkptr next;
2191
2192 do_check_chunk (av, p);
2193
2194 if (chunk_is_mmapped (p))
2195 return; /* mmapped chunks have no next/prev */
2196
2197 /* Check whether it claims to be in use ... */
2198 assert (inuse (p));
2199
2200 next = next_chunk (p);
2201
2202 /* ... and is surrounded by OK chunks.
2203 Since more things can be checked with free chunks than inuse ones,
2204 if an inuse chunk borders them and debug is on, it's worth doing them.
2205 */
2206 if (!prev_inuse (p))
2207 {
2208 /* Note that we cannot even look at prev unless it is not inuse */
2209 mchunkptr prv = prev_chunk (p);
2210 assert (next_chunk (prv) == p);
2211 do_check_free_chunk (av, prv);
2212 }
2213
2214 if (next == av->top)
2215 {
2216 assert (prev_inuse (next));
2217 assert (chunksize (next) >= MINSIZE);
2218 }
2219 else if (!inuse (next))
2220 do_check_free_chunk (av, next);
2221 }
2222
2223 /*
2224 Properties of chunks recycled from fastbins
2225 */
2226
2227 static void
2228 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2229 {
2230 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2231
2232 if (!chunk_is_mmapped (p))
2233 {
2234 assert (av == arena_for_chunk (p));
2235 if (chunk_main_arena (p))
2236 assert (av == &main_arena);
2237 else
2238 assert (av != &main_arena);
2239 }
2240
2241 do_check_inuse_chunk (av, p);
2242
2243 /* Legal size ... */
2244 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2245 assert ((unsigned long) (sz) >= MINSIZE);
2246 /* ... and alignment */
2247 assert (aligned_OK (chunk2mem (p)));
2248 /* chunk is less than MINSIZE more than request */
2249 assert ((long) (sz) - (long) (s) >= 0);
2250 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2251 }
2252
2253 /*
2254 Properties of nonrecycled chunks at the point they are malloced
2255 */
2256
2257 static void
2258 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2259 {
2260 /* same as recycled case ... */
2261 do_check_remalloced_chunk (av, p, s);
2262
2263 /*
2264 ... plus, must obey implementation invariant that prev_inuse is
2265 always true of any allocated chunk; i.e., that each allocated
2266 chunk borders either a previously allocated and still in-use
2267 chunk, or the base of its memory arena. This is ensured
2268 by making all allocations from the `lowest' part of any found
2269 chunk. This does not necessarily hold however for chunks
2270 recycled via fastbins.
2271 */
2272
2273 assert (prev_inuse (p));
2274 }
2275
2276
2277 /*
2278 Properties of malloc_state.
2279
2280 This may be useful for debugging malloc, as well as detecting user
2281 programmer errors that somehow write into malloc_state.
2282
2283 If you are extending or experimenting with this malloc, you can
2284 probably figure out how to hack this routine to print out or
2285 display chunk addresses, sizes, bins, and other instrumentation.
2286 */
2287
2288 static void
2289 do_check_malloc_state (mstate av)
2290 {
2291 int i;
2292 mchunkptr p;
2293 mchunkptr q;
2294 mbinptr b;
2295 unsigned int idx;
2296 INTERNAL_SIZE_T size;
2297 unsigned long total = 0;
2298 int max_fast_bin;
2299
2300 /* internal size_t must be no wider than pointer type */
2301 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2302
2303 /* alignment is a power of 2 */
2304 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2305
2306 /* Check the arena is initialized. */
2307 assert (av->top != 0);
2308
2309 /* No memory has been allocated yet, so doing more tests is not possible. */
2310 if (av->top == initial_top (av))
2311 return;
2312
2313 /* pagesize is a power of 2 */
2314 assert (powerof2(GLRO (dl_pagesize)));
2315
2316 /* A contiguous main_arena is consistent with sbrk_base. */
2317 if (av == &main_arena && contiguous (av))
2318 assert ((char *) mp_.sbrk_base + av->system_mem ==
2319 (char *) av->top + chunksize (av->top));
2320
2321 /* properties of fastbins */
2322
2323 /* max_fast is in allowed range */
2324 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2325
2326 max_fast_bin = fastbin_index (get_max_fast ());
2327
2328 for (i = 0; i < NFASTBINS; ++i)
2329 {
2330 p = fastbin (av, i);
2331
2332 /* The following test can only be performed for the main arena.
2333 While mallopt calls malloc_consolidate to get rid of all fast
2334 bins (especially those larger than the new maximum), this only
2335 happens for the main arena. Trying to do this for any
2336 other arena would mean those arenas have to be locked and
2337 malloc_consolidate be called for them. This is excessive. And
2338 even if this is acceptable to somebody it still cannot solve
2339 the problem completely since if the arena is locked a
2340 concurrent malloc call might create a new arena which then
2341 could use the newly invalid fast bins. */
2342
2343 /* all bins past max_fast are empty */
2344 if (av == &main_arena && i > max_fast_bin)
2345 assert (p == 0);
2346
2347 while (p != 0)
2348 {
2349 if (__glibc_unlikely (misaligned_chunk (p)))
2350 malloc_printerr ("do_check_malloc_state(): "
2351 "unaligned fastbin chunk detected");
2352 /* each chunk claims to be inuse */
2353 do_check_inuse_chunk (av, p);
2354 total += chunksize (p);
2355 /* chunk belongs in this bin */
2356 assert (fastbin_index (chunksize (p)) == i);
2357 p = REVEAL_PTR (p->fd);
2358 }
2359 }
2360
2361 /* check normal bins */
2362 for (i = 1; i < NBINS; ++i)
2363 {
2364 b = bin_at (av, i);
2365
2366 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2367 if (i >= 2)
2368 {
2369 unsigned int binbit = get_binmap (av, i);
2370 int empty = last (b) == b;
2371 if (!binbit)
2372 assert (empty);
2373 else if (!empty)
2374 assert (binbit);
2375 }
2376
2377 for (p = last (b); p != b; p = p->bk)
2378 {
2379 /* each chunk claims to be free */
2380 do_check_free_chunk (av, p);
2381 size = chunksize (p);
2382 total += size;
2383 if (i >= 2)
2384 {
2385 /* chunk belongs in bin */
2386 idx = bin_index (size);
2387 assert (idx == i);
2388 /* lists are sorted */
2389 assert (p->bk == b ||
2390 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2391
2392 if (!in_smallbin_range (size))
2393 {
2394 if (p->fd_nextsize != NULL)
2395 {
2396 if (p->fd_nextsize == p)
2397 assert (p->bk_nextsize == p);
2398 else
2399 {
2400 if (p->fd_nextsize == first (b))
2401 assert (chunksize (p) < chunksize (p->fd_nextsize));
2402 else
2403 assert (chunksize (p) > chunksize (p->fd_nextsize));
2404
2405 if (p == first (b))
2406 assert (chunksize (p) > chunksize (p->bk_nextsize));
2407 else
2408 assert (chunksize (p) < chunksize (p->bk_nextsize));
2409 }
2410 }
2411 else
2412 assert (p->bk_nextsize == NULL);
2413 }
2414 }
2415 else if (!in_smallbin_range (size))
2416 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2417 /* chunk is followed by a legal chain of inuse chunks */
2418 for (q = next_chunk (p);
2419 (q != av->top && inuse (q) &&
2420 (unsigned long) (chunksize (q)) >= MINSIZE);
2421 q = next_chunk (q))
2422 do_check_inuse_chunk (av, q);
2423 }
2424 }
2425
2426 /* top chunk is OK */
2427 check_chunk (av, av->top);
2428 }
2429 #endif
2430
2431
2432 /* ----------------- Support for debugging hooks -------------------- */
2433 #include "hooks.c"
2434
2435
2436 /* ----------- Routines dealing with system allocation -------------- */
2437
2438 /*
2439 sysmalloc handles malloc cases requiring more memory from the system.
2440 On entry, it is assumed that av->top does not have enough
2441 space to service request for nb bytes, thus requiring that av->top
2442 be extended or replaced.
2443 */
2444
2445 static void *
2446 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2447 {
2448 mchunkptr old_top; /* incoming value of av->top */
2449 INTERNAL_SIZE_T old_size; /* its size */
2450 char *old_end; /* its end address */
2451
2452 long size; /* arg to first MORECORE or mmap call */
2453 char *brk; /* return value from MORECORE */
2454
2455 long correction; /* arg to 2nd MORECORE call */
2456 char *snd_brk; /* 2nd return val */
2457
2458 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2459 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2460 char *aligned_brk; /* aligned offset into brk */
2461
2462 mchunkptr p; /* the allocated/returned chunk */
2463 mchunkptr remainder; /* remainder from allocation */
2464 unsigned long remainder_size; /* its size */
2465
2466
2467 size_t pagesize = GLRO (dl_pagesize);
2468 bool tried_mmap = false;
2469
2470
2471 /*
2472 If have mmap, and the request size meets the mmap threshold, and
2473 the system supports mmap, and there are few enough currently
2474 allocated mmapped regions, try to directly map this request
2475 rather than expanding top.
2476 */
2477
2478 if (av == NULL
2479 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2480 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2481 {
2482 char *mm; /* return value from mmap call*/
2483
2484 try_mmap:
2485 /*
2486 Round up size to nearest page. For mmapped chunks, the overhead
2487 is one SIZE_SZ unit larger than for normal chunks, because there
2488 is no following chunk whose prev_size field could be used.
2489
2490 See the front_misalign handling below, for glibc there is no
2491 need for further alignments unless we have high alignment.
2492 */
2493 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2494 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2495 else
2496 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2497 tried_mmap = true;
2498
2499 /* Don't try if size wraps around 0 */
2500 if ((unsigned long) (size) > (unsigned long) (nb))
2501 {
2502 mm = (char *) (MMAP (0, size,
2503 mtag_mmap_flags | PROT_READ | PROT_WRITE, 0));
2504
2505 if (mm != MAP_FAILED)
2506 {
2507 /*
2508 The offset to the start of the mmapped region is stored
2509 in the prev_size field of the chunk. This allows us to adjust
2510 returned start address to meet alignment requirements here
2511 and in memalign(), and still be able to compute proper
2512 address argument for later munmap in free() and realloc().
2513 */
2514
2515 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2516 {
2517 /* For glibc, chunk2mem increases the address by
2518 CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
2519 CHUNK_HDR_SZ-1. Each mmap'ed area is page
2520 aligned and therefore definitely
2521 MALLOC_ALIGN_MASK-aligned. */
2522 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2523 front_misalign = 0;
2524 }
2525 else
2526 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2527 if (front_misalign > 0)
2528 {
2529 correction = MALLOC_ALIGNMENT - front_misalign;
2530 p = (mchunkptr) (mm + correction);
2531 set_prev_size (p, correction);
2532 set_head (p, (size - correction) | IS_MMAPPED);
2533 }
2534 else
2535 {
2536 p = (mchunkptr) mm;
2537 set_prev_size (p, 0);
2538 set_head (p, size | IS_MMAPPED);
2539 }
2540
2541 /* update statistics */
2542
2543 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2544 atomic_max (&mp_.max_n_mmaps, new);
2545
2546 unsigned long sum;
2547 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2548 atomic_max (&mp_.max_mmapped_mem, sum);
2549
2550 check_chunk (av, p);
2551
2552 return chunk2mem (p);
2553 }
2554 }
2555 }
2556
2557 /* There are no usable arenas and mmap also failed. */
2558 if (av == NULL)
2559 return 0;
2560
2561 /* Record incoming configuration of top */
2562
2563 old_top = av->top;
2564 old_size = chunksize (old_top);
2565 old_end = (char *) (chunk_at_offset (old_top, old_size));
2566
2567 brk = snd_brk = (char *) (MORECORE_FAILURE);
2568
2569 /*
2570 If not the first time through, we require old_size to be
2571 at least MINSIZE and to have prev_inuse set.
2572 */
2573
2574 assert ((old_top == initial_top (av) && old_size == 0) ||
2575 ((unsigned long) (old_size) >= MINSIZE &&
2576 prev_inuse (old_top) &&
2577 ((unsigned long) old_end & (pagesize - 1)) == 0));
2578
2579 /* Precondition: not enough current space to satisfy nb request */
2580 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2581
2582
2583 if (av != &main_arena)
2584 {
2585 heap_info *old_heap, *heap;
2586 size_t old_heap_size;
2587
2588 /* First try to extend the current heap. */
2589 old_heap = heap_for_ptr (old_top);
2590 old_heap_size = old_heap->size;
2591 if ((long) (MINSIZE + nb - old_size) > 0
2592 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2593 {
2594 av->system_mem += old_heap->size - old_heap_size;
2595 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2596 | PREV_INUSE);
2597 }
2598 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2599 {
2600 /* Use a newly allocated heap. */
2601 heap->ar_ptr = av;
2602 heap->prev = old_heap;
2603 av->system_mem += heap->size;
2604 /* Set up the new top. */
2605 top (av) = chunk_at_offset (heap, sizeof (*heap));
2606 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2607
2608 /* Setup fencepost and free the old top chunk with a multiple of
2609 MALLOC_ALIGNMENT in size. */
2610 /* The fencepost takes at least MINSIZE bytes, because it might
2611 become the top chunk again later. Note that a footer is set
2612 up, too, although the chunk is marked in use. */
2613 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2614 set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
2615 0 | PREV_INUSE);
2616 if (old_size >= MINSIZE)
2617 {
2618 set_head (chunk_at_offset (old_top, old_size),
2619 CHUNK_HDR_SZ | PREV_INUSE);
2620 set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
2621 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2622 _int_free (av, old_top, 1);
2623 }
2624 else
2625 {
2626 set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
2627 set_foot (old_top, (old_size + CHUNK_HDR_SZ));
2628 }
2629 }
2630 else if (!tried_mmap)
2631 /* We can at least try to use mmap memory. */
2632 goto try_mmap;
2633 }
2634 else /* av == main_arena */
2635
2636
2637 { /* Request enough space for nb + pad + overhead */
2638 size = nb + mp_.top_pad + MINSIZE;
2639
2640 /*
2641 If contiguous, we can subtract out existing space that we hope to
2642 combine with new space. We add it back later only if
2643 we don't actually get contiguous space.
2644 */
2645
2646 if (contiguous (av))
2647 size -= old_size;
2648
2649 /*
2650 Round to a multiple of page size.
2651 If MORECORE is not contiguous, this ensures that we only call it
2652 with whole-page arguments. And if MORECORE is contiguous and
2653 this is not first time through, this preserves page-alignment of
2654 previous calls. Otherwise, we correct to page-align below.
2655 */
2656
2657 size = ALIGN_UP (size, pagesize);
2658
2659 /*
2660 Don't try to call MORECORE if argument is so big as to appear
2661 negative. Note that since mmap takes size_t arg, it may succeed
2662 below even if we cannot call MORECORE.
2663 */
2664
2665 if (size > 0)
2666 {
2667 brk = (char *) (MORECORE (size));
2668 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2669 }
2670
2671 if (brk != (char *) (MORECORE_FAILURE))
2672 {
2673 /* Call the `morecore' hook if necessary. */
2674 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2675 if (__builtin_expect (hook != NULL, 0))
2676 (*hook)();
2677 }
2678 else
2679 {
2680 /*
2681 If have mmap, try using it as a backup when MORECORE fails or
2682 cannot be used. This is worth doing on systems that have "holes" in
2683 address space, so sbrk cannot extend to give contiguous space, but
2684 space is available elsewhere. Note that we ignore mmap max count
2685 and threshold limits, since the space will not be used as a
2686 segregated mmap region.
2687 */
2688
2689 /* Cannot merge with old top, so add its size back in */
2690 if (contiguous (av))
2691 size = ALIGN_UP (size + old_size, pagesize);
2692
2693 /* If we are relying on mmap as backup, then use larger units */
2694 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2695 size = MMAP_AS_MORECORE_SIZE;
2696
2697 /* Don't try if size wraps around 0 */
2698 if ((unsigned long) (size) > (unsigned long) (nb))
2699 {
2700 char *mbrk = (char *) (MMAP (0, size,
2701 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2702 0));
2703
2704 if (mbrk != MAP_FAILED)
2705 {
2706 /* We do not need, and cannot use, another sbrk call to find end */
2707 brk = mbrk;
2708 snd_brk = brk + size;
2709
2710 /*
2711 Record that we no longer have a contiguous sbrk region.
2712 After the first time mmap is used as backup, we do not
2713 ever rely on contiguous space since this could incorrectly
2714 bridge regions.
2715 */
2716 set_noncontiguous (av);
2717 }
2718 }
2719 }
2720
2721 if (brk != (char *) (MORECORE_FAILURE))
2722 {
2723 if (mp_.sbrk_base == 0)
2724 mp_.sbrk_base = brk;
2725 av->system_mem += size;
2726
2727 /*
2728 If MORECORE extends previous space, we can likewise extend top size.
2729 */
2730
2731 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2732 set_head (old_top, (size + old_size) | PREV_INUSE);
2733
2734 else if (contiguous (av) && old_size && brk < old_end)
2735 /* Oops! Someone else killed our space... Can't touch anything. */
2736 malloc_printerr ("break adjusted to free malloc space");
2737
2738 /*
2739 Otherwise, make adjustments:
2740
2741 * If the first time through or noncontiguous, we need to call sbrk
2742 just to find out where the end of memory lies.
2743
2744 * We need to ensure that all returned chunks from malloc will meet
2745 MALLOC_ALIGNMENT
2746
2747 * If there was an intervening foreign sbrk, we need to adjust sbrk
2748 request size to account for fact that we will not be able to
2749 combine new space with existing space in old_top.
2750
2751 * Almost all systems internally allocate whole pages at a time, in
2752 which case we might as well use the whole last page of request.
2753 So we allocate enough more memory to hit a page boundary now,
2754 which in turn causes future contiguous calls to page-align.
2755 */
2756
2757 else
2758 {
2759 front_misalign = 0;
2760 end_misalign = 0;
2761 correction = 0;
2762 aligned_brk = brk;
2763
2764 /* handle contiguous cases */
2765 if (contiguous (av))
2766 {
2767 /* Count foreign sbrk as system_mem. */
2768 if (old_size)
2769 av->system_mem += brk - old_end;
2770
2771 /* Guarantee alignment of first new chunk made from this space */
2772
2773 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2774 if (front_misalign > 0)
2775 {
2776 /*
2777 Skip over some bytes to arrive at an aligned position.
2778 We don't need to specially mark these wasted front bytes.
2779 They will never be accessed anyway because
2780 prev_inuse of av->top (and any chunk created from its start)
2781 is always true after initialization.
2782 */
2783
2784 correction = MALLOC_ALIGNMENT - front_misalign;
2785 aligned_brk += correction;
2786 }
2787
2788 /*
2789 If this isn't adjacent to existing space, then we will not
2790 be able to merge with old_top space, so must add to 2nd request.
2791 */
2792
2793 correction += old_size;
2794
2795 /* Extend the end address to hit a page boundary */
2796 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2797 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2798
2799 assert (correction >= 0);
2800 snd_brk = (char *) (MORECORE (correction));
2801
2802 /*
2803 If can't allocate correction, try to at least find out current
2804 brk. It might be enough to proceed without failing.
2805
2806 Note that if second sbrk did NOT fail, we assume that space
2807 is contiguous with first sbrk. This is a safe assumption unless
2808 program is multithreaded but doesn't use locks and a foreign sbrk
2809 occurred between our first and second calls.
2810 */
2811
2812 if (snd_brk == (char *) (MORECORE_FAILURE))
2813 {
2814 correction = 0;
2815 snd_brk = (char *) (MORECORE (0));
2816 }
2817 else
2818 {
2819 /* Call the `morecore' hook if necessary. */
2820 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2821 if (__builtin_expect (hook != NULL, 0))
2822 (*hook)();
2823 }
2824 }
2825
2826 /* handle non-contiguous cases */
2827 else
2828 {
2829 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2830 /* MORECORE/mmap must correctly align */
2831 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2832 else
2833 {
2834 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2835 if (front_misalign > 0)
2836 {
2837 /*
2838 Skip over some bytes to arrive at an aligned position.
2839 We don't need to specially mark these wasted front bytes.
2840 They will never be accessed anyway because
2841 prev_inuse of av->top (and any chunk created from its start)
2842 is always true after initialization.
2843 */
2844
2845 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2846 }
2847 }
2848
2849 /* Find out current end of memory */
2850 if (snd_brk == (char *) (MORECORE_FAILURE))
2851 {
2852 snd_brk = (char *) (MORECORE (0));
2853 }
2854 }
2855
2856 /* Adjust top based on results of second sbrk */
2857 if (snd_brk != (char *) (MORECORE_FAILURE))
2858 {
2859 av->top = (mchunkptr) aligned_brk;
2860 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2861 av->system_mem += correction;
2862
2863 /*
2864 If not the first time through, we either have a
2865 gap due to foreign sbrk or a non-contiguous region. Insert a
2866 double fencepost at old_top to prevent consolidation with space
2867 we don't own. These fenceposts are artificial chunks that are
2868 marked as inuse and are in any case too small to use. We need
2869 two to make sizes and alignments work out.
2870 */
2871
2872 if (old_size != 0)
2873 {
2874 /*
2875 Shrink old_top to insert fenceposts, keeping size a
2876 multiple of MALLOC_ALIGNMENT. We know there is at least
2877 enough space in old_top to do this.
2878 */
2879 old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
2880 set_head (old_top, old_size | PREV_INUSE);
2881
2882 /*
2883 Note that the following assignments completely overwrite
2884 old_top when old_size was previously MINSIZE. This is
2885 intentional. We need the fencepost, even if old_top otherwise gets
2886 lost.
2887 */
2888 set_head (chunk_at_offset (old_top, old_size),
2889 CHUNK_HDR_SZ | PREV_INUSE);
2890 set_head (chunk_at_offset (old_top,
2891 old_size + CHUNK_HDR_SZ),
2892 CHUNK_HDR_SZ | PREV_INUSE);
2893
2894 /* If possible, release the rest. */
2895 if (old_size >= MINSIZE)
2896 {
2897 _int_free (av, old_top, 1);
2898 }
2899 }
2900 }
2901 }
2902 }
2903 } /* if (av != &main_arena) */
2904
2905 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2906 av->max_system_mem = av->system_mem;
2907 check_malloc_state (av);
2908
2909 /* finally, do the allocation */
2910 p = av->top;
2911 size = chunksize (p);
2912
2913 /* check that one of the above allocation paths succeeded */
2914 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2915 {
2916 remainder_size = size - nb;
2917 remainder = chunk_at_offset (p, nb);
2918 av->top = remainder;
2919 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2920 set_head (remainder, remainder_size | PREV_INUSE);
2921 check_malloced_chunk (av, p, nb);
2922 return chunk2mem (p);
2923 }
2924
2925 /* catch all failure paths */
2926 __set_errno (ENOMEM);
2927 return 0;
2928 }
2929
2930
2931 /*
2932 systrim is an inverse of sorts to sysmalloc. It gives memory back
2933 to the system (via negative arguments to sbrk) if there is unused
2934 memory at the `high' end of the malloc pool. It is called
2935 automatically by free() when top space exceeds the trim
2936 threshold. It is also called by the public malloc_trim routine. It
2937 returns 1 if it actually released any memory, else 0.
2938 */
2939
2940 static int
2941 systrim (size_t pad, mstate av)
2942 {
2943 long top_size; /* Amount of top-most memory */
2944 long extra; /* Amount to release */
2945 long released; /* Amount actually released */
2946 char *current_brk; /* address returned by pre-check sbrk call */
2947 char *new_brk; /* address returned by post-check sbrk call */
2948 size_t pagesize;
2949 long top_area;
2950
2951 pagesize = GLRO (dl_pagesize);
2952 top_size = chunksize (av->top);
2953
2954 top_area = top_size - MINSIZE - 1;
2955 if (top_area <= pad)
2956 return 0;
2957
2958 /* Release in pagesize units and round down to the nearest page. */
2959 extra = ALIGN_DOWN(top_area - pad, pagesize);
2960
2961 if (extra == 0)
2962 return 0;
2963
2964 /*
2965 Only proceed if end of memory is where we last set it.
2966 This avoids problems if there were foreign sbrk calls.
2967 */
2968 current_brk = (char *) (MORECORE (0));
2969 if (current_brk == (char *) (av->top) + top_size)
2970 {
2971 /*
2972 Attempt to release memory. We ignore MORECORE return value,
2973 and instead call again to find out where new end of memory is.
2974 This avoids problems if first call releases less than we asked,
2975 or if failure somehow altered brk value. (We could still
2976 encounter problems if it altered brk in some very bad way,
2977 but the only thing we can do is adjust anyway, which will cause
2978 some downstream failure.)
2979 */
2980
2981 MORECORE (-extra);
2982 /* Call the `morecore' hook if necessary. */
2983 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2984 if (__builtin_expect (hook != NULL, 0))
2985 (*hook)();
2986 new_brk = (char *) (MORECORE (0));
2987
2988 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2989
2990 if (new_brk != (char *) MORECORE_FAILURE)
2991 {
2992 released = (long) (current_brk - new_brk);
2993
2994 if (released != 0)
2995 {
2996 /* Success. Adjust top. */
2997 av->system_mem -= released;
2998 set_head (av->top, (top_size - released) | PREV_INUSE);
2999 check_malloc_state (av);
3000 return 1;
3001 }
3002 }
3003 }
3004 return 0;
3005 }
3006
3007 static void
3008 munmap_chunk (mchunkptr p)
3009 {
3010 size_t pagesize = GLRO (dl_pagesize);
3011 INTERNAL_SIZE_T size = chunksize (p);
3012
3013 assert (chunk_is_mmapped (p));
3014
3015 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
3016 main arena. We never free this memory. */
3017 if (DUMPED_MAIN_ARENA_CHUNK (p))
3018 return;
3019
3020 uintptr_t mem = (uintptr_t) chunk2mem (p);
3021 uintptr_t block = (uintptr_t) p - prev_size (p);
3022 size_t total_size = prev_size (p) + size;
3023 /* Unfortunately we have to do the compiler's job by hand here. Normally
3024 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3025 page size. But gcc does not recognize the optimization possibility
3026 (at the moment, at least) so we combine the two values into one before
3027 the bit test. */
3028 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3029 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3030 malloc_printerr ("munmap_chunk(): invalid pointer");
3031
3032 atomic_decrement (&mp_.n_mmaps);
3033 atomic_add (&mp_.mmapped_mem, -total_size);
3034
3035 /* If munmap fails, the process virtual memory address space is in a
3036 bad shape. Just leave the block hanging around; the process will
3037 terminate shortly anyway since not much can be done. */
3038 __munmap ((char *) block, total_size);
3039 }
3040
3041 #if HAVE_MREMAP
3042
3043 static mchunkptr
3044 mremap_chunk (mchunkptr p, size_t new_size)
3045 {
3046 size_t pagesize = GLRO (dl_pagesize);
3047 INTERNAL_SIZE_T offset = prev_size (p);
3048 INTERNAL_SIZE_T size = chunksize (p);
3049 char *cp;
3050
3051 assert (chunk_is_mmapped (p));
3052
3053 uintptr_t block = (uintptr_t) p - offset;
3054 uintptr_t mem = (uintptr_t) chunk2mem(p);
3055 size_t total_size = offset + size;
3056 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3057 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3058 malloc_printerr("mremap_chunk(): invalid pointer");
3059
3060 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3061 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
3062
3063 /* No need to remap if the number of pages does not change. */
3064 if (total_size == new_size)
3065 return p;
3066
3067 cp = (char *) __mremap ((char *) block, total_size, new_size,
3068 MREMAP_MAYMOVE);
3069
3070 if (cp == MAP_FAILED)
3071 return 0;
3072
3073 p = (mchunkptr) (cp + offset);
3074
3075 assert (aligned_OK (chunk2mem (p)));
3076
3077 assert (prev_size (p) == offset);
3078 set_head (p, (new_size - offset) | IS_MMAPPED);
3079
3080 INTERNAL_SIZE_T new;
3081 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
3082 + new_size - size - offset;
3083 atomic_max (&mp_.max_mmapped_mem, new);
3084 return p;
3085 }
3086 #endif /* HAVE_MREMAP */
3087
3088 /*------------------------ Public wrappers. --------------------------------*/
3089
3090 #if USE_TCACHE
3091
3092 /* We overlay this structure on the user-data portion of a chunk when
3093 the chunk is stored in the per-thread cache. */
3094 typedef struct tcache_entry
3095 {
3096 struct tcache_entry *next;
3097 /* This field exists to detect double frees. */
3098 uintptr_t key;
3099 } tcache_entry;
3100
3101 /* There is one of these for each thread, which contains the
3102 per-thread cache (hence "tcache_perthread_struct"). Keeping
3103 overall size low is mildly important. Note that COUNTS and ENTRIES
3104 are redundant (we could have just counted the linked list each
3105 time); keeping the counts is purely a performance optimization. */
3106 typedef struct tcache_perthread_struct
3107 {
3108 uint16_t counts[TCACHE_MAX_BINS];
3109 tcache_entry *entries[TCACHE_MAX_BINS];
3110 } tcache_perthread_struct;
3111
3112 static __thread bool tcache_shutting_down = false;
3113 static __thread tcache_perthread_struct *tcache = NULL;
3114
3115 /* Process-wide key to try and catch a double-free in the same thread. */
3116 static uintptr_t tcache_key;
3117
3118 /* The value of tcache_key does not really have to be a cryptographically
3119 secure random number. It only needs to be arbitrary enough so that it does
3120 not collide with values present in applications. If a collision does happen
3121 consistently enough, it could cause a degradation in performance since the
3122 entire list is scanned to verify that the block has indeed been freed a
3123 second time. The odds of this happening are exceedingly low though, about 1
3124 in 2^wordsize. There is probably a higher chance of the performance
3125 degradation being due to a double free where the first free happened in a
3126 different thread; that's a case this check does not cover. */
3127 static void
3128 tcache_key_initialize (void)
3129 {
3130 if (__getrandom (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
3131 != sizeof (tcache_key))
3132 {
3133 tcache_key = random_bits ();
3134 #if __WORDSIZE == 64
3135 tcache_key = (tcache_key << 32) | random_bits ();
3136 #endif
3137 }
3138 }
3139
3140 /* Caller must ensure that we know tc_idx is valid and there's room
3141 for more chunks. */
3142 static __always_inline void
3143 tcache_put (mchunkptr chunk, size_t tc_idx)
3144 {
3145 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
3146
3147 /* Mark this chunk as "in the tcache" so the test in _int_free will
3148 detect a double free. */
3149 e->key = tcache_key;
3150
3151 e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
3152 tcache->entries[tc_idx] = e;
3153 ++(tcache->counts[tc_idx]);
3154 }
3155
3156 /* Caller must ensure that we know tc_idx is valid and there are
3157 chunks available to remove. */
3158 static __always_inline void *
3159 tcache_get (size_t tc_idx)
3160 {
3161 tcache_entry *e = tcache->entries[tc_idx];
3162 if (__glibc_unlikely (!aligned_OK (e)))
3163 malloc_printerr ("malloc(): unaligned tcache chunk detected");
3164 tcache->entries[tc_idx] = REVEAL_PTR (e->next);
3165 --(tcache->counts[tc_idx]);
3166 e->key = 0;
3167 return (void *) e;
3168 }
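/* A minimal standalone sketch of the pointer mangling applied by
   PROTECT_PTR/REVEAL_PTR above (defined earlier in this file): roughly,
   the stored pointer is XORed with the slot's own address shifted right
   by the page bits, so forging an entry requires a heap address leak.
   Kept under "#if 0" so it is not compiled into this file; the addresses
   below are illustrative assumptions only.  */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Encode NEXT for storage at address SLOT; decoding applies the same XOR.  */
static uintptr_t
safe_link (uintptr_t slot, uintptr_t next)
{
  return (slot >> 12) ^ next;
}

int
main (void)
{
  uintptr_t slot = 0x7f0000001010u;    /* hypothetical address of e->next    */
  uintptr_t next = 0x7f0000002020u;    /* hypothetical next entry in the bin */
  uintptr_t stored = safe_link (slot, next);
  assert (safe_link (slot, stored) == next);      /* XOR is its own inverse */
  printf ("stored %#lx decodes to %#lx\n",
          (unsigned long) stored, (unsigned long) safe_link (slot, stored));
  return 0;
}
#endif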
3169
3170 static void
3171 tcache_thread_shutdown (void)
3172 {
3173 int i;
3174 tcache_perthread_struct *tcache_tmp = tcache;
3175
3176 tcache_shutting_down = true;
3177
3178 if (!tcache)
3179 return;
3180
3181 /* Disable the tcache and prevent it from being reinitialized. */
3182 tcache = NULL;
3183
3184 /* Free all of the entries and the tcache itself back to the arena
3185 heap for coalescing. */
3186 for (i = 0; i < TCACHE_MAX_BINS; ++i)
3187 {
3188 while (tcache_tmp->entries[i])
3189 {
3190 tcache_entry *e = tcache_tmp->entries[i];
3191 if (__glibc_unlikely (!aligned_OK (e)))
3192 malloc_printerr ("tcache_thread_shutdown(): "
3193 "unaligned tcache chunk detected");
3194 tcache_tmp->entries[i] = REVEAL_PTR (e->next);
3195 __libc_free (e);
3196 }
3197 }
3198
3199 __libc_free (tcache_tmp);
3200 }
3201
3202 static void
3203 tcache_init(void)
3204 {
3205 mstate ar_ptr;
3206 void *victim = 0;
3207 const size_t bytes = sizeof (tcache_perthread_struct);
3208
3209 if (tcache_shutting_down)
3210 return;
3211
3212 arena_get (ar_ptr, bytes);
3213 victim = _int_malloc (ar_ptr, bytes);
3214 if (!victim && ar_ptr != NULL)
3215 {
3216 ar_ptr = arena_get_retry (ar_ptr, bytes);
3217 victim = _int_malloc (ar_ptr, bytes);
3218 }
3219
3220
3221 if (ar_ptr != NULL)
3222 __libc_lock_unlock (ar_ptr->mutex);
3223
3224 /* In a low memory situation, we may not be able to allocate memory
3225 - in which case, we just keep trying later. However, we
3226 typically do this very early, so either there is sufficient
3227 memory, or there isn't enough memory to do non-trivial
3228 allocations anyway. */
3229 if (victim)
3230 {
3231 tcache = (tcache_perthread_struct *) victim;
3232 memset (tcache, 0, sizeof (tcache_perthread_struct));
3233 }
3234
3235 }
3236
3237 # define MAYBE_INIT_TCACHE() \
3238 if (__glibc_unlikely (tcache == NULL)) \
3239 tcache_init();
3240
3241 #else /* !USE_TCACHE */
3242 # define MAYBE_INIT_TCACHE()
3243
3244 static void
3245 tcache_thread_shutdown (void)
3246 {
3247 /* Nothing to do if there is no thread cache. */
3248 }
3249
3250 #endif /* !USE_TCACHE */
3251
3252 void *
3253 __libc_malloc (size_t bytes)
3254 {
3255 mstate ar_ptr;
3256 void *victim;
3257
3258 _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
3259 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3260
3261 void *(*hook) (size_t, const void *)
3262 = atomic_forced_read (__malloc_hook);
3263 if (__builtin_expect (hook != NULL, 0))
3264 return (*hook)(bytes, RETURN_ADDRESS (0));
3265 #if USE_TCACHE
3266 /* _int_free also calls request2size, so be careful not to pad twice. */
3267 size_t tbytes;
3268 if (!checked_request2size (bytes, &tbytes))
3269 {
3270 __set_errno (ENOMEM);
3271 return NULL;
3272 }
3273 size_t tc_idx = csize2tidx (tbytes);
3274
3275 MAYBE_INIT_TCACHE ();
3276
3277 DIAG_PUSH_NEEDS_COMMENT;
3278 if (tc_idx < mp_.tcache_bins
3279 && tcache
3280 && tcache->counts[tc_idx] > 0)
3281 {
3282 victim = tcache_get (tc_idx);
3283 return tag_new_usable (victim);
3284 }
3285 DIAG_POP_NEEDS_COMMENT;
3286 #endif
3287
3288 if (SINGLE_THREAD_P)
3289 {
3290 victim = tag_new_usable (_int_malloc (&main_arena, bytes));
3291 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3292 &main_arena == arena_for_chunk (mem2chunk (victim)));
3293 return victim;
3294 }
3295
3296 arena_get (ar_ptr, bytes);
3297
3298 victim = _int_malloc (ar_ptr, bytes);
3299 /* Retry with another arena only if we were able to find a usable arena
3300 before. */
3301 if (!victim && ar_ptr != NULL)
3302 {
3303 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3304 ar_ptr = arena_get_retry (ar_ptr, bytes);
3305 victim = _int_malloc (ar_ptr, bytes);
3306 }
3307
3308 if (ar_ptr != NULL)
3309 __libc_lock_unlock (ar_ptr->mutex);
3310
3311 victim = tag_new_usable (victim);
3312
3313 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3314 ar_ptr == arena_for_chunk (mem2chunk (victim)));
3315 return victim;
3316 }
3317 libc_hidden_def (__libc_malloc)
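/* A minimal standalone sketch of the request-size arithmetic used above,
   assuming the usual 64-bit configuration (SIZE_SZ 8, MALLOC_ALIGNMENT 16,
   MINSIZE 32); the real checked_request2size and csize2tidx are defined
   earlier in this file.  Kept under "#if 0" so it is not compiled here.  */
#if 0
#include <stddef.h>
#include <stdio.h>

static size_t
sketch_request2size (size_t req)
{
  /* Add one size word of overhead and round up to the alignment,
     with a MINSIZE floor.  */
  size_t sz = (req + 8 + 15) & ~(size_t) 15;
  return sz < 32 ? 32 : sz;
}

static size_t
sketch_csize2tidx (size_t csize)
{
  /* One tcache bin per MALLOC_ALIGNMENT step, starting at MINSIZE.  */
  return (csize - 32 + 16 - 1) / 16;
}

int
main (void)
{
  for (size_t req = 1; req <= 64; req += 9)
    {
      size_t csz = sketch_request2size (req);
      printf ("request %2zu -> chunk size %2zu -> tcache bin %zu\n",
              req, csz, sketch_csize2tidx (csz));
    }
  return 0;
}
#endif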
3318
3319 void
3320 __libc_free (void *mem)
3321 {
3322 mstate ar_ptr;
3323 mchunkptr p; /* chunk corresponding to mem */
3324
3325 void (*hook) (void *, const void *)
3326 = atomic_forced_read (__free_hook);
3327 if (__builtin_expect (hook != NULL, 0))
3328 {
3329 (*hook)(mem, RETURN_ADDRESS (0));
3330 return;
3331 }
3332
3333 if (mem == 0) /* free(0) has no effect */
3334 return;
3335
3336 /* Quickly check that the freed pointer matches the tag for the memory.
3337 This gives a useful double-free detection. */
3338 if (__glibc_unlikely (mtag_enabled))
3339 *(volatile char *)mem;
3340
3341 int err = errno;
3342
3343 p = mem2chunk (mem);
3344
3345 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3346 {
3347 /* See if the dynamic brk/mmap threshold needs adjusting.
3348 Dumped fake mmapped chunks do not affect the threshold. */
3349 if (!mp_.no_dyn_threshold
3350 && chunksize_nomask (p) > mp_.mmap_threshold
3351 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
3352 && !DUMPED_MAIN_ARENA_CHUNK (p))
3353 {
3354 mp_.mmap_threshold = chunksize (p);
3355 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3356 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3357 mp_.mmap_threshold, mp_.trim_threshold);
3358 }
3359 munmap_chunk (p);
3360 }
3361 else
3362 {
3363 MAYBE_INIT_TCACHE ();
3364
3365 /* Mark the chunk as belonging to the library again. */
3366 (void)tag_region (chunk2mem (p), memsize (p));
3367
3368 ar_ptr = arena_for_chunk (p);
3369 _int_free (ar_ptr, p, 0);
3370 }
3371
3372 __set_errno (err);
3373 }
3374 libc_hidden_def (__libc_free)
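/* A minimal usage sketch of the dynamic mmap threshold handled above:
   freeing an mmapped chunk can raise mp_.mmap_threshold, while setting
   M_MMAP_THRESHOLD explicitly via mallopt is documented to pin it (the
   mp_.no_dyn_threshold case).  Kept under "#if 0"; the sizes are
   arbitrary examples.  */
#if 0
#include <malloc.h>
#include <stdlib.h>

int
main (void)
{
  /* Pin the threshold at 1 MiB instead of letting free() adapt it.  */
  mallopt (M_MMAP_THRESHOLD, 1 << 20);

  void *p = malloc (2 << 20);   /* large enough to be served by mmap */
  free (p);                     /* released with munmap; threshold unchanged */
  return 0;
}
#endif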
3375
3376 void *
3377 __libc_realloc (void *oldmem, size_t bytes)
3378 {
3379 mstate ar_ptr;
3380 INTERNAL_SIZE_T nb; /* padded request size */
3381
3382 void *newp; /* chunk to return */
3383
3384 void *(*hook) (void *, size_t, const void *) =
3385 atomic_forced_read (__realloc_hook);
3386 if (__builtin_expect (hook != NULL, 0))
3387 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3388
3389 #if REALLOC_ZERO_BYTES_FREES
3390 if (bytes == 0 && oldmem != NULL)
3391 {
3392 __libc_free (oldmem); return 0;
3393 }
3394 #endif
3395
3396 /* realloc of NULL is supposed to be the same as malloc. */
3397 if (oldmem == 0)
3398 return __libc_malloc (bytes);
3399
3400 /* Perform a quick check to ensure that the pointer's tag matches the
3401 memory's tag. */
3402 if (__glibc_unlikely (mtag_enabled))
3403 *(volatile char*) oldmem;
3404
3405 /* chunk corresponding to oldmem */
3406 const mchunkptr oldp = mem2chunk (oldmem);
3407 /* its size */
3408 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
3409
3410 if (chunk_is_mmapped (oldp))
3411 ar_ptr = NULL;
3412 else
3413 {
3414 MAYBE_INIT_TCACHE ();
3415 ar_ptr = arena_for_chunk (oldp);
3416 }
3417
3418 /* Little security check which won't hurt performance: the allocator
3419 never wraps around at the end of the address space. Therefore
3420 we can exclude some size values which might appear here by
3421 accident or by "design" from some intruder. We need to bypass
3422 this check for dumped fake mmap chunks from the old main arena
3423 because the new malloc may provide additional alignment. */
3424 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3425 || __builtin_expect (misaligned_chunk (oldp), 0))
3426 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
3427 malloc_printerr ("realloc(): invalid pointer");
3428
3429 if (!checked_request2size (bytes, &nb))
3430 {
3431 __set_errno (ENOMEM);
3432 return NULL;
3433 }
3434
3435 if (chunk_is_mmapped (oldp))
3436 {
3437 /* If this is a faked mmapped chunk from the dumped main arena,
3438 always make a copy (and do not free the old chunk). */
3439 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3440 {
3441 /* Must alloc, copy, free. */
3442 void *newmem = __libc_malloc (bytes);
3443 if (newmem == 0)
3444 return NULL;
3445 /* Copy as many bytes as are available from the old chunk
3446 and fit into the new size. NB: The overhead for faked
3447 mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
3448 regular mmapped chunks. */
3449 if (bytes > oldsize - SIZE_SZ)
3450 bytes = oldsize - SIZE_SZ;
3451 memcpy (newmem, oldmem, bytes);
3452 return newmem;
3453 }
3454
3455 void *newmem;
3456
3457 #if HAVE_MREMAP
3458 newp = mremap_chunk (oldp, nb);
3459 if (newp)
3460 {
3461 void *newmem = chunk2mem_tag (newp);
3462 /* Give the new block a different tag. This helps to ensure
3463 that stale handles to the previous mapping are not
3464 reused. There's a performance hit for both us and the
3465 caller for doing this, so we might want to
3466 reconsider. */
3467 return tag_new_usable (newmem);
3468 }
3469 #endif
3470 /* Note the extra SIZE_SZ overhead. */
3471 if (oldsize - SIZE_SZ >= nb)
3472 return oldmem; /* do nothing */
3473
3474 /* Must alloc, copy, free. */
3475 newmem = __libc_malloc (bytes);
3476 if (newmem == 0)
3477 return 0; /* propagate failure */
3478
3479 memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
3480 munmap_chunk (oldp);
3481 return newmem;
3482 }
3483
3484 if (SINGLE_THREAD_P)
3485 {
3486 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3487 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3488 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3489
3490 return newp;
3491 }
3492
3493 __libc_lock_lock (ar_ptr->mutex);
3494
3495 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3496
3497 __libc_lock_unlock (ar_ptr->mutex);
3498 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3499 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3500
3501 if (newp == NULL)
3502 {
3503 /* Try harder to allocate memory in other arenas. */
3504 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3505 newp = __libc_malloc (bytes);
3506 if (newp != NULL)
3507 {
3508 size_t sz = memsize (oldp);
3509 memcpy (newp, oldmem, sz);
3510 (void) tag_region (chunk2mem (oldp), sz);
3511 _int_free (ar_ptr, oldp, 0);
3512 }
3513 }
3514
3515 return newp;
3516 }
3517 libc_hidden_def (__libc_realloc)
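/* A minimal usage sketch of the failure contract above: on failure
   __libc_realloc returns NULL and (except for the bytes == 0 case) leaves
   the old block valid, so callers should keep their original pointer.
   Kept under "#if 0"; grow_buffer is a hypothetical helper.  */
#if 0
#include <stdlib.h>

static char *
grow_buffer (char *buf, size_t new_size)
{
  char *tmp = realloc (buf, new_size);
  if (tmp == NULL)
    /* The old block is untouched and still owned by the caller.  */
    return buf;
  return tmp;
}

int
main (void)
{
  char *buf = malloc (16);
  buf = grow_buffer (buf, 4096);   /* never loses the old block on failure */
  free (buf);
  return 0;
}
#endif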
3518
3519 void *
3520 __libc_memalign (size_t alignment, size_t bytes)
3521 {
3522 void *address = RETURN_ADDRESS (0);
3523 return _mid_memalign (alignment, bytes, address);
3524 }
3525
3526 static void *
3527 _mid_memalign (size_t alignment, size_t bytes, void *address)
3528 {
3529 mstate ar_ptr;
3530 void *p;
3531
3532 void *(*hook) (size_t, size_t, const void *) =
3533 atomic_forced_read (__memalign_hook);
3534 if (__builtin_expect (hook != NULL, 0))
3535 return (*hook)(alignment, bytes, address);
3536
3537 /* If we need less alignment than we give anyway, just relay to malloc. */
3538 if (alignment <= MALLOC_ALIGNMENT)
3539 return __libc_malloc (bytes);
3540
3541 /* Otherwise, ensure that it is at least the minimum chunk size. */
3542 if (alignment < MINSIZE)
3543 alignment = MINSIZE;
3544
3545 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3546 power of 2 and will cause overflow in the check below. */
3547 if (alignment > SIZE_MAX / 2 + 1)
3548 {
3549 __set_errno (EINVAL);
3550 return 0;
3551 }
3552
3553
3554 /* Make sure alignment is power of 2. */
3555 if (!powerof2 (alignment))
3556 {
3557 size_t a = MALLOC_ALIGNMENT * 2;
3558 while (a < alignment)
3559 a <<= 1;
3560 alignment = a;
3561 }
3562
3563 if (SINGLE_THREAD_P)
3564 {
3565 p = _int_memalign (&main_arena, alignment, bytes);
3566 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3567 &main_arena == arena_for_chunk (mem2chunk (p)));
3568 return tag_new_usable (p);
3569 }
3570
3571 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3572
3573 p = _int_memalign (ar_ptr, alignment, bytes);
3574 if (!p && ar_ptr != NULL)
3575 {
3576 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3577 ar_ptr = arena_get_retry (ar_ptr, bytes);
3578 p = _int_memalign (ar_ptr, alignment, bytes);
3579 }
3580
3581 if (ar_ptr != NULL)
3582 __libc_lock_unlock (ar_ptr->mutex);
3583
3584 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3585 ar_ptr == arena_for_chunk (mem2chunk (p)));
3586 return tag_new_usable (p);
3587 }
3588 /* For ISO C11. */
3589 weak_alias (__libc_memalign, aligned_alloc)
3590 libc_hidden_def (__libc_memalign)
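/* A minimal usage sketch of the aligned_alloc alias above.  Because it is
   a plain alias of __libc_memalign here, a non-power-of-two alignment is
   rounded up by the loop in _mid_memalign rather than rejected; portable
   code should still pass a power of two.  Kept under "#if 0".  */
#if 0
#include <stdlib.h>

int
main (void)
{
  void *p = aligned_alloc (64, 1024);   /* e.g. a cache-line-aligned buffer */
  if (p == NULL)
    return 1;
  free (p);
  return 0;
}
#endif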
3591
3592 void *
3593 __libc_valloc (size_t bytes)
3594 {
3595 if (__malloc_initialized < 0)
3596 ptmalloc_init ();
3597
3598 void *address = RETURN_ADDRESS (0);
3599 size_t pagesize = GLRO (dl_pagesize);
3600 return _mid_memalign (pagesize, bytes, address);
3601 }
3602
3603 void *
3604 __libc_pvalloc (size_t bytes)
3605 {
3606 if (__malloc_initialized < 0)
3607 ptmalloc_init ();
3608
3609 void *address = RETURN_ADDRESS (0);
3610 size_t pagesize = GLRO (dl_pagesize);
3611 size_t rounded_bytes;
3612 /* ALIGN_UP with overflow check. */
3613 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3614 pagesize - 1,
3615 &rounded_bytes)))
3616 {
3617 __set_errno (ENOMEM);
3618 return 0;
3619 }
3620 rounded_bytes = rounded_bytes & ~(pagesize - 1);
3621
3622 return _mid_memalign (pagesize, rounded_bytes, address);
3623 }
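/* A minimal standalone sketch of the "ALIGN_UP with overflow check" used
   above, assuming a power-of-two page size such as 4096.  Kept under
   "#if 0" so it is not compiled into this file.  */
#if 0
#include <stdbool.h>
#include <stddef.h>

/* Round BYTES up to a multiple of PAGESIZE; fail instead of wrapping.  */
static bool
align_up (size_t bytes, size_t pagesize, size_t *result)
{
  size_t sum;
  if (__builtin_add_overflow (bytes, pagesize - 1, &sum))
    return false;
  *result = sum & ~(pagesize - 1);
  return true;
}

int
main (void)
{
  size_t r;
  return (align_up (1, 4096, &r) && r == 4096
          && align_up (4096, 4096, &r) && r == 4096
          && !align_up ((size_t) -1, 4096, &r)) ? 0 : 1;
}
#endif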
3624
3625 void *
3626 __libc_calloc (size_t n, size_t elem_size)
3627 {
3628 mstate av;
3629 mchunkptr oldtop;
3630 INTERNAL_SIZE_T sz, oldtopsize;
3631 void *mem;
3632 unsigned long clearsize;
3633 unsigned long nclears;
3634 INTERNAL_SIZE_T *d;
3635 ptrdiff_t bytes;
3636
3637 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
3638 {
3639 __set_errno (ENOMEM);
3640 return NULL;
3641 }
3642
3643 sz = bytes;
3644
3645 void *(*hook) (size_t, const void *) =
3646 atomic_forced_read (__malloc_hook);
3647 if (__builtin_expect (hook != NULL, 0))
3648 {
3649 mem = (*hook)(sz, RETURN_ADDRESS (0));
3650 if (mem == 0)
3651 return 0;
3652
3653 return memset (mem, 0, sz);
3654 }
3655
3656 MAYBE_INIT_TCACHE ();
3657
3658 if (SINGLE_THREAD_P)
3659 av = &main_arena;
3660 else
3661 arena_get (av, sz);
3662
3663 if (av)
3664 {
3665 /* Check if we hand out the top chunk, in which case there may be no
3666 need to clear. */
3667 #if MORECORE_CLEARS
3668 oldtop = top (av);
3669 oldtopsize = chunksize (top (av));
3670 # if MORECORE_CLEARS < 2
3671 /* Only newly allocated memory is guaranteed to be cleared. */
3672 if (av == &main_arena &&
3673 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3674 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3675 # endif
3676 if (av != &main_arena)
3677 {
3678 heap_info *heap = heap_for_ptr (oldtop);
3679 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3680 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3681 }
3682 #endif
3683 }
3684 else
3685 {
3686 /* No usable arenas. */
3687 oldtop = 0;
3688 oldtopsize = 0;
3689 }
3690 mem = _int_malloc (av, sz);
3691
3692 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3693 av == arena_for_chunk (mem2chunk (mem)));
3694
3695 if (!SINGLE_THREAD_P)
3696 {
3697 if (mem == 0 && av != NULL)
3698 {
3699 LIBC_PROBE (memory_calloc_retry, 1, sz);
3700 av = arena_get_retry (av, sz);
3701 mem = _int_malloc (av, sz);
3702 }
3703
3704 if (av != NULL)
3705 __libc_lock_unlock (av->mutex);
3706 }
3707
3708 /* Allocation failed even after a retry. */
3709 if (mem == 0)
3710 return 0;
3711
3712 mchunkptr p = mem2chunk (mem);
3713
3714 /* If we are using memory tagging, then we need to set the tags
3715 regardless of MORECORE_CLEARS, so we zero the whole block while
3716 doing so. */
3717 if (__glibc_unlikely (mtag_enabled))
3718 return tag_new_zero_region (mem, memsize (p));
3719
3720 INTERNAL_SIZE_T csz = chunksize (p);
3721
3722 /* Two optional cases in which clearing is not necessary */
3723 if (chunk_is_mmapped (p))
3724 {
3725 if (__builtin_expect (perturb_byte, 0))
3726 return memset (mem, 0, sz);
3727
3728 return mem;
3729 }
3730
3731 #if MORECORE_CLEARS
3732 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3733 {
3734 /* clear only the bytes from non-freshly-sbrked memory */
3735 csz = oldtopsize;
3736 }
3737 #endif
3738
3739 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3740 contents have an odd number of INTERNAL_SIZE_T-sized words;
3741 minimally 3. */
3742 d = (INTERNAL_SIZE_T *) mem;
3743 clearsize = csz - SIZE_SZ;
3744 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3745 assert (nclears >= 3);
3746
3747 if (nclears > 9)
3748 return memset (d, 0, clearsize);
3749
3750 else
3751 {
3752 *(d + 0) = 0;
3753 *(d + 1) = 0;
3754 *(d + 2) = 0;
3755 if (nclears > 4)
3756 {
3757 *(d + 3) = 0;
3758 *(d + 4) = 0;
3759 if (nclears > 6)
3760 {
3761 *(d + 5) = 0;
3762 *(d + 6) = 0;
3763 if (nclears > 8)
3764 {
3765 *(d + 7) = 0;
3766 *(d + 8) = 0;
3767 }
3768 }
3769 }
3770 }
3771
3772 return mem;
3773 }
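/* A minimal standalone sketch of why __libc_calloc multiplies with
   __builtin_mul_overflow above: without the check, a wrapping product
   would silently yield a short allocation.  Kept under "#if 0".  */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  /* On a 64-bit system this product wraps around to 2 bytes; calloc must
     detect the overflow and fail with ENOMEM instead of returning a short
     block.  */
  void *p = calloc (SIZE_MAX / 2 + 2, 2);
  printf ("calloc returned %p (NULL expected)\n", p);
  free (p);
  return 0;
}
#endif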
3774
3775 /*
3776 ------------------------------ malloc ------------------------------
3777 */
3778
3779 static void *
3780 _int_malloc (mstate av, size_t bytes)
3781 {
3782 INTERNAL_SIZE_T nb; /* normalized request size */
3783 unsigned int idx; /* associated bin index */
3784 mbinptr bin; /* associated bin */
3785
3786 mchunkptr victim; /* inspected/selected chunk */
3787 INTERNAL_SIZE_T size; /* its size */
3788 int victim_index; /* its bin index */
3789
3790 mchunkptr remainder; /* remainder from a split */
3791 unsigned long remainder_size; /* its size */
3792
3793 unsigned int block; /* bit map traverser */
3794 unsigned int bit; /* bit map traverser */
3795 unsigned int map; /* current word of binmap */
3796
3797 mchunkptr fwd; /* misc temp for linking */
3798 mchunkptr bck; /* misc temp for linking */
3799
3800 #if USE_TCACHE
3801 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3802 #endif
3803
3804 /*
3805 Convert request size to internal form by adding SIZE_SZ bytes
3806 overhead plus possibly more to obtain necessary alignment and/or
3807 to obtain a size of at least MINSIZE, the smallest allocatable
3808 size. Also, checked_request2size returns false for request sizes
3809 that are so large that they wrap around zero when padded and
3810 aligned.
3811 */
3812
3813 if (!checked_request2size (bytes, &nb))
3814 {
3815 __set_errno (ENOMEM);
3816 return NULL;
3817 }
3818
3819 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3820 mmap. */
3821 if (__glibc_unlikely (av == NULL))
3822 {
3823 void *p = sysmalloc (nb, av);
3824 if (p != NULL)
3825 alloc_perturb (p, bytes);
3826 return p;
3827 }
3828
3829 /*
3830 If the size qualifies as a fastbin, first check corresponding bin.
3831 This code is safe to execute even if av is not yet initialized, so we
3832 can try it without checking, which saves some time on this fast path.
3833 */
3834
3835 #define REMOVE_FB(fb, victim, pp) \
3836 do \
3837 { \
3838 victim = pp; \
3839 if (victim == NULL) \
3840 break; \
3841 pp = REVEAL_PTR (victim->fd); \
3842 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
3843 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
3844 } \
3845 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
3846 != victim); \
3847
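/* A minimal standalone sketch of the lock-free pop performed by REMOVE_FB,
   written with C11 atomics instead of the glibc-internal
   catomic_compare_and_exchange_val_acq, and without the pointer mangling
   or diagnostics.  Kept under "#if 0" so it is not compiled into this
   function.  */
#if 0
#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static struct node *
lifo_pop (struct node *_Atomic *head)
{
  struct node *old = atomic_load_explicit (head, memory_order_acquire);
  while (old != NULL
         && !atomic_compare_exchange_weak_explicit (head, &old, old->next,
                                                    memory_order_acquire,
                                                    memory_order_acquire))
    ;   /* A failed CAS reloads OLD with the current head; retry.  */
  return old;
}

int
main (void)
{
  struct node b = { NULL }, a = { &b };
  struct node *_Atomic head = &a;
  return (lifo_pop (&head) == &a && lifo_pop (&head) == &b) ? 0 : 1;
}
#endif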
3848 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3849 {
3850 idx = fastbin_index (nb);
3851 mfastbinptr *fb = &fastbin (av, idx);
3852 mchunkptr pp;
3853 victim = *fb;
3854
3855 if (victim != NULL)
3856 {
3857 if (__glibc_unlikely (misaligned_chunk (victim)))
3858 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3859
3860 if (SINGLE_THREAD_P)
3861 *fb = REVEAL_PTR (victim->fd);
3862 else
3863 REMOVE_FB (fb, pp, victim);
3864 if (__glibc_likely (victim != NULL))
3865 {
3866 size_t victim_idx = fastbin_index (chunksize (victim));
3867 if (__builtin_expect (victim_idx != idx, 0))
3868 malloc_printerr ("malloc(): memory corruption (fast)");
3869 check_remalloced_chunk (av, victim, nb);
3870 #if USE_TCACHE
3871 /* While we're here, if we see other chunks of the same size,
3872 stash them in the tcache. */
3873 size_t tc_idx = csize2tidx (nb);
3874 if (tcache && tc_idx < mp_.tcache_bins)
3875 {
3876 mchunkptr tc_victim;
3877
3878 /* While bin not empty and tcache not full, copy chunks. */
3879 while (tcache->counts[tc_idx] < mp_.tcache_count
3880 && (tc_victim = *fb) != NULL)
3881 {
3882 if (__glibc_unlikely (misaligned_chunk (tc_victim)))
3883 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
3884 if (SINGLE_THREAD_P)
3885 *fb = REVEAL_PTR (tc_victim->fd);
3886 else
3887 {
3888 REMOVE_FB (fb, pp, tc_victim);
3889 if (__glibc_unlikely (tc_victim == NULL))
3890 break;
3891 }
3892 tcache_put (tc_victim, tc_idx);
3893 }
3894 }
3895 #endif
3896 void *p = chunk2mem (victim);
3897 alloc_perturb (p, bytes);
3898 return p;
3899 }
3900 }
3901 }
3902
3903 /*
3904 If a small request, check regular bin. Since these "smallbins"
3905 hold one size each, no searching within bins is necessary.
3906 (For a large request, we need to wait until unsorted chunks are
3907 processed to find best fit. But for small ones, fits are exact
3908 anyway, so we can check now, which is faster.)
3909 */
3910
3911 if (in_smallbin_range (nb))
3912 {
3913 idx = smallbin_index (nb);
3914 bin = bin_at (av, idx);
3915
3916 if ((victim = last (bin)) != bin)
3917 {
3918 bck = victim->bk;
3919 if (__glibc_unlikely (bck->fd != victim))
3920 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3921 set_inuse_bit_at_offset (victim, nb);
3922 bin->bk = bck;
3923 bck->fd = bin;
3924
3925 if (av != &main_arena)
3926 set_non_main_arena (victim);
3927 check_malloced_chunk (av, victim, nb);
3928 #if USE_TCACHE
3929 /* While we're here, if we see other chunks of the same size,
3930 stash them in the tcache. */
3931 size_t tc_idx = csize2tidx (nb);
3932 if (tcache && tc_idx < mp_.tcache_bins)
3933 {
3934 mchunkptr tc_victim;
3935
3936 /* While bin not empty and tcache not full, copy chunks over. */
3937 while (tcache->counts[tc_idx] < mp_.tcache_count
3938 && (tc_victim = last (bin)) != bin)
3939 {
3940 if (tc_victim != 0)
3941 {
3942 bck = tc_victim->bk;
3943 set_inuse_bit_at_offset (tc_victim, nb);
3944 if (av != &main_arena)
3945 set_non_main_arena (tc_victim);
3946 bin->bk = bck;
3947 bck->fd = bin;
3948
3949 tcache_put (tc_victim, tc_idx);
3950 }
3951 }
3952 }
3953 #endif
3954 void *p = chunk2mem (victim);
3955 alloc_perturb (p, bytes);
3956 return p;
3957 }
3958 }
3959
3960 /*
3961 If this is a large request, consolidate fastbins before continuing.
3962 While it might look excessive to kill all fastbins before
3963 even seeing if there is space available, this avoids
3964 fragmentation problems normally associated with fastbins.
3965 Also, in practice, programs tend to have runs of either small or
3966 large requests, but less often mixtures, so consolidation is not
3967 invoked all that often in most programs. And the programs in which
3968 it is called frequently would otherwise tend to fragment.
3969 */
3970
3971 else
3972 {
3973 idx = largebin_index (nb);
3974 if (atomic_load_relaxed (&av->have_fastchunks))
3975 malloc_consolidate (av);
3976 }
3977
3978 /*
3979 Process recently freed or remaindered chunks, taking one only if
3980 it is an exact fit, or, if this is a small request, the chunk is the remainder from
3981 the most recent non-exact fit. Place other traversed chunks in
3982 bins. Note that this step is the only place in any routine where
3983 chunks are placed in bins.
3984
3985 The outer loop here is needed because we might not realize until
3986 near the end of malloc that we should have consolidated, so must
3987 do so and retry. This happens at most once, and only when we would
3988 otherwise need to expand memory to service a "small" request.
3989 */
3990
3991 #if USE_TCACHE
3992 INTERNAL_SIZE_T tcache_nb = 0;
3993 size_t tc_idx = csize2tidx (nb);
3994 if (tcache && tc_idx < mp_.tcache_bins)
3995 tcache_nb = nb;
3996 int return_cached = 0;
3997
3998 tcache_unsorted_count = 0;
3999 #endif
4000
4001 for (;; )
4002 {
4003 int iters = 0;
4004 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
4005 {
4006 bck = victim->bk;
4007 size = chunksize (victim);
4008 mchunkptr next = chunk_at_offset (victim, size);
4009
4010 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
4011 || __glibc_unlikely (size > av->system_mem))
4012 malloc_printerr ("malloc(): invalid size (unsorted)");
4013 if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
4014 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
4015 malloc_printerr ("malloc(): invalid next size (unsorted)");
4016 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
4017 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
4018 if (__glibc_unlikely (bck->fd != victim)
4019 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
4020 malloc_printerr ("malloc(): unsorted double linked list corrupted");
4021 if (__glibc_unlikely (prev_inuse (next)))
4022 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
4023
4024 /*
4025 If a small request, try to use last remainder if it is the
4026 only chunk in unsorted bin. This helps promote locality for
4027 runs of consecutive small requests. This is the only
4028 exception to best-fit, and applies only when there is
4029 no exact fit for a small chunk.
4030 */
4031
4032 if (in_smallbin_range (nb) &&
4033 bck == unsorted_chunks (av) &&
4034 victim == av->last_remainder &&
4035 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4036 {
4037 /* split and reattach remainder */
4038 remainder_size = size - nb;
4039 remainder = chunk_at_offset (victim, nb);
4040 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
4041 av->last_remainder = remainder;
4042 remainder->bk = remainder->fd = unsorted_chunks (av);
4043 if (!in_smallbin_range (remainder_size))
4044 {
4045 remainder->fd_nextsize = NULL;
4046 remainder->bk_nextsize = NULL;
4047 }
4048
4049 set_head (victim, nb | PREV_INUSE |
4050 (av != &main_arena ? NON_MAIN_ARENA : 0));
4051 set_head (remainder, remainder_size | PREV_INUSE);
4052 set_foot (remainder, remainder_size);
4053
4054 check_malloced_chunk (av, victim, nb);
4055 void *p = chunk2mem (victim);
4056 alloc_perturb (p, bytes);
4057 return p;
4058 }
4059
4060 /* remove from unsorted list */
4061 if (__glibc_unlikely (bck->fd != victim))
4062 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
4063 unsorted_chunks (av)->bk = bck;
4064 bck->fd = unsorted_chunks (av);
4065
4066 /* Take now instead of binning if exact fit */
4067
4068 if (size == nb)
4069 {
4070 set_inuse_bit_at_offset (victim, size);
4071 if (av != &main_arena)
4072 set_non_main_arena (victim);
4073 #if USE_TCACHE
4074 /* Fill cache first, return to user only if cache fills.
4075 We may return one of these chunks later. */
4076 if (tcache_nb
4077 && tcache->counts[tc_idx] < mp_.tcache_count)
4078 {
4079 tcache_put (victim, tc_idx);
4080 return_cached = 1;
4081 continue;
4082 }
4083 else
4084 {
4085 #endif
4086 check_malloced_chunk (av, victim, nb);
4087 void *p = chunk2mem (victim);
4088 alloc_perturb (p, bytes);
4089 return p;
4090 #if USE_TCACHE
4091 }
4092 #endif
4093 }
4094
4095 /* place chunk in bin */
4096
4097 if (in_smallbin_range (size))
4098 {
4099 victim_index = smallbin_index (size);
4100 bck = bin_at (av, victim_index);
4101 fwd = bck->fd;
4102 }
4103 else
4104 {
4105 victim_index = largebin_index (size);
4106 bck = bin_at (av, victim_index);
4107 fwd = bck->fd;
4108
4109 /* maintain large bins in sorted order */
4110 if (fwd != bck)
4111 {
4112 /* Or with inuse bit to speed comparisons */
4113 size |= PREV_INUSE;
4114 /* if smaller than smallest, bypass loop below */
4115 assert (chunk_main_arena (bck->bk));
4116 if ((unsigned long) (size)
4117 < (unsigned long) chunksize_nomask (bck->bk))
4118 {
4119 fwd = bck;
4120 bck = bck->bk;
4121
4122 victim->fd_nextsize = fwd->fd;
4123 victim->bk_nextsize = fwd->fd->bk_nextsize;
4124 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4125 }
4126 else
4127 {
4128 assert (chunk_main_arena (fwd));
4129 while ((unsigned long) size < chunksize_nomask (fwd))
4130 {
4131 fwd = fwd->fd_nextsize;
4132 assert (chunk_main_arena (fwd));
4133 }
4134
4135 if ((unsigned long) size
4136 == (unsigned long) chunksize_nomask (fwd))
4137 /* Always insert in the second position. */
4138 fwd = fwd->fd;
4139 else
4140 {
4141 victim->fd_nextsize = fwd;
4142 victim->bk_nextsize = fwd->bk_nextsize;
4143 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
4144 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4145 fwd->bk_nextsize = victim;
4146 victim->bk_nextsize->fd_nextsize = victim;
4147 }
4148 bck = fwd->bk;
4149 if (bck->fd != fwd)
4150 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4151 }
4152 }
4153 else
4154 victim->fd_nextsize = victim->bk_nextsize = victim;
4155 }
4156
4157 mark_bin (av, victim_index);
4158 victim->bk = bck;
4159 victim->fd = fwd;
4160 fwd->bk = victim;
4161 bck->fd = victim;
4162
4163 #if USE_TCACHE
4164 /* If we've processed as many chunks as we're allowed while
4165 filling the cache, return one of the cached ones. */
4166 ++tcache_unsorted_count;
4167 if (return_cached
4168 && mp_.tcache_unsorted_limit > 0
4169 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
4170 {
4171 return tcache_get (tc_idx);
4172 }
4173 #endif
4174
4175 #define MAX_ITERS 10000
4176 if (++iters >= MAX_ITERS)
4177 break;
4178 }
4179
4180 #if USE_TCACHE
4181 /* If all the small chunks we found ended up cached, return one now. */
4182 if (return_cached)
4183 {
4184 return tcache_get (tc_idx);
4185 }
4186 #endif
4187
4188 /*
4189 If a large request, scan through the chunks of current bin in
4190 sorted order to find smallest that fits. Use the skip list for this.
4191 */
4192
4193 if (!in_smallbin_range (nb))
4194 {
4195 bin = bin_at (av, idx);
4196
4197 /* skip scan if empty or largest chunk is too small */
4198 if ((victim = first (bin)) != bin
4199 && (unsigned long) chunksize_nomask (victim)
4200 >= (unsigned long) (nb))
4201 {
4202 victim = victim->bk_nextsize;
4203 while (((unsigned long) (size = chunksize (victim)) <
4204 (unsigned long) (nb)))
4205 victim = victim->bk_nextsize;
4206
4207 /* Avoid removing the first entry for a size so that the skip
4208 list does not have to be rerouted. */
4209 if (victim != last (bin)
4210 && chunksize_nomask (victim)
4211 == chunksize_nomask (victim->fd))
4212 victim = victim->fd;
4213
4214 remainder_size = size - nb;
4215 unlink_chunk (av, victim);
4216
4217 /* Exhaust */
4218 if (remainder_size < MINSIZE)
4219 {
4220 set_inuse_bit_at_offset (victim, size);
4221 if (av != &main_arena)
4222 set_non_main_arena (victim);
4223 }
4224 /* Split */
4225 else
4226 {
4227 remainder = chunk_at_offset (victim, nb);
4228 /* We cannot assume the unsorted list is empty and therefore
4229 have to perform a complete insert here. */
4230 bck = unsorted_chunks (av);
4231 fwd = bck->fd;
4232 if (__glibc_unlikely (fwd->bk != bck))
4233 malloc_printerr ("malloc(): corrupted unsorted chunks");
4234 remainder->bk = bck;
4235 remainder->fd = fwd;
4236 bck->fd = remainder;
4237 fwd->bk = remainder;
4238 if (!in_smallbin_range (remainder_size))
4239 {
4240 remainder->fd_nextsize = NULL;
4241 remainder->bk_nextsize = NULL;
4242 }
4243 set_head (victim, nb | PREV_INUSE |
4244 (av != &main_arena ? NON_MAIN_ARENA : 0));
4245 set_head (remainder, remainder_size | PREV_INUSE);
4246 set_foot (remainder, remainder_size);
4247 }
4248 check_malloced_chunk (av, victim, nb);
4249 void *p = chunk2mem (victim);
4250 alloc_perturb (p, bytes);
4251 return p;
4252 }
4253 }
4254
4255 /*
4256 Search for a chunk by scanning bins, starting with next largest
4257 bin. This search is strictly by best-fit; i.e., the smallest
4258 (with ties going to approximately the least recently used) chunk
4259 that fits is selected.
4260
4261 The bitmap avoids needing to check whether most blocks are nonempty.
4262 The particular case of skipping all bins during warm-up phases
4263 when no chunks have been returned yet is faster than it might look.
4264 */
4265
4266 ++idx;
4267 bin = bin_at (av, idx);
4268 block = idx2block (idx);
4269 map = av->binmap[block];
4270 bit = idx2bit (idx);
4271
4272 for (;; )
4273 {
4274 /* Skip rest of block if there are no more set bits in this block. */
4275 if (bit > map || bit == 0)
4276 {
4277 do
4278 {
4279 if (++block >= BINMAPSIZE) /* out of bins */
4280 goto use_top;
4281 }
4282 while ((map = av->binmap[block]) == 0);
4283
4284 bin = bin_at (av, (block << BINMAPSHIFT));
4285 bit = 1;
4286 }
4287
4288 /* Advance to bin with set bit. There must be one. */
4289 while ((bit & map) == 0)
4290 {
4291 bin = next_bin (bin);
4292 bit <<= 1;
4293 assert (bit != 0);
4294 }
4295
4296 /* Inspect the bin. It is likely to be non-empty */
4297 victim = last (bin);
4298
4299 /* If a false alarm (empty bin), clear the bit. */
4300 if (victim == bin)
4301 {
4302 av->binmap[block] = map &= ~bit; /* Write through */
4303 bin = next_bin (bin);
4304 bit <<= 1;
4305 }
4306
4307 else
4308 {
4309 size = chunksize (victim);
4310
4311 /* We know the first chunk in this bin is big enough to use. */
4312 assert ((unsigned long) (size) >= (unsigned long) (nb));
4313
4314 remainder_size = size - nb;
4315
4316 /* unlink */
4317 unlink_chunk (av, victim);
4318
4319 /* Exhaust */
4320 if (remainder_size < MINSIZE)
4321 {
4322 set_inuse_bit_at_offset (victim, size);
4323 if (av != &main_arena)
4324 set_non_main_arena (victim);
4325 }
4326
4327 /* Split */
4328 else
4329 {
4330 remainder = chunk_at_offset (victim, nb);
4331
4332 /* We cannot assume the unsorted list is empty and therefore
4333 have to perform a complete insert here. */
4334 bck = unsorted_chunks (av);
4335 fwd = bck->fd;
4336 if (__glibc_unlikely (fwd->bk != bck))
4337 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4338 remainder->bk = bck;
4339 remainder->fd = fwd;
4340 bck->fd = remainder;
4341 fwd->bk = remainder;
4342
4343 /* advertise as last remainder */
4344 if (in_smallbin_range (nb))
4345 av->last_remainder = remainder;
4346 if (!in_smallbin_range (remainder_size))
4347 {
4348 remainder->fd_nextsize = NULL;
4349 remainder->bk_nextsize = NULL;
4350 }
4351 set_head (victim, nb | PREV_INUSE |
4352 (av != &main_arena ? NON_MAIN_ARENA : 0));
4353 set_head (remainder, remainder_size | PREV_INUSE);
4354 set_foot (remainder, remainder_size);
4355 }
4356 check_malloced_chunk (av, victim, nb);
4357 void *p = chunk2mem (victim);
4358 alloc_perturb (p, bytes);
4359 return p;
4360 }
4361 }
4362
4363 use_top:
4364 /*
4365 If large enough, split off the chunk bordering the end of memory
4366 (held in av->top). Note that this is in accord with the best-fit
4367 search rule. In effect, av->top is treated as larger (and thus
4368 less well fitting) than any other available chunk since it can
4369 be extended to be as large as necessary (up to system
4370 limitations).
4371
4372 We require that av->top always exists (i.e., has size >=
4373 MINSIZE) after initialization, so if it would otherwise be
4374 exhausted by current request, it is replenished. (The main
4375 reason for ensuring it exists is that we may need MINSIZE space
4376 to put in fenceposts in sysmalloc.)
4377 */
4378
4379 victim = av->top;
4380 size = chunksize (victim);
4381
4382 if (__glibc_unlikely (size > av->system_mem))
4383 malloc_printerr ("malloc(): corrupted top size");
4384
4385 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4386 {
4387 remainder_size = size - nb;
4388 remainder = chunk_at_offset (victim, nb);
4389 av->top = remainder;
4390 set_head (victim, nb | PREV_INUSE |
4391 (av != &main_arena ? NON_MAIN_ARENA : 0));
4392 set_head (remainder, remainder_size | PREV_INUSE);
4393
4394 check_malloced_chunk (av, victim, nb);
4395 void *p = chunk2mem (victim);
4396 alloc_perturb (p, bytes);
4397 return p;
4398 }
4399
4400 /* When we are using atomic ops to free fast chunks we can get
4401 here for all block sizes. */
4402 else if (atomic_load_relaxed (&av->have_fastchunks))
4403 {
4404 malloc_consolidate (av);
4405 /* restore original bin index */
4406 if (in_smallbin_range (nb))
4407 idx = smallbin_index (nb);
4408 else
4409 idx = largebin_index (nb);
4410 }
4411
4412 /*
4413 Otherwise, relay to handle system-dependent cases
4414 */
4415 else
4416 {
4417 void *p = sysmalloc (nb, av);
4418 if (p != NULL)
4419 alloc_perturb (p, bytes);
4420 return p;
4421 }
4422 }
4423 }
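/* A minimal standalone sketch of the binmap arithmetic used in the bin
   scan above, assuming 32 bins are tracked per map word (BINMAPSHIFT 5,
   the usual setting).  Kept under "#if 0".  */
#if 0
#include <stdio.h>

static unsigned int sketch_idx2block (unsigned int idx) { return idx >> 5; }
static unsigned int sketch_idx2bit (unsigned int idx) { return 1U << (idx & 31); }

int
main (void)
{
  unsigned int map[4] = { 0 };          /* stand-in for av->binmap */
  unsigned int idx = 70;                /* some bin index          */

  map[sketch_idx2block (idx)] |= sketch_idx2bit (idx);     /* mark_bin */
  int marked = (map[sketch_idx2block (idx)] & sketch_idx2bit (idx)) != 0;

  printf ("bin %u -> word %u, mask %#x, marked: %d\n",
          idx, sketch_idx2block (idx), sketch_idx2bit (idx), marked);
  return 0;
}
#endif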
4424
4425 /*
4426 ------------------------------ free ------------------------------
4427 */
4428
4429 static void
4430 _int_free (mstate av, mchunkptr p, int have_lock)
4431 {
4432 INTERNAL_SIZE_T size; /* its size */
4433 mfastbinptr *fb; /* associated fastbin */
4434 mchunkptr nextchunk; /* next contiguous chunk */
4435 INTERNAL_SIZE_T nextsize; /* its size */
4436 int nextinuse; /* true if nextchunk is used */
4437 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4438 mchunkptr bck; /* misc temp for linking */
4439 mchunkptr fwd; /* misc temp for linking */
4440
4441 size = chunksize (p);
4442
4443 /* Little security check which won't hurt performance: the
4444 allocator never wraps around at the end of the address space.
4445 Therefore we can exclude some size values which might appear
4446 here by accident or by "design" from some intruder. */
4447 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4448 || __builtin_expect (misaligned_chunk (p), 0))
4449 malloc_printerr ("free(): invalid pointer");
4450 /* We know that each chunk is at least MINSIZE bytes in size and a
4451 multiple of MALLOC_ALIGNMENT. */
4452 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4453 malloc_printerr ("free(): invalid size");
4454
4455 check_inuse_chunk(av, p);
4456
4457 #if USE_TCACHE
4458 {
4459 size_t tc_idx = csize2tidx (size);
4460 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4461 {
4462 /* Check to see if it's already in the tcache. */
4463 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4464
4465 /* This test succeeds on double free. However, we don't 100%
4466 trust it (it also matches random payload data at a 1 in
4467 2^<size_t> chance), so verify it's not an unlikely
4468 coincidence before aborting. */
4469 if (__glibc_unlikely (e->key == tcache_key))
4470 {
4471 tcache_entry *tmp;
4472 size_t cnt = 0;
4473 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4474 for (tmp = tcache->entries[tc_idx];
4475 tmp;
4476 tmp = REVEAL_PTR (tmp->next), ++cnt)
4477 {
4478 if (cnt >= mp_.tcache_count)
4479 malloc_printerr ("free(): too many chunks detected in tcache");
4480 if (__glibc_unlikely (!aligned_OK (tmp)))
4481 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4482 if (tmp == e)
4483 malloc_printerr ("free(): double free detected in tcache 2");
4484 /* If we get here, it was a coincidence. We've wasted a
4485 few cycles, but don't abort. */
4486 }
4487 }
4488
4489 if (tcache->counts[tc_idx] < mp_.tcache_count)
4490 {
4491 tcache_put (p, tc_idx);
4492 return;
4493 }
4494 }
4495 }
4496 #endif
4497
4498 /*
4499 If eligible, place chunk on a fastbin so it can be found
4500 and used quickly in malloc.
4501 */
4502
4503 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4504
4505 #if TRIM_FASTBINS
4506 /*
4507 If TRIM_FASTBINS set, don't place chunks
4508 bordering top into fastbins
4509 */
4510 && (chunk_at_offset(p, size) != av->top)
4511 #endif
4512 ) {
4513
4514 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4515 <= CHUNK_HDR_SZ, 0)
4516 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4517 >= av->system_mem, 0))
4518 {
4519 bool fail = true;
4520 /* We might not have a lock at this point and concurrent modifications
4521 of system_mem might result in a false positive. Redo the test after
4522 getting the lock. */
4523 if (!have_lock)
4524 {
4525 __libc_lock_lock (av->mutex);
4526 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
4527 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4528 __libc_lock_unlock (av->mutex);
4529 }
4530
4531 if (fail)
4532 malloc_printerr ("free(): invalid next size (fast)");
4533 }
4534
4535 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4536
4537 atomic_store_relaxed (&av->have_fastchunks, true);
4538 unsigned int idx = fastbin_index(size);
4539 fb = &fastbin (av, idx);
4540
4541 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4542 mchunkptr old = *fb, old2;
4543
4544 if (SINGLE_THREAD_P)
4545 {
4546 /* Check that the top of the bin is not the record we are going to
4547 add (i.e., double free). */
4548 if (__builtin_expect (old == p, 0))
4549 malloc_printerr ("double free or corruption (fasttop)");
4550 p->fd = PROTECT_PTR (&p->fd, old);
4551 *fb = p;
4552 }
4553 else
4554 do
4555 {
4556 /* Check that the top of the bin is not the record we are going to
4557 add (i.e., double free). */
4558 if (__builtin_expect (old == p, 0))
4559 malloc_printerr ("double free or corruption (fasttop)");
4560 old2 = old;
4561 p->fd = PROTECT_PTR (&p->fd, old);
4562 }
4563 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4564 != old2);
4565
4566 /* Check that size of fastbin chunk at the top is the same as
4567 size of the chunk that we are adding. We can dereference OLD
4568 only if we have the lock, otherwise it might have already been
4569 allocated again. */
4570 if (have_lock && old != NULL
4571 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
4572 malloc_printerr ("invalid fastbin entry (free)");
4573 }
4574
4575 /*
4576 Consolidate other non-mmapped chunks as they arrive.
4577 */
4578
4579 else if (!chunk_is_mmapped(p)) {
4580
4581 /* If we're single-threaded, don't lock the arena. */
4582 if (SINGLE_THREAD_P)
4583 have_lock = true;
4584
4585 if (!have_lock)
4586 __libc_lock_lock (av->mutex);
4587
4588 nextchunk = chunk_at_offset(p, size);
4589
4590 /* Lightweight tests: check whether the block is already the
4591 top block. */
4592 if (__glibc_unlikely (p == av->top))
4593 malloc_printerr ("double free or corruption (top)");
4594 /* Or whether the next chunk is beyond the boundaries of the arena. */
4595 if (__builtin_expect (contiguous (av)
4596 && (char *) nextchunk
4597 >= ((char *) av->top + chunksize(av->top)), 0))
4598 malloc_printerr ("double free or corruption (out)");
4599 /* Or whether the block is actually not marked used. */
4600 if (__glibc_unlikely (!prev_inuse(nextchunk)))
4601 malloc_printerr ("double free or corruption (!prev)");
4602
4603 nextsize = chunksize(nextchunk);
4604 if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
4605 || __builtin_expect (nextsize >= av->system_mem, 0))
4606 malloc_printerr ("free(): invalid next size (normal)");
4607
4608 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4609
4610 /* consolidate backward */
4611 if (!prev_inuse(p)) {
4612 prevsize = prev_size (p);
4613 size += prevsize;
4614 p = chunk_at_offset(p, -((long) prevsize));
4615 if (__glibc_unlikely (chunksize(p) != prevsize))
4616 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4617 unlink_chunk (av, p);
4618 }
4619
4620 if (nextchunk != av->top) {
4621 /* get and clear inuse bit */
4622 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4623
4624 /* consolidate forward */
4625 if (!nextinuse) {
4626 unlink_chunk (av, nextchunk);
4627 size += nextsize;
4628 } else
4629 clear_inuse_bit_at_offset(nextchunk, 0);
4630
4631 /*
4632 Place the chunk in unsorted chunk list. Chunks are
4633 not placed into regular bins until after they have
4634 been given one chance to be used in malloc.
4635 */
4636
4637 bck = unsorted_chunks(av);
4638 fwd = bck->fd;
4639 if (__glibc_unlikely (fwd->bk != bck))
4640 malloc_printerr ("free(): corrupted unsorted chunks");
4641 p->fd = fwd;
4642 p->bk = bck;
4643 if (!in_smallbin_range(size))
4644 {
4645 p->fd_nextsize = NULL;
4646 p->bk_nextsize = NULL;
4647 }
4648 bck->fd = p;
4649 fwd->bk = p;
4650
4651 set_head(p, size | PREV_INUSE);
4652 set_foot(p, size);
4653
4654 check_free_chunk(av, p);
4655 }
4656
4657 /*
4658 If the chunk borders the current high end of memory,
4659 consolidate into top
4660 */
4661
4662 else {
4663 size += nextsize;
4664 set_head(p, size | PREV_INUSE);
4665 av->top = p;
4666 check_chunk(av, p);
4667 }
4668
4669 /*
4670 If freeing a large space, consolidate possibly-surrounding
4671 chunks. Then, if the total unused topmost memory exceeds the
4672 trim threshold, ask malloc_trim to reduce top.
4673
4674 Unless max_fast is 0, we don't know if there are fastbins
4675 bordering top, so we cannot tell for sure whether threshold
4676 has been reached unless fastbins are consolidated. But we
4677 don't want to consolidate on each free. As a compromise,
4678 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4679 is reached.
4680 */
4681
4682 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4683 if (atomic_load_relaxed (&av->have_fastchunks))
4684 malloc_consolidate(av);
4685
4686 if (av == &main_arena) {
4687 #ifndef MORECORE_CANNOT_TRIM
4688 if ((unsigned long)(chunksize(av->top)) >=
4689 (unsigned long)(mp_.trim_threshold))
4690 systrim(mp_.top_pad, av);
4691 #endif
4692 } else {
4693 /* Always try heap_trim(), even if the top chunk is not
4694 large, because the corresponding heap might go away. */
4695 heap_info *heap = heap_for_ptr(top(av));
4696
4697 assert(heap->ar_ptr == av);
4698 heap_trim(heap, mp_.top_pad);
4699 }
4700 }
4701
4702 if (!have_lock)
4703 __libc_lock_unlock (av->mutex);
4704 }
4705 /*
4706 If the chunk was allocated via mmap, release via munmap().
4707 */
4708
4709 else {
4710 munmap_chunk (p);
4711 }
4712 }
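/* A minimal standalone sketch of the boundary-tag arithmetic behind the
   backward/forward coalescing above: the next chunk lies at p + size, and
   when PREV_INUSE is clear the previous chunk lies at p - prev_size.  The
   layout mirrors malloc_chunk but is simplified (no arena or mmap bits).
   Kept under "#if 0".  */
#if 0
#include <stddef.h>

struct sketch_chunk
{
  size_t prev_size;   /* size of previous chunk, valid only if it is free */
  size_t size;        /* this chunk's size; bit 0 doubles as PREV_INUSE   */
};

#define SK_PREV_INUSE ((size_t) 1)
#define SK_SIZE(c)    ((c)->size & ~SK_PREV_INUSE)

static struct sketch_chunk *
sk_next (struct sketch_chunk *c)      /* physical successor */
{
  return (struct sketch_chunk *) ((char *) c + SK_SIZE (c));
}

static struct sketch_chunk *
sk_prev (struct sketch_chunk *c)      /* physical predecessor, if free */
{
  return (struct sketch_chunk *) ((char *) c - c->prev_size);
}

int
main (void)
{
  _Alignas (max_align_t) static unsigned char heap[96];
  struct sketch_chunk *a = (struct sketch_chunk *) heap;
  struct sketch_chunk *b = (struct sketch_chunk *) (heap + 32);
  a->size = 32 | SK_PREV_INUSE;   /* nonexistent predecessor counts as used */
  b->size = 32;                   /* PREV_INUSE clear: chunk A is free      */
  b->prev_size = 32;              /* so B can reach A when coalescing       */
  return (sk_next (a) == b && sk_prev (b) == a) ? 0 : 1;
}
#endif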
4713
4714 /*
4715 ------------------------- malloc_consolidate -------------------------
4716
4717 malloc_consolidate is a specialized version of free() that tears
4718 down chunks held in fastbins. Free itself cannot be used for this
4719 purpose since, among other things, it might place chunks back onto
4720 fastbins. So, instead, we need to use a minor variant of the same
4721 code.
4722 */
4723
4724 static void malloc_consolidate(mstate av)
4725 {
4726 mfastbinptr* fb; /* current fastbin being consolidated */
4727 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4728 mchunkptr p; /* current chunk being consolidated */
4729 mchunkptr nextp; /* next chunk to consolidate */
4730 mchunkptr unsorted_bin; /* bin header */
4731 mchunkptr first_unsorted; /* chunk to link to */
4732
4733 /* These have same use as in free() */
4734 mchunkptr nextchunk;
4735 INTERNAL_SIZE_T size;
4736 INTERNAL_SIZE_T nextsize;
4737 INTERNAL_SIZE_T prevsize;
4738 int nextinuse;
4739
4740 atomic_store_relaxed (&av->have_fastchunks, false);
4741
4742 unsorted_bin = unsorted_chunks(av);
4743
4744 /*
4745 Remove each chunk from its fast bin and consolidate it, then
4746 place it in the unsorted bin. Among other reasons for doing this,
4747 placing chunks in the unsorted bin avoids needing to calculate actual bins
4748 until malloc is sure that chunks aren't immediately going to be
4749 reused anyway.
4750 */
4751
4752 maxfb = &fastbin (av, NFASTBINS - 1);
4753 fb = &fastbin (av, 0);
4754 do {
4755 p = atomic_exchange_acq (fb, NULL);
4756 if (p != 0) {
4757 do {
4758 {
4759 if (__glibc_unlikely (misaligned_chunk (p)))
4760 malloc_printerr ("malloc_consolidate(): "
4761 "unaligned fastbin chunk detected");
4762
4763 unsigned int idx = fastbin_index (chunksize (p));
4764 if ((&fastbin (av, idx)) != fb)
4765 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4766 }
4767
4768 check_inuse_chunk(av, p);
4769 nextp = REVEAL_PTR (p->fd);
4770
4771 /* Slightly streamlined version of consolidation code in free() */
4772 size = chunksize (p);
4773 nextchunk = chunk_at_offset(p, size);
4774 nextsize = chunksize(nextchunk);
4775
4776 if (!prev_inuse(p)) {
4777 prevsize = prev_size (p);
4778 size += prevsize;
4779 p = chunk_at_offset(p, -((long) prevsize));
4780 if (__glibc_unlikely (chunksize(p) != prevsize))
4781 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4782 unlink_chunk (av, p);
4783 }
4784
4785 if (nextchunk != av->top) {
4786 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4787
4788 if (!nextinuse) {
4789 size += nextsize;
4790 unlink_chunk (av, nextchunk);
4791 } else
4792 clear_inuse_bit_at_offset(nextchunk, 0);
4793
4794 first_unsorted = unsorted_bin->fd;
4795 unsorted_bin->fd = p;
4796 first_unsorted->bk = p;
4797
4798 if (!in_smallbin_range (size)) {
4799 p->fd_nextsize = NULL;
4800 p->bk_nextsize = NULL;
4801 }
4802
4803 set_head(p, size | PREV_INUSE);
4804 p->bk = unsorted_bin;
4805 p->fd = first_unsorted;
4806 set_foot(p, size);
4807 }
4808
4809 else {
4810 size += nextsize;
4811 set_head(p, size | PREV_INUSE);
4812 av->top = p;
4813 }
4814
4815 } while ( (p = nextp) != 0);
4816
4817 }
4818 } while (fb++ != maxfb);
4819 }
4820
4821 /*
4822 ------------------------------ realloc ------------------------------
4823 */
4824
4825 static void *
4826 _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4827 INTERNAL_SIZE_T nb)
4828 {
4829 mchunkptr newp; /* chunk to return */
4830 INTERNAL_SIZE_T newsize; /* its size */
4831 void* newmem; /* corresponding user mem */
4832
4833 mchunkptr next; /* next contiguous chunk after oldp */
4834
4835 mchunkptr remainder; /* extra space at end of newp */
4836 unsigned long remainder_size; /* its size */
4837
4838 /* oldmem size */
4839 if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
4840 || __builtin_expect (oldsize >= av->system_mem, 0))
4841 malloc_printerr ("realloc(): invalid old size");
4842
4843 check_inuse_chunk (av, oldp);
4844
4845 /* All callers already filter out mmap'ed chunks. */
4846 assert (!chunk_is_mmapped (oldp));
4847
4848 next = chunk_at_offset (oldp, oldsize);
4849 INTERNAL_SIZE_T nextsize = chunksize (next);
4850 if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
4851 || __builtin_expect (nextsize >= av->system_mem, 0))
4852 malloc_printerr ("realloc(): invalid next size");
4853
4854 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4855 {
4856 /* already big enough; split below */
4857 newp = oldp;
4858 newsize = oldsize;
4859 }
4860
4861 else
4862 {
4863 /* Try to expand forward into top */
4864 if (next == av->top &&
4865 (unsigned long) (newsize = oldsize + nextsize) >=
4866 (unsigned long) (nb + MINSIZE))
4867 {
4868 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4869 av->top = chunk_at_offset (oldp, nb);
4870 set_head (av->top, (newsize - nb) | PREV_INUSE);
4871 check_inuse_chunk (av, oldp);
4872 return tag_new_usable (chunk2mem (oldp));
4873 }
4874
4875 /* Try to expand forward into next chunk; split off remainder below */
4876 else if (next != av->top &&
4877 !inuse (next) &&
4878 (unsigned long) (newsize = oldsize + nextsize) >=
4879 (unsigned long) (nb))
4880 {
4881 newp = oldp;
4882 unlink_chunk (av, next);
4883 }
4884
4885 /* allocate, copy, free */
4886 else
4887 {
4888 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4889 if (newmem == 0)
4890 return 0; /* propagate failure */
4891
4892 newp = mem2chunk (newmem);
4893 newsize = chunksize (newp);
4894
4895 /*
4896 Avoid copy if newp is next chunk after oldp.
4897 */
4898 if (newp == next)
4899 {
4900 newsize += oldsize;
4901 newp = oldp;
4902 }
4903 else
4904 {
4905 void *oldmem = chunk2mem (oldp);
4906 size_t sz = memsize (oldp);
4907 (void) tag_region (oldmem, sz);
4908 newmem = tag_new_usable (newmem);
4909 memcpy (newmem, oldmem, sz);
4910 _int_free (av, oldp, 1);
4911 check_inuse_chunk (av, newp);
4912 return newmem;
4913 }
4914 }
4915 }
4916
4917 /* If possible, free extra space in old or extended chunk */
4918
4919 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4920
4921 remainder_size = newsize - nb;
4922
4923 if (remainder_size < MINSIZE) /* not enough extra to split off */
4924 {
4925 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4926 set_inuse_bit_at_offset (newp, newsize);
4927 }
4928 else /* split remainder */
4929 {
4930 remainder = chunk_at_offset (newp, nb);
4931 /* Clear any user-space tags before writing the header. */
4932 remainder = tag_region (remainder, remainder_size);
4933 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4934 set_head (remainder, remainder_size | PREV_INUSE |
4935 (av != &main_arena ? NON_MAIN_ARENA : 0));
4936 /* Mark remainder as inuse so free() won't complain */
4937 set_inuse_bit_at_offset (remainder, remainder_size);
4938 _int_free (av, remainder, 1);
4939 }
4940
4941 check_inuse_chunk (av, newp);
4942 return tag_new_usable (chunk2mem (newp));
4943 }
4944
4945 /*
4946 ------------------------------ memalign ------------------------------
4947 */
4948
4949 static void *
4950 _int_memalign (mstate av, size_t alignment, size_t bytes)
4951 {
4952 INTERNAL_SIZE_T nb; /* padded request size */
4953 char *m; /* memory returned by malloc call */
4954 mchunkptr p; /* corresponding chunk */
4955 char *brk; /* alignment point within p */
4956 mchunkptr newp; /* chunk to return */
4957 INTERNAL_SIZE_T newsize; /* its size */
4958 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4959 mchunkptr remainder; /* spare room at end to split off */
4960 unsigned long remainder_size; /* its size */
4961 INTERNAL_SIZE_T size;
4962
4963
4964
4965 if (!checked_request2size (bytes, &nb))
4966 {
4967 __set_errno (ENOMEM);
4968 return NULL;
4969 }
4970
4971 /*
4972 Strategy: find a spot within that chunk that meets the alignment
4973 request, and then possibly free the leading and trailing space.
4974 */
4975
4976 /* Call malloc with worst case padding to hit alignment. */
4977
4978 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4979
4980 if (m == 0)
4981 return 0; /* propagate failure */
4982
4983 p = mem2chunk (m);
4984
4985 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4986
4987 { /*
4988 Find an aligned spot inside chunk. Since we need to give back
4989 leading space in a chunk of at least MINSIZE, if the first
4990 calculation places us at a spot with less than MINSIZE leader,
4991 we can move to the next aligned spot -- we've allocated enough
4992 total room so that this is always possible.
4993 */
4994 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4995 - ((signed long) alignment));
4996 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4997 brk += alignment;
4998
4999 newp = (mchunkptr) brk;
5000 leadsize = brk - (char *) (p);
5001 newsize = chunksize (p) - leadsize;
5002
5003 /* For mmapped chunks, just adjust offset */
5004 if (chunk_is_mmapped (p))
5005 {
5006 set_prev_size (newp, prev_size (p) + leadsize);
5007 set_head (newp, newsize | IS_MMAPPED);
5008 return chunk2mem (newp);
5009 }
5010
5011 /* Otherwise, give back leader, use the rest */
5012 set_head (newp, newsize | PREV_INUSE |
5013 (av != &main_arena ? NON_MAIN_ARENA : 0));
5014 set_inuse_bit_at_offset (newp, newsize);
5015 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5016 _int_free (av, p, 1);
5017 p = newp;
5018
5019 assert (newsize >= nb &&
5020 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
5021 }
5022
5023 /* Also give back spare room at the end */
5024 if (!chunk_is_mmapped (p))
5025 {
5026 size = chunksize (p);
5027 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
5028 {
5029 remainder_size = size - nb;
5030 remainder = chunk_at_offset (p, nb);
5031 set_head (remainder, remainder_size | PREV_INUSE |
5032 (av != &main_arena ? NON_MAIN_ARENA : 0));
5033 set_head_size (p, nb);
5034 _int_free (av, remainder, 1);
5035 }
5036 }
5037
5038 check_inuse_chunk (av, p);
5039 return chunk2mem (p);
5040 }
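/*
   Editor's note -- illustrative sketch, not part of this file.  The
   over-allocation above (nb + alignment + MINSIZE) is what makes the
   strategy work: rounding up to the next multiple of a power-of-two
   alignment skips at most alignment - 1 bytes, and if that leaves a
   leader smaller than MINSIZE the code can step forward one more
   alignment unit and still stay inside the chunk.  The same rounding
   expression, shown on plain integers (MIN_LEADER is a stand-in for
   the internal MINSIZE):

   #include <assert.h>
   #include <stddef.h>
   #include <stdint.h>

   enum { MIN_LEADER = 32 };                // placeholder for MINSIZE

   // base points at a region of at least size + alignment + MIN_LEADER
   // bytes; alignment must be a power of two.
   static uintptr_t
   aligned_spot (uintptr_t base, size_t size, size_t alignment)
   {
     assert ((alignment & (alignment - 1)) == 0);
     uintptr_t spot = (base + alignment - 1) & -(uintptr_t) alignment;
     if (spot != base && spot - base < MIN_LEADER)
       spot += alignment;                   // leader too small: next slot
     // The chosen spot plus the payload still fits in the region.
     assert (spot + size <= base + size + alignment + MIN_LEADER);
     return spot;
   }
*/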
5041
5042
5043 /*
5044 ------------------------------ malloc_trim ------------------------------
5045 */
5046
5047 static int
5048 mtrim (mstate av, size_t pad)
5049 {
5050 /* Ensure all blocks are consolidated. */
5051 malloc_consolidate (av);
5052
5053 const size_t ps = GLRO (dl_pagesize);
5054 int psindex = bin_index (ps);
5055 const size_t psm1 = ps - 1;
5056
5057 int result = 0;
5058 for (int i = 1; i < NBINS; ++i)
5059 if (i == 1 || i >= psindex)
5060 {
5061 mbinptr bin = bin_at (av, i);
5062
5063 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5064 {
5065 INTERNAL_SIZE_T size = chunksize (p);
5066
5067 if (size > psm1 + sizeof (struct malloc_chunk))
5068 {
5069 /* See whether the chunk contains at least one unused page. */
5070 char *paligned_mem = (char *) (((uintptr_t) p
5071 + sizeof (struct malloc_chunk)
5072 + psm1) & ~psm1);
5073
5074 assert ((char *) chunk2mem (p) + 2 * CHUNK_HDR_SZ
5075 <= paligned_mem);
5076 assert ((char *) p + size > paligned_mem);
5077
5078 /* This is the size we could potentially free. */
5079 size -= paligned_mem - (char *) p;
5080
5081 if (size > psm1)
5082 {
5083 #if MALLOC_DEBUG
5084 /* When debugging we simulate destroying the memory
5085 content. */
5086 memset (paligned_mem, 0x89, size & ~psm1);
5087 #endif
5088 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5089
5090 result = 1;
5091 }
5092 }
5093 }
5094 }
5095
5096 #ifndef MORECORE_CANNOT_TRIM
5097 return result | (av == &main_arena ? systrim (pad, av) : 0);
5098
5099 #else
5100 return result;
5101 #endif
5102 }
5103
5104
5105 int
5106 __malloc_trim (size_t s)
5107 {
5108 int result = 0;
5109
5110 if (__malloc_initialized < 0)
5111 ptmalloc_init ();
5112
5113 mstate ar_ptr = &main_arena;
5114 do
5115 {
5116 __libc_lock_lock (ar_ptr->mutex);
5117 result |= mtrim (ar_ptr, s);
5118 __libc_lock_unlock (ar_ptr->mutex);
5119
5120 ar_ptr = ar_ptr->next;
5121 }
5122 while (ar_ptr != &main_arena);
5123
5124 return result;
5125 }
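/*
   Editor's note -- illustrative sketch, not part of this file.
   malloc_trim walks every arena as above; the pad argument is the
   amount of free space to leave at the top of the main arena, and the
   result is 1 if any memory could be released to the system (via sbrk
   or MADV_DONTNEED), 0 otherwise.  A typical use after dropping a
   large working set:

   #include <malloc.h>
   #include <stdio.h>
   #include <stdlib.h>

   int
   main (void)
   {
     enum { N = 1024 };
     void *blocks[N];
     for (int i = 0; i < N; ++i)
       blocks[i] = malloc (64 * 1024);      // below the mmap threshold
     for (int i = 0; i < N; ++i)
       free (blocks[i]);

     int released = malloc_trim (0);        // keep no extra top padding
     printf ("memory released: %s\n", released ? "yes" : "no");
     return 0;
   }
*/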
5126
5127
5128 /*
5129 ------------------------- malloc_usable_size -------------------------
5130 */
5131
5132 static size_t
5133 musable (void *mem)
5134 {
5135 mchunkptr p;
5136 if (mem != 0)
5137 {
5138 size_t result = 0;
5139
5140 p = mem2chunk (mem);
5141
5142 if (__builtin_expect (using_malloc_checking == 1, 0))
5143 return malloc_check_get_size (p);
5144
5145 if (chunk_is_mmapped (p))
5146 {
5147 if (DUMPED_MAIN_ARENA_CHUNK (p))
5148 result = chunksize (p) - SIZE_SZ;
5149 else
5150 result = chunksize (p) - CHUNK_HDR_SZ;
5151 }
5152 else if (inuse (p))
5153 result = memsize (p);
5154
5155 return result;
5156 }
5157 return 0;
5158 }
5159
5160
5161 size_t
5162 __malloc_usable_size (void *m)
5163 {
5164 size_t result;
5165
5166 result = musable (m);
5167 return result;
5168 }
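/*
   Editor's note -- illustrative sketch, not part of this file.
   musable reports the bytes actually available in the underlying
   chunk, which can exceed the requested size because of size-class
   rounding and alignment; the surplus is usable but its exact amount
   is an implementation detail:

   #include <malloc.h>
   #include <stdio.h>
   #include <stdlib.h>

   int
   main (void)
   {
     void *p = malloc (13);
     if (p == NULL)
       return 1;
     // Typically prints a value of at least 13 (for example 24 on
     // 64-bit glibc); the exact number depends on the build.
     printf ("requested 13, usable %zu\n", malloc_usable_size (p));
     free (p);
     return 0;
   }
*/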
5169
5170 /*
5171 ------------------------------ mallinfo ------------------------------
5172 Accumulate malloc statistics for arena AV into M.
5173 */
5174
5175 static void
5176 int_mallinfo (mstate av, struct mallinfo2 *m)
5177 {
5178 size_t i;
5179 mbinptr b;
5180 mchunkptr p;
5181 INTERNAL_SIZE_T avail;
5182 INTERNAL_SIZE_T fastavail;
5183 int nblocks;
5184 int nfastblocks;
5185
5186 check_malloc_state (av);
5187
5188 /* Account for top */
5189 avail = chunksize (av->top);
5190 nblocks = 1; /* top always exists */
5191
5192 /* traverse fastbins */
5193 nfastblocks = 0;
5194 fastavail = 0;
5195
5196 for (i = 0; i < NFASTBINS; ++i)
5197 {
5198 for (p = fastbin (av, i);
5199 p != 0;
5200 p = REVEAL_PTR (p->fd))
5201 {
5202 if (__glibc_unlikely (misaligned_chunk (p)))
5203 malloc_printerr ("int_mallinfo(): "
5204 "unaligned fastbin chunk detected");
5205 ++nfastblocks;
5206 fastavail += chunksize (p);
5207 }
5208 }
5209
5210 avail += fastavail;
5211
5212 /* traverse regular bins */
5213 for (i = 1; i < NBINS; ++i)
5214 {
5215 b = bin_at (av, i);
5216 for (p = last (b); p != b; p = p->bk)
5217 {
5218 ++nblocks;
5219 avail += chunksize (p);
5220 }
5221 }
5222
5223 m->smblks += nfastblocks;
5224 m->ordblks += nblocks;
5225 m->fordblks += avail;
5226 m->uordblks += av->system_mem - avail;
5227 m->arena += av->system_mem;
5228 m->fsmblks += fastavail;
5229 if (av == &main_arena)
5230 {
5231 m->hblks = mp_.n_mmaps;
5232 m->hblkhd = mp_.mmapped_mem;
5233 m->usmblks = 0;
5234 m->keepcost = chunksize (av->top);
5235 }
5236 }
5237
5238
5239 struct mallinfo2
5240 __libc_mallinfo2 (void)
5241 {
5242 struct mallinfo2 m;
5243 mstate ar_ptr;
5244
5245 if (__malloc_initialized < 0)
5246 ptmalloc_init ();
5247
5248 memset (&m, 0, sizeof (m));
5249 ar_ptr = &main_arena;
5250 do
5251 {
5252 __libc_lock_lock (ar_ptr->mutex);
5253 int_mallinfo (ar_ptr, &m);
5254 __libc_lock_unlock (ar_ptr->mutex);
5255
5256 ar_ptr = ar_ptr->next;
5257 }
5258 while (ar_ptr != &main_arena);
5259
5260 return m;
5261 }
5262 libc_hidden_def (__libc_mallinfo2)
5263
5264 struct mallinfo
5265 __libc_mallinfo (void)
5266 {
5267 struct mallinfo m;
5268 struct mallinfo2 m2 = __libc_mallinfo2 ();
5269
5270 m.arena = m2.arena;
5271 m.ordblks = m2.ordblks;
5272 m.smblks = m2.smblks;
5273 m.hblks = m2.hblks;
5274 m.hblkhd = m2.hblkhd;
5275 m.usmblks = m2.usmblks;
5276 m.fsmblks = m2.fsmblks;
5277 m.uordblks = m2.uordblks;
5278 m.fordblks = m2.fordblks;
5279 m.keepcost = m2.keepcost;
5280
5281 return m;
5282 }
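/*
   Editor's note -- illustrative sketch, not part of this file.
   mallinfo2 is the interface to prefer: its fields are size_t, while
   the legacy struct mallinfo above narrows them to int and can
   overflow in 64-bit processes with large heaps.  Reading a few of
   the accumulated statistics:

   #include <malloc.h>
   #include <stdio.h>

   int
   main (void)
   {
     struct mallinfo2 mi = mallinfo2 ();
     printf ("arena    (non-mmapped bytes from system): %zu\n", mi.arena);
     printf ("uordblks (bytes in use):                  %zu\n", mi.uordblks);
     printf ("fordblks (free bytes in arenas):          %zu\n", mi.fordblks);
     printf ("hblkhd   (bytes in mmapped blocks):       %zu\n", mi.hblkhd);
     return 0;
   }
*/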
5283
5284
5285 /*
5286 ------------------------------ malloc_stats ------------------------------
5287 */
5288
5289 void
5290 __malloc_stats (void)
5291 {
5292 int i;
5293 mstate ar_ptr;
5294 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5295
5296 if (__malloc_initialized < 0)
5297 ptmalloc_init ();
5298 _IO_flockfile (stderr);
5299 int old_flags2 = stderr->_flags2;
5300 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5301 for (i = 0, ar_ptr = &main_arena;; i++)
5302 {
5303 struct mallinfo2 mi;
5304
5305 memset (&mi, 0, sizeof (mi));
5306 __libc_lock_lock (ar_ptr->mutex);
5307 int_mallinfo (ar_ptr, &mi);
5308 fprintf (stderr, "Arena %d:\n", i);
5309 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5310 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5311 #if MALLOC_DEBUG > 1
5312 if (i > 0)
5313 dump_heap (heap_for_ptr (top (ar_ptr)));
5314 #endif
5315 system_b += mi.arena;
5316 in_use_b += mi.uordblks;
5317 __libc_lock_unlock (ar_ptr->mutex);
5318 ar_ptr = ar_ptr->next;
5319 if (ar_ptr == &main_arena)
5320 break;
5321 }
5322 fprintf (stderr, "Total (incl. mmap):\n");
5323 fprintf (stderr, "system bytes = %10u\n", system_b);
5324 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5325 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5326 fprintf (stderr, "max mmap bytes = %10lu\n",
5327 (unsigned long) mp_.max_mmapped_mem);
5328 stderr->_flags2 = old_flags2;
5329 _IO_funlockfile (stderr);
5330 }
5331
5332
5333 /*
5334 ------------------------------ mallopt ------------------------------
5335 */
5336 static __always_inline int
5337 do_set_trim_threshold (size_t value)
5338 {
5339 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5340 mp_.no_dyn_threshold);
5341 mp_.trim_threshold = value;
5342 mp_.no_dyn_threshold = 1;
5343 return 1;
5344 }
5345
5346 static __always_inline int
5347 do_set_top_pad (size_t value)
5348 {
5349 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5350 mp_.no_dyn_threshold);
5351 mp_.top_pad = value;
5352 mp_.no_dyn_threshold = 1;
5353 return 1;
5354 }
5355
5356 static __always_inline int
5357 do_set_mmap_threshold (size_t value)
5358 {
5359 /* Forbid setting the threshold too high. */
5360 if (value <= HEAP_MAX_SIZE / 2)
5361 {
5362 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5363 mp_.no_dyn_threshold);
5364 mp_.mmap_threshold = value;
5365 mp_.no_dyn_threshold = 1;
5366 return 1;
5367 }
5368 return 0;
5369 }
5370
5371 static __always_inline int
5372 do_set_mmaps_max (int32_t value)
5373 {
5374 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5375 mp_.no_dyn_threshold);
5376 mp_.n_mmaps_max = value;
5377 mp_.no_dyn_threshold = 1;
5378 return 1;
5379 }
5380
5381 static __always_inline int
5382 do_set_mallopt_check (int32_t value)
5383 {
5384 return 1;
5385 }
5386
5387 static __always_inline int
5388 do_set_perturb_byte (int32_t value)
5389 {
5390 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5391 perturb_byte = value;
5392 return 1;
5393 }
5394
5395 static __always_inline int
5396 do_set_arena_test (size_t value)
5397 {
5398 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5399 mp_.arena_test = value;
5400 return 1;
5401 }
5402
5403 static __always_inline int
5404 do_set_arena_max (size_t value)
5405 {
5406 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5407 mp_.arena_max = value;
5408 return 1;
5409 }
5410
5411 #if USE_TCACHE
5412 static __always_inline int
5413 do_set_tcache_max (size_t value)
5414 {
5415 if (value <= MAX_TCACHE_SIZE)
5416 {
5417 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5418 mp_.tcache_max_bytes = value;
5419 mp_.tcache_bins = csize2tidx (request2size (value)) + 1;
5420 return 1;
5421 }
5422 return 0;
5423 }
5424
5425 static __always_inline int
5426 do_set_tcache_count (size_t value)
5427 {
5428 if (value <= MAX_TCACHE_COUNT)
5429 {
5430 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5431 mp_.tcache_count = value;
5432 return 1;
5433 }
5434 return 0;
5435 }
5436
5437 static __always_inline int
5438 do_set_tcache_unsorted_limit (size_t value)
5439 {
5440 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5441 mp_.tcache_unsorted_limit = value;
5442 return 1;
5443 }
5444 #endif
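/*
   Editor's note -- illustrative sketch, not part of this file.  The
   tcache helpers above have no mallopt parameter; they are reached
   through the glibc.malloc.* tunables (see the tunable handling in
   arena.c).  For example, the per-bin cache depth and the largest
   cached request size can be set from the environment before the
   program starts:

   GLIBC_TUNABLES=glibc.malloc.tcache_count=4:glibc.malloc.tcache_max=1024 ./app

   Setting glibc.malloc.tcache_count=0 effectively disables the
   thread cache.
*/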
5445
5446 static inline int
5447 __always_inline
5448 do_set_mxfast (size_t value)
5449 {
5450 if (value <= MAX_FAST_SIZE)
5451 {
5452 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5453 set_max_fast (value);
5454 return 1;
5455 }
5456 return 0;
5457 }
5458
5459 int
5460 __libc_mallopt (int param_number, int value)
5461 {
5462 mstate av = &main_arena;
5463 int res = 1;
5464
5465 if (__malloc_initialized < 0)
5466 ptmalloc_init ();
5467 __libc_lock_lock (av->mutex);
5468
5469 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5470
5471 /* We must consolidate main arena before changing max_fast
5472 (see definition of set_max_fast). */
5473 malloc_consolidate (av);
5474
5475 /* Many of these helper functions take a size_t. We do not worry
5476 about overflow here, because negative int values will wrap to
5477 very large size_t values and the helpers have sufficient range
5478 checking for such conversions. Many of these helpers are also
5479 used by the tunables macros in arena.c. */
5480
5481 switch (param_number)
5482 {
5483 case M_MXFAST:
5484 res = do_set_mxfast (value);
5485 break;
5486
5487 case M_TRIM_THRESHOLD:
5488 res = do_set_trim_threshold (value);
5489 break;
5490
5491 case M_TOP_PAD:
5492 res = do_set_top_pad (value);
5493 break;
5494
5495 case M_MMAP_THRESHOLD:
5496 res = do_set_mmap_threshold (value);
5497 break;
5498
5499 case M_MMAP_MAX:
5500 res = do_set_mmaps_max (value);
5501 break;
5502
5503 case M_CHECK_ACTION:
5504 res = do_set_mallopt_check (value);
5505 break;
5506
5507 case M_PERTURB:
5508 res = do_set_perturb_byte (value);
5509 break;
5510
5511 case M_ARENA_TEST:
5512 if (value > 0)
5513 res = do_set_arena_test (value);
5514 break;
5515
5516 case M_ARENA_MAX:
5517 if (value > 0)
5518 res = do_set_arena_max (value);
5519 break;
5520 }
5521 __libc_lock_unlock (av->mutex);
5522 return res;
5523 }
5524 libc_hidden_def (__libc_mallopt)
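/*
   Editor's note -- illustrative sketch, not part of this file.  The
   public entry point above takes the M_* constants from <malloc.h>
   and returns 1 on success, 0 if the value is rejected (for example
   an M_MMAP_THRESHOLD larger than HEAP_MAX_SIZE / 2).  Setting one of
   the dynamic parameters also disables the automatic threshold
   adjustment (no_dyn_threshold):

   #include <malloc.h>
   #include <stdio.h>

   int
   main (void)
   {
     // Serve requests of 256 KiB and above directly with mmap.
     if (mallopt (M_MMAP_THRESHOLD, 256 * 1024) == 0)
       fputs ("M_MMAP_THRESHOLD rejected\n", stderr);

     // Only return heap memory to the kernel once at least 64 MiB of
     // free space has accumulated at the top of the main arena.
     if (mallopt (M_TRIM_THRESHOLD, 64 * 1024 * 1024) == 0)
       fputs ("M_TRIM_THRESHOLD rejected\n", stderr);
     return 0;
   }
*/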
5525
5526
5527 /*
5528 -------------------- Alternative MORECORE functions --------------------
5529 */
5530
5531
5532 /*
5533 General Requirements for MORECORE.
5534
5535 The MORECORE function must have the following properties:
5536
5537 If MORECORE_CONTIGUOUS is false:
5538
5539 * MORECORE must allocate in multiples of pagesize. It will
5540 only be called with arguments that are multiples of pagesize.
5541
5542 * MORECORE(0) must return an address that is at least
5543 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5544
5545 else (i.e. if MORECORE_CONTIGUOUS is true):
5546
5547 * Consecutive calls to MORECORE with positive arguments
5548 return increasing addresses, indicating that space has been
5549 contiguously extended.
5550
5551 * MORECORE need not allocate in multiples of pagesize.
5552 Calls to MORECORE need not pass arguments that are multiples of pagesize.
5553
5554 * MORECORE need not page-align.
5555
5556 In either case:
5557
5558 * MORECORE may allocate more memory than requested. (Or even less,
5559 but this will generally result in a malloc failure.)
5560
5561 * MORECORE must not allocate memory when given argument zero, but
5562 instead return the address one past the end of the memory obtained
5563 from the previous nonzero call. This malloc does NOT call MORECORE(0)
5564 until at least one call with positive arguments is made, so
5565 the initial value returned is not important.
5566
5567 * Even though consecutive calls to MORECORE need not return contiguous
5568 addresses, regions that do happen to be adjacent may be coalesced, so it
5569 must be OK for a single malloc'ed chunk to span multiple MORECORE regions.
5570
5571 * MORECORE need not handle negative arguments -- it may instead
5572 just return MORECORE_FAILURE when given negative arguments.
5573 Negative arguments are always multiples of pagesize. MORECORE
5574 must not misinterpret negative args as large positive unsigned
5575 args. You can suppress all such calls from even occurring by defining
5576 MORECORE_CANNOT_TRIM.
5577
5578 There is some variation across systems about the type of the
5579 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5580 actually be size_t, because sbrk supports negative args, so it is
5581 normally the signed type of the same width as size_t (sometimes
5582 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5583 matter though. Internally, we use "long" as arguments, which should
5584 work across all reasonable possibilities.
5585
5586 Additionally, if MORECORE ever returns failure for a positive
5587 request, then mmap is used as a noncontiguous system allocator. This
5588 is a useful backup strategy for systems with holes in address spaces
5589 -- in this case sbrk cannot contiguously expand the heap, but mmap
5590 may be able to map noncontiguous space.
5591
5592 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5593 a function that always returns MORECORE_FAILURE.
5594
5595 If you are using this malloc with something other than sbrk (or its
5596 emulation) to supply memory regions, you probably want to set
5597 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5598 allocator kindly contributed for pre-OSX macOS. It uses virtually
5599 but not necessarily physically contiguous non-paged memory (locked
5600 in, present and won't get swapped out). You can use it by
5601 uncommenting this section, adding some #includes, and setting up the
5602 appropriate defines above:
5603
5604 *#define MORECORE osMoreCore
5605 *#define MORECORE_CONTIGUOUS 0
5606
5607 There is also a shutdown routine that should somehow be called for
5608 cleanup upon program exit.
5609
5610 *#define MAX_POOL_ENTRIES 100
5611 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5612 static int next_os_pool;
5613 void *our_os_pools[MAX_POOL_ENTRIES];
5614
5615 void *osMoreCore(int size)
5616 {
5617 void *ptr = 0;
5618 static void *sbrk_top = 0;
5619
5620 if (size > 0)
5621 {
5622 if (size < MINIMUM_MORECORE_SIZE)
5623 size = MINIMUM_MORECORE_SIZE;
5624 if (CurrentExecutionLevel() == kTaskLevel)
5625 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5626 if (ptr == 0)
5627 {
5628 return (void *) MORECORE_FAILURE;
5629 }
5630 // save ptrs so they can be freed during cleanup
5631 our_os_pools[next_os_pool] = ptr;
5632 next_os_pool++;
5633 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5634 sbrk_top = (char *) ptr + size;
5635 return ptr;
5636 }
5637 else if (size < 0)
5638 {
5639 // we don't currently support shrink behavior
5640 return (void *) MORECORE_FAILURE;
5641 }
5642 else
5643 {
5644 return sbrk_top;
5645 }
5646 }
5647
5648 // cleanup any allocated memory pools
5649 // called as last thing before shutting down driver
5650
5651 void osCleanupMem(void)
5652 {
5653 void **ptr;
5654
5655 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5656 if (*ptr)
5657 {
5658 PoolDeallocate(*ptr);
5659 * ptr = 0;
5660 }
5661 }
5662
5663 */
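/*
   Editor's note -- illustrative sketch, not part of this file.  As
   the note above says, mmap can be made the only system allocator by
   supplying a MORECORE that always fails.  A minimal replacement,
   defined before this file is compiled in place of sbrk, could look
   like this:

   #include <stddef.h>

   static void *
   fail_morecore (ptrdiff_t increment)
   {
     // Refuse every request; malloc then obtains all system memory
     // through mmap, giving a noncontiguous main arena.
     return (void *) MORECORE_FAILURE;
   }

   #define MORECORE fail_morecore
   #define MORECORE_CONTIGUOUS 0
   #define MORECORE_CANNOT_TRIM
*/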
5664
5665
5666 /* Helper code. */
5667
5668 extern char **__libc_argv attribute_hidden;
5669
5670 static void
5671 malloc_printerr (const char *str)
5672 {
5673 __libc_message (do_abort, "%s\n", str);
5674 __builtin_unreachable ();
5675 }
5676
5677 /* We need a wrapper function for one of the additions of POSIX. */
5678 int
5679 __posix_memalign (void **memptr, size_t alignment, size_t size)
5680 {
5681 void *mem;
5682
5683 /* Test whether the ALIGNMENT argument is valid. It must be a power of
5684 two multiple of sizeof (void *). */
5685 if (alignment % sizeof (void *) != 0
5686 || !powerof2 (alignment / sizeof (void *))
5687 || alignment == 0)
5688 return EINVAL;
5689
5690
5691 void *address = RETURN_ADDRESS (0);
5692 mem = _mid_memalign (alignment, size, address);
5693
5694 if (mem != NULL)
5695 {
5696 *memptr = mem;
5697 return 0;
5698 }
5699
5700 return ENOMEM;
5701 }
5702 weak_alias (__posix_memalign, posix_memalign)
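/*
   Editor's note -- illustrative sketch, not part of this file.
   Unlike malloc, the wrapper above reports failure through its return
   value (0, EINVAL or ENOMEM) rather than through errno, and the
   alignment must be a power-of-two multiple of sizeof (void *):

   #include <errno.h>
   #include <stdint.h>
   #include <stdio.h>
   #include <stdlib.h>
   #include <string.h>

   int
   main (void)
   {
     void *buf;
     int rc = posix_memalign (&buf, 64, 4096);   // 64-byte aligned 4 KiB
     if (rc != 0)
       {
         fprintf (stderr, "posix_memalign: %s\n", strerror (rc));
         return 1;
       }
     printf ("aligned: %s\n", ((uintptr_t) buf % 64) == 0 ? "yes" : "no");
     free (buf);

     // 3 is neither a power of two nor a multiple of sizeof (void *),
     // so this is rejected before any allocation is attempted.
     rc = posix_memalign (&buf, 3, 16);
     printf ("alignment 3: rc = %d (EINVAL = %d)\n", rc, EINVAL);
     return 0;
   }
*/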
5703
5704
5705 int
5706 __malloc_info (int options, FILE *fp)
5707 {
5708 /* For now, at least, no option flags are defined. */
5709 if (options != 0)
5710 return EINVAL;
5711
5712 int n = 0;
5713 size_t total_nblocks = 0;
5714 size_t total_nfastblocks = 0;
5715 size_t total_avail = 0;
5716 size_t total_fastavail = 0;
5717 size_t total_system = 0;
5718 size_t total_max_system = 0;
5719 size_t total_aspace = 0;
5720 size_t total_aspace_mprotect = 0;
5721
5722
5723
5724 if (__malloc_initialized < 0)
5725 ptmalloc_init ();
5726
5727 fputs ("<malloc version=\"1\">\n", fp);
5728
5729 /* Iterate over all arenas currently in use. */
5730 mstate ar_ptr = &main_arena;
5731 do
5732 {
5733 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5734
5735 size_t nblocks = 0;
5736 size_t nfastblocks = 0;
5737 size_t avail = 0;
5738 size_t fastavail = 0;
5739 struct
5740 {
5741 size_t from;
5742 size_t to;
5743 size_t total;
5744 size_t count;
5745 } sizes[NFASTBINS + NBINS - 1];
5746 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5747
5748 __libc_lock_lock (ar_ptr->mutex);
5749
5750 /* Account for top chunk. The top-most available chunk is
5751 treated specially and is never in any bin. See "initial_top"
5752 comments. */
5753 avail = chunksize (ar_ptr->top);
5754 nblocks = 1; /* Top always exists. */
5755
5756 for (size_t i = 0; i < NFASTBINS; ++i)
5757 {
5758 mchunkptr p = fastbin (ar_ptr, i);
5759 if (p != NULL)
5760 {
5761 size_t nthissize = 0;
5762 size_t thissize = chunksize (p);
5763
5764 while (p != NULL)
5765 {
5766 if (__glibc_unlikely (misaligned_chunk (p)))
5767 malloc_printerr ("__malloc_info(): "
5768 "unaligned fastbin chunk detected");
5769 ++nthissize;
5770 p = REVEAL_PTR (p->fd);
5771 }
5772
5773 fastavail += nthissize * thissize;
5774 nfastblocks += nthissize;
5775 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5776 sizes[i].to = thissize;
5777 sizes[i].count = nthissize;
5778 }
5779 else
5780 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5781
5782 sizes[i].total = sizes[i].count * sizes[i].to;
5783 }
5784
5785
5786 mbinptr bin;
5787 struct malloc_chunk *r;
5788
5789 for (size_t i = 1; i < NBINS; ++i)
5790 {
5791 bin = bin_at (ar_ptr, i);
5792 r = bin->fd;
5793 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5794 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5795 = sizes[NFASTBINS - 1 + i].count = 0;
5796
5797 if (r != NULL)
5798 while (r != bin)
5799 {
5800 size_t r_size = chunksize_nomask (r);
5801 ++sizes[NFASTBINS - 1 + i].count;
5802 sizes[NFASTBINS - 1 + i].total += r_size;
5803 sizes[NFASTBINS - 1 + i].from
5804 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
5805 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5806 r_size);
5807
5808 r = r->fd;
5809 }
5810
5811 if (sizes[NFASTBINS - 1 + i].count == 0)
5812 sizes[NFASTBINS - 1 + i].from = 0;
5813 nblocks += sizes[NFASTBINS - 1 + i].count;
5814 avail += sizes[NFASTBINS - 1 + i].total;
5815 }
5816
5817 size_t heap_size = 0;
5818 size_t heap_mprotect_size = 0;
5819 size_t heap_count = 0;
5820 if (ar_ptr != &main_arena)
5821 {
5822 /* Iterate over the arena heaps from back to front. */
5823 heap_info *heap = heap_for_ptr (top (ar_ptr));
5824 do
5825 {
5826 heap_size += heap->size;
5827 heap_mprotect_size += heap->mprotect_size;
5828 heap = heap->prev;
5829 ++heap_count;
5830 }
5831 while (heap != NULL);
5832 }
5833
5834 __libc_lock_unlock (ar_ptr->mutex);
5835
5836 total_nfastblocks += nfastblocks;
5837 total_fastavail += fastavail;
5838
5839 total_nblocks += nblocks;
5840 total_avail += avail;
5841
5842 for (size_t i = 0; i < nsizes; ++i)
5843 if (sizes[i].count != 0 && i != NFASTBINS)
5844 fprintf (fp, "\
5845 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5846 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5847
5848 if (sizes[NFASTBINS].count != 0)
5849 fprintf (fp, "\
5850 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5851 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5852 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5853
5854 total_system += ar_ptr->system_mem;
5855 total_max_system += ar_ptr->max_system_mem;
5856
5857 fprintf (fp,
5858 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5859 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5860 "<system type=\"current\" size=\"%zu\"/>\n"
5861 "<system type=\"max\" size=\"%zu\"/>\n",
5862 nfastblocks, fastavail, nblocks, avail,
5863 ar_ptr->system_mem, ar_ptr->max_system_mem);
5864
5865 if (ar_ptr != &main_arena)
5866 {
5867 fprintf (fp,
5868 "<aspace type=\"total\" size=\"%zu\"/>\n"
5869 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5870 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5871 heap_size, heap_mprotect_size, heap_count);
5872 total_aspace += heap_size;
5873 total_aspace_mprotect += heap_mprotect_size;
5874 }
5875 else
5876 {
5877 fprintf (fp,
5878 "<aspace type=\"total\" size=\"%zu\"/>\n"
5879 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5880 ar_ptr->system_mem, ar_ptr->system_mem);
5881 total_aspace += ar_ptr->system_mem;
5882 total_aspace_mprotect += ar_ptr->system_mem;
5883 }
5884
5885 fputs ("</heap>\n", fp);
5886 ar_ptr = ar_ptr->next;
5887 }
5888 while (ar_ptr != &main_arena);
5889
5890 fprintf (fp,
5891 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5892 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5893 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5894 "<system type=\"current\" size=\"%zu\"/>\n"
5895 "<system type=\"max\" size=\"%zu\"/>\n"
5896 "<aspace type=\"total\" size=\"%zu\"/>\n"
5897 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5898 "</malloc>\n",
5899 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
5900 mp_.n_mmaps, mp_.mmapped_mem,
5901 total_system, total_max_system,
5902 total_aspace, total_aspace_mprotect);
5903
5904 return 0;
5905 }
5906 weak_alias (__malloc_info, malloc_info)
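/*
   Editor's note -- illustrative sketch, not part of this file.  The
   report above is XML written to an arbitrary stdio stream, and
   options must currently be 0.  It can be captured in memory rather
   than sent to a terminal, e.g. with open_memstream:

   #include <malloc.h>
   #include <stdio.h>
   #include <stdlib.h>

   int
   main (void)
   {
     char *xml = NULL;
     size_t len = 0;
     FILE *fp = open_memstream (&xml, &len);
     if (fp == NULL)
       return 1;

     int rc = malloc_info (0, fp);        // nonzero options are rejected
     fclose (fp);                         // finalizes xml and len
     if (rc != 0)
       {
         free (xml);
         return 1;
       }
     printf ("captured %zu bytes of <malloc> XML\n", len);
     free (xml);
     return 0;
   }
*/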
5907
5908
5909 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5910 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5911 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5912 strong_alias (__libc_memalign, __memalign)
5913 weak_alias (__libc_memalign, memalign)
5914 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5915 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5916 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5917 strong_alias (__libc_mallinfo, __mallinfo)
5918 weak_alias (__libc_mallinfo, mallinfo)
5919 strong_alias (__libc_mallinfo2, __mallinfo2)
5920 weak_alias (__libc_mallinfo2, mallinfo2)
5921 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5922
5923 weak_alias (__malloc_stats, malloc_stats)
5924 weak_alias (__malloc_usable_size, malloc_usable_size)
5925 weak_alias (__malloc_trim, malloc_trim)
5926
5927 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
5928 compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
5929 #endif
5930
5931 /* ------------------------------------------------------------
5932 History:
5933
5934 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5935
5936 */
5937 /*
5938 * Local variables:
5939 * c-basic-offset: 2
5940 * End:
5941 */