1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2009, 2010, 2011 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /*
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
25
26 There have been substantial changes made after the integration into
27 glibc in all parts of the code. Do not look for much commonality
28 with the ptmalloc2 version.
29
30 * Version ptmalloc2-20011215
31 based on:
32 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
33
34 * Quickstart
35
36 In order to compile this implementation, a Makefile is provided with
37 the ptmalloc2 distribution, which has pre-defined targets for some
38 popular systems (e.g. "make posix" for Posix threads). All that is
39 typically required with regard to compiler flags is the selection of
40 the thread package via defining one out of USE_PTHREADS, USE_THR or
41 USE_SPROC. Check the thread-m.h file for what effects this has.
42 Many/most systems will additionally require USE_TSD_DATA_HACK to be
43 defined, so this is the default for "make posix".
44
45 * Why use this malloc?
46
47 This is not the fastest, most space-conserving, most portable, or
48 most tunable malloc ever written. However it is among the fastest
49 while also being among the most space-conserving, portable and tunable.
50 Consistent balance across these factors results in a good general-purpose
51 allocator for malloc-intensive programs.
52
53 The main properties of the algorithms are:
54 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
55 with ties normally decided via FIFO (i.e. least recently used).
56 * For small (<= 64 bytes by default) requests, it is a caching
57 allocator, that maintains pools of quickly recycled chunks.
58 * In between, and for combinations of large and small requests, it does
59 the best it can trying to meet both goals at once.
60 * For very large requests (>= 128KB by default), it relies on system
61 memory mapping facilities, if supported.
62
63 For a longer but slightly out of date high-level description, see
64 http://gee.cs.oswego.edu/dl/html/malloc.html
65
66 You may already by default be using a C library containing a malloc
67 that is based on some version of this malloc (for example in
68 linux). You might still want to use the one in this file in order to
69 customize settings or to avoid overheads associated with library
70 versions.
71
72 * Contents, described in more detail in "description of public routines" below.
73
74 Standard (ANSI/SVID/...) functions:
75 malloc(size_t n);
76 calloc(size_t n_elements, size_t element_size);
77 free(void* p);
78 realloc(void* p, size_t n);
79 memalign(size_t alignment, size_t n);
80 valloc(size_t n);
81 mallinfo()
82 mallopt(int parameter_number, int parameter_value)
83
84 Additional functions:
85 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
86 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
87 pvalloc(size_t n);
88 cfree(void* p);
89 malloc_trim(size_t pad);
90 malloc_usable_size(void* p);
91 malloc_stats();
92
93 * Vital statistics:
94
95 Supported pointer representation: 4 or 8 bytes
96 Supported size_t representation: 4 or 8 bytes
97 Note that size_t is allowed to be 4 bytes even if pointers are 8.
98 You can adjust this by defining INTERNAL_SIZE_T
99
100 Alignment: 2 * sizeof(size_t) (default)
101 (i.e., 8-byte alignment with 4-byte size_t). This suffices for
102 nearly all current machines and C compilers. However, you can
103 define MALLOC_ALIGNMENT to be wider than this if necessary.
104
105 Minimum overhead per allocated chunk: 4 or 8 bytes
106 Each malloced chunk has a hidden word of overhead holding size
107 and status information.
108
109 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
110 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
111
112 When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
113 ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
114 needed; 4 (8) for a trailing size field and 8 (16) bytes for
115 free list pointers. Thus, the minimum allocatable size is
116 16/24/32 bytes.
117
118 Even a request for zero bytes (i.e., malloc(0)) returns a
119 pointer to something of the minimum allocatable size.
120
121 The maximum overhead wastage (i.e., the number of extra bytes
122 allocated beyond what was requested in malloc) is less than or equal
123 to the minimum size, except for requests >= mmap_threshold that
124 are serviced via mmap(), where the worst case wastage is 2 *
125 sizeof(size_t) bytes plus the remainder from a system page (the
126 minimal mmap unit); typically 4096 or 8192 bytes.
127
128 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
129 8-byte size_t: 2^64 minus about two pages
130
131 It is assumed that (possibly signed) size_t values suffice to
132 represent chunk sizes. `Possibly signed' is due to the fact
133 that `size_t' may be defined on a system as either a signed or
134 an unsigned type. The ISO C standard says that it must be
135 unsigned, but a few systems are known not to adhere to this.
136 Additionally, even when size_t is unsigned, sbrk (which is by
137 default used to obtain memory from system) accepts signed
138 arguments, and may not be able to handle size_t-wide arguments
139 with negative sign bit. Generally, values that would
140 appear as negative after accounting for overhead and alignment
141 are supported only via mmap(), which does not have this
142 limitation.
143
144 Requests for sizes outside the allowed range will perform an optional
145 failure action and then return null. (Requests may also
146 fail because a system is out of memory.)
147
148 Thread-safety: thread-safe
149
150 Compliance: I believe it is compliant with the 1997 Single Unix Specification
151 Also SVID/XPG, ANSI C, and probably others as well.
152
153 * Synopsis of compile-time options:
154
155 People have reported using previous versions of this malloc on all
156 versions of Unix, sometimes by tweaking some of the defines
157 below. It has been tested most extensively on Solaris and Linux.
158 People also report using it in stand-alone embedded systems.
159
160 The implementation is in straight, hand-tuned ANSI C. It is not
161 at all modular. (Sorry!) It uses a lot of macros. To be at all
162 usable, this code should be compiled using an optimizing compiler
163 (for example gcc -O3) that can simplify expressions and control
164 paths. (FAQ: some macros import variables as arguments rather than
165 declare locals because people reported that some debuggers
166 otherwise get confused.)
167
168 OPTION DEFAULT VALUE
169
170 Compilation Environment options:
171
172 HAVE_MREMAP 0 unless linux defined
173
174 Changing default word sizes:
175
176 INTERNAL_SIZE_T size_t
177 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
178 __alignof__ (long double))
179
180 Configuration and functionality options:
181
182 USE_DL_PREFIX NOT defined
183 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
184 USE_MALLOC_LOCK NOT defined
185 MALLOC_DEBUG NOT defined
186 REALLOC_ZERO_BYTES_FREES 1
187 TRIM_FASTBINS 0
188
189 Options for customizing MORECORE:
190
191 MORECORE sbrk
192 MORECORE_FAILURE -1
193 MORECORE_CONTIGUOUS 1
194 MORECORE_CANNOT_TRIM NOT defined
195 MORECORE_CLEARS 1
196 MMAP_AS_MORECORE_SIZE (1024 * 1024)
197
198 Tuning options that are also dynamically changeable via mallopt:
199
200 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
201 DEFAULT_TRIM_THRESHOLD 128 * 1024
202 DEFAULT_TOP_PAD 0
203 DEFAULT_MMAP_THRESHOLD 128 * 1024
204 DEFAULT_MMAP_MAX 65536
205
206 There are several other #defined constants and macros that you
207 probably don't want to touch unless you are extending or adapting malloc. */
208
209 /*
210 void* is the pointer type that malloc should say it returns
211 */
212
213 #ifndef void
214 #define void void
215 #endif /*void*/
216
217 #include <stddef.h> /* for size_t */
218 #include <stdlib.h> /* for getenv(), abort() */
219
220 #include <malloc-machine.h>
221
222 #include <atomic.h>
223 #include <stdio-common/_itoa.h>
224 #include <bits/wordsize.h>
225 #include <sys/sysinfo.h>
226
227 #include <ldsodefs.h>
228
229 #ifdef __cplusplus
230 extern "C" {
231 #endif
232
233 #include <unistd.h>
234 #include <stdio.h> /* needed for malloc_stats */
235 #include <errno.h>
236
237 /* For uintptr_t. */
238 #include <stdint.h>
239
240 /* For va_arg, va_start, va_end. */
241 #include <stdarg.h>
242
243 /* For writev and struct iovec. */
244 #include <sys/uio.h>
245 /* For syslog. */
246 #include <sys/syslog.h>
247
248 /* For various dynamic linking things. */
249 #include <dlfcn.h>
250
251
252 /*
253 Debugging:
254
255 Because freed chunks may be overwritten with bookkeeping fields, this
256 malloc will often die when freed memory is overwritten by user
257 programs. This can be very effective (albeit in an annoying way)
258 in helping track down dangling pointers.
259
260 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
261 enabled that will catch more memory errors. You probably won't be
262 able to make much sense of the actual assertion errors, but they
263 should help you locate incorrectly overwritten memory. The checking
264 is fairly extensive, and will slow down execution
265 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
266 will attempt to check every non-mmapped allocated and free chunk in
267 the course of computing the summaries. (By nature, mmapped regions
268 cannot be checked very much automatically.)
269
270 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
271 this code. The assertions in the check routines spell out in more
272 detail the assumptions and invariants underlying the algorithms.
273
274 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
275 checking that all accesses to malloced memory stay within their
276 bounds. However, there are several add-ons and adaptations of this
277 or other mallocs available that do this.
278 */
279
280 #ifdef NDEBUG
281 # define assert(expr) ((void) 0)
282 #else
283 # define assert(expr) \
284 ((expr) \
285 ? ((void) 0) \
286 : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
287
288 extern const char *__progname;
289
290 static void
291 __malloc_assert (const char *assertion, const char *file, unsigned int line,
292 const char *function)
293 {
294 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
295 __progname, __progname[0] ? ": " : "",
296 file, line,
297 function ? function : "", function ? ": " : "",
298 assertion);
299 fflush (stderr);
300 abort ();
301 }
302 #endif
303
304
305 /*
306 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
307 of chunk sizes.
308
309 The default version is the same as size_t.
310
311 While not strictly necessary, it is best to define this as an
312 unsigned type, even if size_t is a signed type. This may avoid some
313 artificial size limitations on some systems.
314
315 On a 64-bit machine, you may be able to reduce malloc overhead by
316 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
317 expense of not being able to handle more than 2^32 of malloced
318 space. If this limitation is acceptable, you are encouraged to set
319 this unless you are on a platform requiring 16-byte alignments. In
320 this case the alignment requirements turn out to negate any
321 potential advantages of decreasing size_t word size.
322
323 Implementors: Beware of the possible combinations of:
324 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
325 and might be the same width as int or as long
326 - size_t might have different width and signedness than INTERNAL_SIZE_T
327 - int and long might be 32 or 64 bits, and might be the same width
328 To deal with this, most comparisons and difference computations
329 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
330 aware of the fact that casting an unsigned int to a wider long does
331 not sign-extend. (This also makes checking for negative numbers
332 awkward.) Some of these casts result in harmless compiler warnings
333 on some systems.
334 */
335
336 #ifndef INTERNAL_SIZE_T
337 #define INTERNAL_SIZE_T size_t
338 #endif
339
340 /* The corresponding word size */
341 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
342
343
344 /*
345 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
346 It must be a power of two at least 2 * SIZE_SZ, even on machines
347 for which smaller alignments would suffice. It may be defined as
348 larger than this though. Note however that code and data structures
349 are optimized for the case of 8-byte alignment.
350 */
351
352
353 #ifndef MALLOC_ALIGNMENT
354 /* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
355 powerpc32. For the time being, changing this is causing more
356 compatibility problems due to malloc_get_state/malloc_set_state than
357 will returning blocks not adequately aligned for long double objects
358 under -mlong-double-128.
359
360 #define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
361 ? __alignof__ (long double) : 2 * SIZE_SZ)
362 */
363 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
364 #endif
365
366 /* The corresponding bit mask value */
367 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
368
369
370
371 /*
372 REALLOC_ZERO_BYTES_FREES should be set if a call to
373 realloc with zero bytes should be the same as a call to free.
374 This is required by the C standard. Otherwise, since this malloc
375 returns a unique pointer for malloc(0), so does realloc(p, 0).
376 */
377
378 #ifndef REALLOC_ZERO_BYTES_FREES
379 #define REALLOC_ZERO_BYTES_FREES 1
380 #endif
381
382 /*
383 TRIM_FASTBINS controls whether free() of a very small chunk can
384 immediately lead to trimming. Setting to true (1) can reduce memory
385 footprint, but will almost always slow down programs that use a lot
386 of small chunks.
387
388 Define this only if you are willing to give up some speed to more
389 aggressively reduce system-level memory footprint when releasing
390 memory in programs that use many small chunks. You can get
391 essentially the same effect by setting MXFAST to 0, but this can
392 lead to even greater slowdowns in programs using many small chunks.
393 TRIM_FASTBINS is an in-between compile-time option, that disables
394 only those chunks bordering topmost memory from being placed in
395 fastbins.
396 */
397
398 #ifndef TRIM_FASTBINS
399 #define TRIM_FASTBINS 0
400 #endif
401
402
403 /*
404 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
405 This is necessary when you only want to use this malloc in one part
406 of a program, using your regular system malloc elsewhere.
407 */
408
409 /* #define USE_DL_PREFIX */
410
411
412 /*
413 Two-phase name translation.
414 All of the actual routines are given mangled names.
415 When wrappers are used, they become the public callable versions.
416 When DL_PREFIX is used, the callable names are prefixed.
417 */
418
419 #ifdef USE_DL_PREFIX
420 #define public_cALLOc dlcalloc
421 #define public_fREe dlfree
422 #define public_cFREe dlcfree
423 #define public_mALLOc dlmalloc
424 #define public_mEMALIGn dlmemalign
425 #define public_rEALLOc dlrealloc
426 #define public_vALLOc dlvalloc
427 #define public_pVALLOc dlpvalloc
428 #define public_mALLINFo dlmallinfo
429 #define public_mALLOPt dlmallopt
430 #define public_mTRIm dlmalloc_trim
431 #define public_mSTATs dlmalloc_stats
432 #define public_mUSABLe dlmalloc_usable_size
433 #define public_iCALLOc dlindependent_calloc
434 #define public_iCOMALLOc dlindependent_comalloc
435 #define public_gET_STATe dlget_state
436 #define public_sET_STATe dlset_state
437 #else /* USE_DL_PREFIX */
438
439 /* Special defines for the GNU C library. */
440 #define public_cALLOc __libc_calloc
441 #define public_fREe __libc_free
442 #define public_cFREe __libc_cfree
443 #define public_mALLOc __libc_malloc
444 #define public_mEMALIGn __libc_memalign
445 #define public_rEALLOc __libc_realloc
446 #define public_vALLOc __libc_valloc
447 #define public_pVALLOc __libc_pvalloc
448 #define public_mALLINFo __libc_mallinfo
449 #define public_mALLOPt __libc_mallopt
450 #define public_mTRIm __malloc_trim
451 #define public_mSTATs __malloc_stats
452 #define public_mUSABLe __malloc_usable_size
453 #define public_iCALLOc __libc_independent_calloc
454 #define public_iCOMALLOc __libc_independent_comalloc
455 #define public_gET_STATe __malloc_get_state
456 #define public_sET_STATe __malloc_set_state
457 #define open __open
458 #define mmap __mmap
459 #define munmap __munmap
460 #define mremap __mremap
461 #define mprotect __mprotect
462 #define MORECORE (*__morecore)
463 #define MORECORE_FAILURE 0
464
465 void * __default_morecore (ptrdiff_t);
466 void *(*__morecore)(ptrdiff_t) = __default_morecore;
467
468 #endif /* USE_DL_PREFIX */
469
470
471 #include <string.h>
472
473
474 /* Force a value to be in a register and stop the compiler referring
475 to the source (mostly memory location) again. */
476 #define force_reg(val) \
477 ({ __typeof (val) _v; asm ("" : "=r" (_v) : "0" (val)); _v; })
478
479
480 /*
481 MORECORE-related declarations. By default, rely on sbrk
482 */
483
484
485 /*
486 MORECORE is the name of the routine to call to obtain more memory
487 from the system. See below for general guidance on writing
488 alternative MORECORE functions, as well as a version for WIN32 and a
489 sample version for pre-OSX macos.
490 */
491
492 #ifndef MORECORE
493 #define MORECORE sbrk
494 #endif
495
496 /*
497 MORECORE_FAILURE is the value returned upon failure of MORECORE
498 as well as mmap. Since it cannot be an otherwise valid memory address,
499 and must reflect values of standard sys calls, you probably ought not
500 try to redefine it.
501 */
502
503 #ifndef MORECORE_FAILURE
504 #define MORECORE_FAILURE (-1)
505 #endif
506
507 /*
508 If MORECORE_CONTIGUOUS is true, take advantage of fact that
509 consecutive calls to MORECORE with positive arguments always return
510 contiguous increasing addresses. This is true of unix sbrk. Even
511 if not defined, when regions happen to be contiguous, malloc will
512 permit allocations spanning regions obtained from different
513 calls. But defining this when applicable enables some stronger
514 consistency checks and space efficiencies.
515 */
516
517 #ifndef MORECORE_CONTIGUOUS
518 #define MORECORE_CONTIGUOUS 1
519 #endif
520
521 /*
522 Define MORECORE_CANNOT_TRIM if your version of MORECORE
523 cannot release space back to the system when given negative
524 arguments. This is generally necessary only if you are using
525 a hand-crafted MORECORE function that cannot handle negative arguments.
526 */
527
528 /* #define MORECORE_CANNOT_TRIM */
529
530 /* MORECORE_CLEARS (default 1)
531 The degree to which the routine mapped to MORECORE zeroes out
532 memory: never (0), only for newly allocated space (1) or always
533 (2). The distinction between (1) and (2) is necessary because on
534 some systems, if the application first decrements and then
535 increments the break value, the contents of the reallocated space
536 are unspecified.
537 */
538
539 #ifndef MORECORE_CLEARS
540 #define MORECORE_CLEARS 1
541 #endif
542
543
544 /*
545 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
546 sbrk fails, and mmap is used as a backup. The value must be a
547 multiple of page size. This backup strategy generally applies only
548 when systems have "holes" in address space, so sbrk cannot perform
549 contiguous expansion, but there is still space available on the system.
550 On systems for which this is known to be useful (i.e. most linux
551 kernels), this occurs only when programs allocate huge amounts of
552 memory. Between this, and the fact that mmap regions tend to be
553 limited, the size should be large, to avoid too many mmap calls and
554 thus avoid running out of kernel resources. */
555
556 #ifndef MMAP_AS_MORECORE_SIZE
557 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
558 #endif
559
560 /*
561 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
562 large blocks. This is currently only possible on Linux with
563 kernel versions newer than 1.3.77.
564 */
565
566 #ifndef HAVE_MREMAP
567 #ifdef linux
568 #define HAVE_MREMAP 1
569 #else
570 #define HAVE_MREMAP 0
571 #endif
572
573 #endif /* HAVE_MREMAP */
574
575
576 /*
577 This version of malloc supports the standard SVID/XPG mallinfo
578 routine that returns a struct containing usage properties and
579 statistics. It should work on any SVID/XPG compliant system that has
580 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
581 install such a thing yourself, cut out the preliminary declarations
582 as described above and below and save them in a malloc.h file. But
583 there's no compelling reason to bother to do this.)
584
585 The main declaration needed is the mallinfo struct that is returned
586 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
587 bunch of fields that are not even meaningful in this version of
588 malloc. These fields are instead filled by mallinfo() with
589 other numbers that might be of interest.
590 */
591
592
593 /* ---------- description of public routines ------------ */
594
595 /*
596 malloc(size_t n)
597 Returns a pointer to a newly allocated chunk of at least n bytes, or null
598 if no space is available. Additionally, on failure, errno is
599 set to ENOMEM on ANSI C systems.
600
601 If n is zero, malloc returns a minimum-sized chunk. (The minimum
602 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
603 systems.) On most systems, size_t is an unsigned type, so calls
604 with negative arguments are interpreted as requests for huge amounts
605 of space, which will often fail. The maximum supported value of n
606 differs across systems, but is in all cases less than the maximum
607 representable value of a size_t.
608 */
609 void* public_mALLOc(size_t);
610 libc_hidden_proto (public_mALLOc)
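/*
  Illustrative caller (not part of this file; the function name is made
  up), checking the documented failure behavior of a null return with
  errno set to ENOMEM:

    static void *example_alloc (size_t n)
    {
      void *buf = malloc (n);
      if (buf == NULL && errno == ENOMEM)
        fprintf (stderr, "allocation of %zu bytes failed\n", n);
      return buf;
    }
*/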
611
612 /*
613 free(void* p)
614 Releases the chunk of memory pointed to by p, that had been previously
615 allocated using malloc or a related routine such as realloc.
616 It has no effect if p is null. It can have arbitrary (i.e., bad!)
617 effects if p has already been freed.
618
619 Unless disabled (using mallopt), freeing very large spaces will
620 when possible, automatically trigger operations that give
621 back unused memory to the system, thus reducing program footprint.
622 */
623 void public_fREe(void*);
624 libc_hidden_proto (public_fREe)
625
626 /*
627 calloc(size_t n_elements, size_t element_size);
628 Returns a pointer to n_elements * element_size bytes, with all locations
629 set to zero.
630 */
631 void* public_cALLOc(size_t, size_t);
632
633 /*
634 realloc(void* p, size_t n)
635 Returns a pointer to a chunk of size n that contains the same data
636 as does chunk p up to the minimum of (n, p's size) bytes, or null
637 if no space is available.
638
639 The returned pointer may or may not be the same as p. The algorithm
640 prefers extending p when possible, otherwise it employs the
641 equivalent of a malloc-copy-free sequence.
642
643 If p is null, realloc is equivalent to malloc.
644
645 If space is not available, realloc returns null, errno is set (if on
646 ANSI) and p is NOT freed.
647
648 if n is for fewer bytes than already held by p, the newly unused
649 space is lopped off and freed if possible. Unless the #define
650 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
651 zero (re)allocates a minimum-sized chunk.
652
653 Large chunks that were internally obtained via mmap will always
654 be reallocated using malloc-copy-free sequences unless
655 the system supports MREMAP (currently only linux).
656
657 The old unix realloc convention of allowing the last-free'd chunk
658 to be used as an argument to realloc is not supported.
659 */
660 void* public_rEALLOc(void*, size_t);
661 libc_hidden_proto (public_rEALLOc)
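/*
  Illustrative usage sketch (not part of this file; the variable names are
  hypothetical). Because a failed realloc returns null and does NOT free p,
  assign the result to a temporary so the old buffer can still be released:

    char *tmp = realloc (line, new_len);
    if (tmp == NULL)
      {
        free (line);
        return -1;
      }
    line = tmp;
*/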
662
663 /*
664 memalign(size_t alignment, size_t n);
665 Returns a pointer to a newly allocated chunk of n bytes, aligned
666 in accord with the alignment argument.
667
668 The alignment argument should be a power of two. If the argument is
669 not a power of two, the nearest greater power is used.
670 8-byte alignment is guaranteed by normal malloc calls, so don't
671 bother calling memalign with an argument of 8 or less.
672
673 Overreliance on memalign is a sure way to fragment space.
674 */
675 void* public_mEMALIGn(size_t, size_t);
676 libc_hidden_proto (public_mEMALIGn)
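/*
  Illustrative example (not part of this file): requesting 4096 bytes
  aligned to a 64-byte boundary, e.g. for a cache-line-aligned buffer.
  The alignment should be a power of two, per the description above.

    void *p = memalign (64, 4096);
    assert (p == NULL || ((uintptr_t) p & 63) == 0);
*/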
677
678 /*
679 valloc(size_t n);
680 Equivalent to memalign(pagesize, n), where pagesize is the page
681 size of the system. If the pagesize is unknown, 4096 is used.
682 */
683 void* public_vALLOc(size_t);
684
685
686
687 /*
688 mallopt(int parameter_number, int parameter_value)
689 Sets tunable parameters. The format is to provide a
690 (parameter-number, parameter-value) pair. mallopt then sets the
691 corresponding parameter to the argument value if it can (i.e., so
692 long as the value is meaningful), and returns 1 if successful else
693 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
694 normally defined in malloc.h. Only one of these (M_MXFAST) is used
695 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
696 so setting them has no effect. But this malloc also supports four
697 other options in mallopt. See below for details. Briefly, supported
698 parameters are as follows (listed defaults are for "typical"
699 configurations).
700
701 Symbol param # default allowed param values
702 M_MXFAST 1 64 0-80 (0 disables fastbins)
703 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
704 M_TOP_PAD -2 0 any
705 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
706 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
707 */
708 int public_mALLOPt(int, int);
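/*
  Illustrative mallopt calls (not part of this file), using the parameter
  numbers from the table above: disable fastbins, raise the trim threshold
  to 256K, and turn off use of mmap for large requests. Each call returns
  1 on success and 0 if the value is rejected.

    mallopt (M_MXFAST, 0);
    mallopt (M_TRIM_THRESHOLD, 256 * 1024);
    mallopt (M_MMAP_MAX, 0);
*/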
709
710
711 /*
712 mallinfo()
713 Returns (by copy) a struct containing various summary statistics:
714
715 arena: current total non-mmapped bytes allocated from system
716 ordblks: the number of free chunks
717 smblks: the number of fastbin blocks (i.e., small chunks that
718 have been freed but not yet reused or consolidated)
719 hblks: current number of mmapped regions
720 hblkhd: total bytes held in mmapped regions
721 usmblks: the maximum total allocated space. This will be greater
722 than current total if trimming has occurred.
723 fsmblks: total bytes held in fastbin blocks
724 uordblks: current total allocated space (normal or mmapped)
725 fordblks: total free space
726 keepcost: the maximum number of bytes that could ideally be released
727 back to system via malloc_trim. ("ideally" means that
728 it ignores page restrictions etc.)
729
730 Because these fields are ints, but internal bookkeeping may
731 be kept as longs, the reported values may wrap around zero and
732 thus be inaccurate.
733 */
734 struct mallinfo public_mALLINFo(void);
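/*
  Illustrative use (not part of this file): printing a few of the fields
  described above; the struct members are plain ints.

    struct mallinfo mi = mallinfo ();
    printf ("arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
            mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
*/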
735
736
737 /*
738 pvalloc(size_t n);
739 Equivalent to valloc(minimum-page-that-holds(n)), that is,
740 round up n to nearest pagesize.
741 */
742 void* public_pVALLOc(size_t);
743
744 /*
745 cfree(void* p);
746 Equivalent to free(p).
747
748 cfree is needed/defined on some systems that pair it with calloc,
749 for odd historical reasons (such as: cfree is used in example
750 code in the first edition of K&R).
751 */
752 void public_cFREe(void*);
753
754 /*
755 malloc_trim(size_t pad);
756
757 If possible, gives memory back to the system (via negative
758 arguments to sbrk) if there is unused memory at the `high' end of
759 the malloc pool. You can call this after freeing large blocks of
760 memory to potentially reduce the system-level memory requirements
761 of a program. However, it cannot guarantee to reduce memory. Under
762 some allocation patterns, some large free blocks of memory will be
763 locked between two used chunks, so they cannot be given back to
764 the system.
765
766 The `pad' argument to malloc_trim represents the amount of free
767 trailing space to leave untrimmed. If this argument is zero,
768 only the minimum amount of memory to maintain internal data
769 structures will be left (one page or less). Non-zero arguments
770 can be supplied to maintain enough trailing space to service
771 future expected allocations without having to re-obtain memory
772 from the system.
773
774 Malloc_trim returns 1 if it actually released any memory, else 0.
775 On systems that do not support "negative sbrks", it will always
776 return 0.
777 */
778 int public_mTRIm(size_t);
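/*
  Illustrative use (not part of this file): after freeing a large batch of
  allocations, ask malloc to return unused top-of-heap memory to the
  system, keeping no extra pad.

    if (malloc_trim (0))
      puts ("some memory was returned to the system");
*/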
779
780 /*
781 malloc_usable_size(void* p);
782
783 Returns the number of bytes you can actually use in
784 an allocated chunk, which may be more than you requested (although
785 often not) due to alignment and minimum size constraints.
786 You can use this many bytes without worrying about
787 overwriting other allocated objects. This is not a particularly great
788 programming practice. malloc_usable_size can be more useful in
789 debugging and assertions, for example:
790
791 p = malloc(n);
792 assert(malloc_usable_size(p) >= 256);
793
794 */
795 size_t public_mUSABLe(void*);
796
797 /*
798 malloc_stats();
799 Prints on stderr the amount of space obtained from the system (both
800 via sbrk and mmap), the maximum amount (which may be more than
801 current if malloc_trim and/or munmap got called), and the current
802 number of bytes allocated via malloc (or realloc, etc) but not yet
803 freed. Note that this is the number of bytes allocated, not the
804 number requested. It will be larger than the number requested
805 because of alignment and bookkeeping overhead. Because it includes
806 alignment wastage as being in use, this figure may be greater than
807 zero even when no user-level chunks are allocated.
808
809 The reported current and maximum system memory can be inaccurate if
810 a program makes other calls to system memory allocation functions
811 (normally sbrk) outside of malloc.
812
813 malloc_stats prints only the most commonly interesting statistics.
814 More information can be obtained by calling mallinfo.
815
816 */
817 void public_mSTATs(void);
818
819 /*
820 malloc_get_state(void);
821
822 Returns the state of all malloc variables in an opaque data
823 structure.
824 */
825 void* public_gET_STATe(void);
826
827 /*
828 malloc_set_state(void* state);
829
830 Restore the state of all malloc variables from data obtained with
831 malloc_get_state().
832 */
833 int public_sET_STATe(void*);
834
835 /*
836 posix_memalign(void **memptr, size_t alignment, size_t size);
837
838 POSIX wrapper like memalign(), checking for validity of size.
839 */
840 int __posix_memalign(void **, size_t, size_t);
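/*
  Illustrative use of the POSIX interface (not part of this file): unlike
  memalign, posix_memalign reports failure through its return value and
  requires the alignment to be a power of two multiple of sizeof (void *).

    void *p;
    int err = posix_memalign (&p, 64, 4096);
    if (err != 0)
      p = NULL;
*/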
841
842 /* mallopt tuning options */
843
844 /*
845 M_MXFAST is the maximum request size used for "fastbins", special bins
846 that hold returned chunks without consolidating their spaces. This
847 enables future requests for chunks of the same size to be handled
848 very quickly, but can increase fragmentation, and thus increase the
849 overall memory footprint of a program.
850
851 This malloc manages fastbins very conservatively yet still
852 efficiently, so fragmentation is rarely a problem for values less
853 than or equal to the default. The maximum supported value of MXFAST
854 is 80. You wouldn't want it any higher than this anyway. Fastbins
855 are designed especially for use with many small structs, objects or
856 strings -- the default handles structs/objects/arrays with sizes up
857 to 8 4byte fields, or small strings representing words, tokens,
858 etc. Using fastbins for larger objects normally worsens
859 fragmentation without improving speed.
860
861 M_MXFAST is set in REQUEST size units. It is internally used in
862 chunksize units, which adds padding and alignment. You can reduce
863 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
864 algorithm to be a closer approximation of fifo-best-fit in all cases,
865 not just for larger requests, but will generally cause it to be
866 slower.
867 */
868
869
870 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
871 #ifndef M_MXFAST
872 #define M_MXFAST 1
873 #endif
874
875 #ifndef DEFAULT_MXFAST
876 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
877 #endif
878
879
880 /*
881 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
882 to keep before releasing via malloc_trim in free().
883
884 Automatic trimming is mainly useful in long-lived programs.
885 Because trimming via sbrk can be slow on some systems, and can
886 sometimes be wasteful (in cases where programs immediately
887 afterward allocate more large chunks) the value should be high
888 enough so that your overall system performance would improve by
889 releasing this much memory.
890
891 The trim threshold and the mmap control parameters (see below)
892 can be traded off with one another. Trimming and mmapping are
893 two different ways of releasing unused memory back to the
894 system. Between these two, it is often possible to keep
895 system-level demands of a long-lived program down to a bare
896 minimum. For example, in one test suite of sessions measuring
897 the XF86 X server on Linux, using a trim threshold of 128K and a
898 mmap threshold of 192K led to near-minimal long term resource
899 consumption.
900
901 If you are using this malloc in a long-lived program, it should
902 pay to experiment with these values. As a rough guide, you
903 might set to a value close to the average size of a process
904 (program) running on your system. Releasing this much memory
905 would allow such a process to run in memory. Generally, it's
906 worth it to tune for trimming rather than memory mapping when a
907 program undergoes phases where several large chunks are
908 allocated and released in ways that can reuse each other's
909 storage, perhaps mixed with phases where there are no such
910 chunks at all. And in well-behaved long-lived programs,
911 controlling release of large blocks via trimming versus mapping
912 is usually faster.
913
914 However, in most programs, these parameters serve mainly as
915 protection against the system-level effects of carrying around
916 massive amounts of unneeded memory. Since frequent calls to
917 sbrk, mmap, and munmap otherwise degrade performance, the default
918 parameters are set to relatively high values that serve only as
919 safeguards.
920
921 The trim value must be greater than the page size to have any useful
922 effect. To disable trimming completely, you can set to
923 (unsigned long)(-1)
924
925 Trim settings interact with fastbin (MXFAST) settings: Unless
926 TRIM_FASTBINS is defined, automatic trimming never takes place upon
927 freeing a chunk with size less than or equal to MXFAST. Trimming is
928 instead delayed until subsequent freeing of larger chunks. However,
929 you can still force an attempted trim by calling malloc_trim.
930
931 Also, trimming is not generally possible in cases where
932 the main arena is obtained via mmap.
933
934 Note that the trick some people use of mallocing a huge space and
935 then freeing it at program startup, in an attempt to reserve system
936 memory, doesn't have the intended effect under automatic trimming,
937 since that memory will immediately be returned to the system.
938 */
939
940 #define M_TRIM_THRESHOLD -1
941
942 #ifndef DEFAULT_TRIM_THRESHOLD
943 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
944 #endif
945
946 /*
947 M_TOP_PAD is the amount of extra `padding' space to allocate or
948 retain whenever sbrk is called. It is used in two ways internally:
949
950 * When sbrk is called to extend the top of the arena to satisfy
951 a new malloc request, this much padding is added to the sbrk
952 request.
953
954 * When malloc_trim is called automatically from free(),
955 it is used as the `pad' argument.
956
957 In both cases, the actual amount of padding is rounded
958 so that the end of the arena is always a system page boundary.
959
960 The main reason for using padding is to avoid calling sbrk so
961 often. Having even a small pad greatly reduces the likelihood
962 that nearly every malloc request during program start-up (or
963 after trimming) will invoke sbrk, which needlessly wastes
964 time.
965
966 Automatic rounding-up to page-size units is normally sufficient
967 to avoid measurable overhead, so the default is 0. However, in
968 systems where sbrk is relatively slow, it can pay to increase
969 this value, at the expense of carrying around more memory than
970 the program needs.
971 */
972
973 #define M_TOP_PAD -2
974
975 #ifndef DEFAULT_TOP_PAD
976 #define DEFAULT_TOP_PAD (0)
977 #endif
978
979 /*
980 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
981 adjusted MMAP_THRESHOLD.
982 */
983
984 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
985 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
986 #endif
987
988 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
989 /* For 32-bit platforms we cannot increase the maximum mmap
990 threshold much because it is also the minimum value for the
991 maximum heap size and its alignment. Going above 512k (i.e., 1M
992 for new heaps) wastes too much address space. */
993 # if __WORDSIZE == 32
994 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
995 # else
996 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
997 # endif
998 #endif
999
1000 /*
1001 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1002 to service a request. Requests of at least this size that cannot
1003 be allocated using already-existing space will be serviced via mmap.
1004 (If enough normal freed space already exists it is used instead.)
1005
1006 Using mmap segregates relatively large chunks of memory so that
1007 they can be individually obtained and released from the host
1008 system. A request serviced through mmap is never reused by any
1009 other request (at least not directly; the system may just so
1010 happen to remap successive requests to the same locations).
1011
1012 Segregating space in this way has the benefits that:
1013
1014 1. Mmapped space can ALWAYS be individually released back
1015 to the system, which helps keep the system level memory
1016 demands of a long-lived program low.
1017 2. Mapped memory can never become `locked' between
1018 other chunks, as can happen with normally allocated chunks, which
1019 means that even trimming via malloc_trim would not release them.
1020 3. On some systems with "holes" in address spaces, mmap can obtain
1021 memory that sbrk cannot.
1022
1023 However, it has the disadvantages that:
1024
1025 1. The space cannot be reclaimed, consolidated, and then
1026 used to service later requests, as happens with normal chunks.
1027 2. It can lead to more wastage because of mmap page alignment
1028 requirements
1029 3. It causes malloc performance to be more dependent on host
1030 system memory management support routines which may vary in
1031 implementation quality and may impose arbitrary
1032 limitations. Generally, servicing a request via normal
1033 malloc steps is faster than going through a system's mmap.
1034
1035 The advantages of mmap nearly always outweigh disadvantages for
1036 "large" chunks, but the value of "large" varies across systems. The
1037 default is an empirically derived value that works well in most
1038 systems.
1039
1040
1041 Update in 2006:
1042 The above was written in 2001. Since then the world has changed a lot.
1043 Memory got bigger. Applications got bigger. The virtual address space
1044 layout in 32 bit linux changed.
1045
1046 In the new situation, brk() and mmap space is shared and there are no
1047 artificial limits on brk size imposed by the kernel. What is more,
1048 applications have started using transient allocations larger than the
1049 128Kb that was imagined in 2001.
1050
1051 The price for mmap is also high now; each time glibc mmaps from the
1052 kernel, the kernel is forced to zero out the memory it gives to the
1053 application. Zeroing memory is expensive and eats a lot of cache and
1054 memory bandwidth. This has nothing to do with the efficiency of the
1055 virtual memory system; when doing mmap the kernel simply has no choice
1056 but to zero.
1057
1058 In 2001, the kernel had a maximum size for brk() which was about 800
1059 megabytes on 32 bit x86; at that point brk() would hit the first
1060 mmapped shared libraries and couldn't expand anymore. With current 2.6
1061 kernels, the VA space layout is different and brk() and mmap
1062 both can span the entire heap at will.
1063
1064 Rather than using a static threshold for the brk/mmap tradeoff,
1065 we are now using a simple dynamic one. The goal is still to avoid
1066 fragmentation. The old goals we kept are
1067 1) try to get the long lived large allocations to use mmap()
1068 2) really large allocations should always use mmap()
1069 and we're adding now:
1070 3) transient allocations should use brk() to avoid forcing the kernel
1071 to zero memory over and over again
1072
1073 The implementation works with a sliding threshold, which is by default
1074 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1075 out at 128Kb as per the 2001 default.
1076
1077 This allows us to satisfy requirement 1) under the assumption that long
1078 lived allocations are made early in the process' lifespan, before it has
1079 started doing dynamic allocations of the same size (which will
1080 increase the threshold).
1081
1082 The upper bound on the threshold satisfies requirement 2).
1083
1084 The threshold goes up in value when the application frees memory that was
1085 allocated with the mmap allocator. The idea is that once the application
1086 starts freeing memory of a certain size, it's highly probable that this is
1087 a size the application uses for transient allocations. This estimator
1088 is there to satisfy the new third requirement.
1089
1090 */
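/*
  A simplified sketch (not the actual code in this file) of the sliding
  threshold described above: when a chunk that was obtained through mmap
  is freed, the threshold is raised towards that chunk's size, capped at
  DEFAULT_MMAP_THRESHOLD_MAX, so later allocations of that size come from
  the heap instead. The variable names here are illustrative only.

    if (chunk_was_mmapped && !user_set_threshold
        && chunk_size > current_mmap_threshold
        && chunk_size <= DEFAULT_MMAP_THRESHOLD_MAX)
      {
        current_mmap_threshold = chunk_size;
        current_trim_threshold = 2 * current_mmap_threshold;
      }
*/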
1091
1092 #define M_MMAP_THRESHOLD -3
1093
1094 #ifndef DEFAULT_MMAP_THRESHOLD
1095 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1096 #endif
1097
1098 /*
1099 M_MMAP_MAX is the maximum number of requests to simultaneously
1100 service using mmap. This parameter exists because
1101 some systems have a limited number of internal tables for
1102 use by mmap, and using more than a few of them may degrade
1103 performance.
1104
1105 The default is set to a value that serves only as a safeguard.
1106 Setting to 0 disables use of mmap for servicing large requests.
1107 */
1108
1109 #define M_MMAP_MAX -4
1110
1111 #ifndef DEFAULT_MMAP_MAX
1112 #define DEFAULT_MMAP_MAX (65536)
1113 #endif
1114
1115 #ifdef __cplusplus
1116 } /* end of extern "C" */
1117 #endif
1118
1119 #include <malloc.h>
1120
1121 #ifndef BOUNDED_N
1122 #define BOUNDED_N(ptr, sz) (ptr)
1123 #endif
1124 #ifndef RETURN_ADDRESS
1125 #define RETURN_ADDRESS(X_) (NULL)
1126 #endif
1127
1128 /* On some platforms we can compile internal, not exported functions better.
1129 Let the environment provide a macro and define it to be empty if it
1130 is not available. */
1131 #ifndef internal_function
1132 # define internal_function
1133 #endif
1134
1135 /* Forward declarations. */
1136 struct malloc_chunk;
1137 typedef struct malloc_chunk* mchunkptr;
1138
1139 /* Internal routines. */
1140
1141 static void* _int_malloc(mstate, size_t);
1142 static void _int_free(mstate, mchunkptr, int);
1143 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1144 INTERNAL_SIZE_T);
1145 static void* _int_memalign(mstate, size_t, size_t);
1146 static void* _int_valloc(mstate, size_t);
1147 static void* _int_pvalloc(mstate, size_t);
1148 /*static void* cALLOc(size_t, size_t);*/
1149 static int mTRIm(mstate, size_t);
1150 static size_t mUSABLe(void*);
1151 static void mSTATs(void);
1152 static int mALLOPt(int, int);
1153 static struct mallinfo mALLINFo(mstate);
1154 static void malloc_printerr(int action, const char *str, void *ptr);
1155
1156 static void* internal_function mem2mem_check(void *p, size_t sz);
1157 static int internal_function top_check(void);
1158 static void internal_function munmap_chunk(mchunkptr p);
1159 #if HAVE_MREMAP
1160 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1161 #endif
1162
1163 static void* malloc_check(size_t sz, const void *caller);
1164 static void free_check(void* mem, const void *caller);
1165 static void* realloc_check(void* oldmem, size_t bytes,
1166 const void *caller);
1167 static void* memalign_check(size_t alignment, size_t bytes,
1168 const void *caller);
1169 /* These routines are never needed in this configuration. */
1170 static void* malloc_atfork(size_t sz, const void *caller);
1171 static void free_atfork(void* mem, const void *caller);
1172
1173
1174 /* ------------- Optional versions of memcopy ---------------- */
1175
1176
1177 /*
1178 Note: memcpy is ONLY invoked with non-overlapping regions,
1179 so the (usually slower) memmove is not needed.
1180 */
1181
1182 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1183 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1184
1185
1186 /* ------------------ MMAP support ------------------ */
1187
1188
1189 #include <fcntl.h>
1190 #include <sys/mman.h>
1191
1192 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1193 # define MAP_ANONYMOUS MAP_ANON
1194 #endif
1195 #if !defined(MAP_FAILED)
1196 # define MAP_FAILED ((char*)-1)
1197 #endif
1198
1199 #ifndef MAP_NORESERVE
1200 # ifdef MAP_AUTORESRV
1201 # define MAP_NORESERVE MAP_AUTORESRV
1202 # else
1203 # define MAP_NORESERVE 0
1204 # endif
1205 #endif
1206
1207 /*
1208 Nearly all versions of mmap support MAP_ANONYMOUS,
1209 so the following is unlikely to be needed, but is
1210 supplied just in case.
1211 */
1212
1213 #ifndef MAP_ANONYMOUS
1214
1215 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1216
1217 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1218 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1219 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1220 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1221
1222 #else
1223
1224 #define MMAP(addr, size, prot, flags) \
1225 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1226
1227 #endif
1228
1229
1230 /*
1231 ----------------------- Chunk representations -----------------------
1232 */
1233
1234
1235 /*
1236 This struct declaration is misleading (but accurate and necessary).
1237 It declares a "view" into memory allowing access to necessary
1238 fields at known offsets from a given base. See explanation below.
1239 */
1240
1241 struct malloc_chunk {
1242
1243 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1244 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1245
1246 struct malloc_chunk* fd; /* double links -- used only if free. */
1247 struct malloc_chunk* bk;
1248
1249 /* Only used for large blocks: pointer to next larger size. */
1250 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1251 struct malloc_chunk* bk_nextsize;
1252 };
1253
1254
1255 /*
1256 malloc_chunk details:
1257
1258 (The following includes lightly edited explanations by Colin Plumb.)
1259
1260 Chunks of memory are maintained using a `boundary tag' method as
1261 described in e.g., Knuth or Standish. (See the paper by Paul
1262 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1263 survey of such techniques.) Sizes of free chunks are stored both
1264 in the front of each chunk and at the end. This makes
1265 consolidating fragmented chunks into bigger chunks very fast. The
1266 size fields also hold bits representing whether chunks are free or
1267 in use.
1268
1269 An allocated chunk looks like this:
1270
1271
1272 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1273 | Size of previous chunk, if allocated | |
1274 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1275 | Size of chunk, in bytes |M|P|
1276 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1277 | User data starts here... .
1278 . .
1279 . (malloc_usable_size() bytes) .
1280 . |
1281 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1282 | Size of chunk |
1283 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1284
1285
1286 Where "chunk" is the front of the chunk for the purpose of most of
1287 the malloc code, but "mem" is the pointer that is returned to the
1288 user. "Nextchunk" is the beginning of the next contiguous chunk.
1289
1290 Chunks always begin on even word boundaries, so the mem portion
1291 (which is returned to the user) is also on an even word boundary, and
1292 thus at least double-word aligned.
1293
1294 Free chunks are stored in circular doubly-linked lists, and look like this:
1295
1296 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1297 | Size of previous chunk |
1298 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1299 `head:' | Size of chunk, in bytes |P|
1300 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1301 | Forward pointer to next chunk in list |
1302 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1303 | Back pointer to previous chunk in list |
1304 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1305 | Unused space (may be 0 bytes long) .
1306 . .
1307 . |
1308 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1309 `foot:' | Size of chunk, in bytes |
1310 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1311
1312 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1313 chunk size (which is always a multiple of two words), is an in-use
1314 bit for the *previous* chunk. If that bit is *clear*, then the
1315 word before the current chunk size contains the previous chunk
1316 size, and can be used to find the front of the previous chunk.
1317 The very first chunk allocated always has this bit set,
1318 preventing access to non-existent (or non-owned) memory. If
1319 prev_inuse is set for any given chunk, then you CANNOT determine
1320 the size of the previous chunk, and might even get a memory
1321 addressing fault when trying to do so.
1322
1323 Note that the `foot' of the current chunk is actually represented
1324 as the prev_size of the NEXT chunk. This makes it easier to
1325 deal with alignments etc but can be very confusing when trying
1326 to extend or adapt this code.
1327
1328 The two exceptions to all this are
1329
1330 1. The special chunk `top' doesn't bother using the
1331 trailing size field since there is no next contiguous chunk
1332 that would have to index off it. After initialization, `top'
1333 is forced to always exist. If it would become less than
1334 MINSIZE bytes long, it is replenished.
1335
1336 2. Chunks allocated via mmap, which have the second-lowest-order
1337 bit M (IS_MMAPPED) set in their size fields. Because they are
1338 allocated one-by-one, each must contain its own trailing size field.
1339
1340 */
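/*
  Illustrative pointer arithmetic (not part of this file), using the
  conversion and access macros defined below. Given a pointer p returned
  by malloc, its chunk header starts 2*SIZE_SZ bytes earlier; chunksize
  reports the total size including overhead, and the low bits of the size
  word carry the flags.

    mchunkptr c = mem2chunk (p);
    INTERNAL_SIZE_T total = chunksize (c);
    int mmapped = chunk_is_mmapped (c);
    int prev_used = prev_inuse (c);
*/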
1341
1342 /*
1343 ---------- Size and alignment checks and conversions ----------
1344 */
1345
1346 /* conversion from malloc headers to user pointers, and back */
1347
1348 #define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
1349 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1350
1351 /* The smallest possible chunk */
1352 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1353
1354 /* The smallest size we can malloc is an aligned minimal chunk */
1355
1356 #define MINSIZE \
1357 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1358
1359 /* Check if m has acceptable alignment */
1360
1361 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1362
1363 #define misaligned_chunk(p) \
1364 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1365 & MALLOC_ALIGN_MASK)
1366
1367
1368 /*
1369 Check if a request is so large that it would wrap around zero when
1370 padded and aligned. To simplify some other code, the bound is made
1371 low enough so that adding MINSIZE will also not wrap around zero.
1372 */
1373
1374 #define REQUEST_OUT_OF_RANGE(req) \
1375 ((unsigned long)(req) >= \
1376 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1377
1378 /* pad request bytes into a usable size -- internal version */
1379
1380 #define request2size(req) \
1381 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1382 MINSIZE : \
1383 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
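/* Worked examples, assuming 4-byte SIZE_SZ and 8-byte MALLOC_ALIGNMENT
   (so MINSIZE is 16): request2size(0) == 16, request2size(12) == 16,
   request2size(20) == 24, and request2size(25) == 32.  Each result adds
   room for the size word and rounds up to the alignment, never going
   below MINSIZE.  */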
1384
1385 /* Same, except also perform argument check */
1386
1387 #define checked_request2size(req, sz) \
1388 if (REQUEST_OUT_OF_RANGE(req)) { \
1389 __set_errno (ENOMEM); \
1390 return 0; \
1391 } \
1392 (sz) = request2size(req);
1393
1394 /*
1395 --------------- Physical chunk operations ---------------
1396 */
1397
1398
1399 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1400 #define PREV_INUSE 0x1
1401
1402 /* extract inuse bit of previous chunk */
1403 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1404
1405
1406 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1407 #define IS_MMAPPED 0x2
1408
1409 /* check for mmap()'ed chunk */
1410 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1411
1412
1413 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1414 from a non-main arena. This is only set immediately before handing
1415 the chunk to the user, if necessary. */
1416 #define NON_MAIN_ARENA 0x4
1417
1418 /* check for chunk from non-main arena */
1419 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1420
1421
1422 /*
1423 Bits to mask off when extracting size
1424
1425 Note: IS_MMAPPED is intentionally not masked off from size field in
1426 macros for which mmapped chunks should never be seen. This should
1427 cause helpful core dumps to occur if it is tried by accident by
1428 people extending or adapting this malloc.
1429 */
1430 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1431
1432 /* Get size, ignoring use bits */
1433 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1434
1435
1436 /* Ptr to next physical malloc_chunk. */
1437 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1438
1439 /* Ptr to previous physical malloc_chunk */
1440 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1441
1442 /* Treat space at ptr + offset as a chunk */
1443 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1444
1445 /* extract p's inuse bit */
1446 #define inuse(p)\
1447 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1448
1449 /* set/clear chunk as being inuse without otherwise disturbing */
1450 #define set_inuse(p)\
1451 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1452
1453 #define clear_inuse(p)\
1454 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1455
1456
1457 /* check/set/clear inuse bits in known places */
1458 #define inuse_bit_at_offset(p, s)\
1459 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1460
1461 #define set_inuse_bit_at_offset(p, s)\
1462 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1463
1464 #define clear_inuse_bit_at_offset(p, s)\
1465 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1466
1467
1468 /* Set size at head, without disturbing its use bit */
1469 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1470
1471 /* Set size/use field */
1472 #define set_head(p, s) ((p)->size = (s))
1473
1474 /* Set size at footer (only when chunk is not in use) */
1475 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1476
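/* Editor's note (illustrative, not part of the original source): these are
   classic boundary-tag operations.  If P is a free chunk, then

     next_chunk(P)             == (mchunkptr)((char*)P + chunksize(P))
     prev_chunk(next_chunk(P)) == P

   The second identity holds because set_foot() duplicates a free chunk's
   size into the prev_size field of its successor.  For an in-use chunk no
   footer is maintained (those bytes may hold user data), which is why
   prev_chunk() may only be applied when the preceding chunk is known to
   be free, i.e. when prev_inuse() is clear. */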
1477
1478 /*
1479 -------------------- Internal data structures --------------------
1480
1481 All internal state is held in an instance of malloc_state defined
1482 below. There are no other static variables, except in two optional
1483 cases:
1484 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1485 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1486 for mmap.
1487
1488 Beware of lots of tricks that minimize the total bookkeeping space
1489 requirements. The result is a little over 1K bytes (for 4-byte
1490 pointers and size_t).
1491 */
1492
1493 /*
1494 Bins
1495
1496 An array of bin headers for free chunks. Each bin is doubly
1497 linked. The bins are approximately proportionally (log) spaced.
1498 There are a lot of these bins (128). This may look excessive, but
1499 works very well in practice. Most bins hold sizes that are
1500 unusual as malloc request sizes, but are more usual for fragments
1501 and consolidated sets of chunks, which is what these bins hold, so
1502 they can be found quickly. All procedures maintain the invariant
1503 that no consolidated chunk physically borders another one, so each
1504 chunk in a list is known to be preceded and followed by either
1505 inuse chunks or the ends of memory.
1506
1507 Chunks in bins are kept in size order, with ties going to the
1508 approximately least recently used chunk. Ordering isn't needed
1509 for the small bins, which all contain the same-sized chunks, but
1510 facilitates best-fit allocation for larger chunks. These lists
1511 are just sequential. Keeping them in order almost never requires
1512 enough traversal to warrant using fancier ordered data
1513 structures.
1514
1515 Chunks of the same size are linked with the most
1516 recently freed at the front, and allocations are taken from the
1517 back. This results in LRU (FIFO) allocation order, which tends
1518 to give each chunk an equal opportunity to be consolidated with
1519 adjacent freed chunks, resulting in larger free chunks and less
1520 fragmentation.
1521
1522 To simplify use in double-linked lists, each bin header acts
1523 as a malloc_chunk. This avoids special-casing for headers.
1524 But to conserve space and improve locality, we allocate
1525 only the fd/bk pointers of bins, and then use repositioning tricks
1526 to treat these as the fields of a malloc_chunk*.
1527 */
1528
1529 typedef struct malloc_chunk* mbinptr;
1530
1531 /* addressing -- note that bin_at(0) does not exist */
1532 #define bin_at(m, i) \
1533 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1534 - offsetof (struct malloc_chunk, fd))
1535
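/* Editor's note (illustrative, not part of the original source): bins[]
   stores only the fd/bk pair of each bin, so bin i occupies bins[2*i-2]
   and bins[2*i-1].  bin_at() subtracts offsetof(struct malloc_chunk, fd)
   so that the result can be treated as a malloc_chunk whose fd and bk
   members land exactly on that pair; the fake chunk's prev_size/size
   members would overlap the preceding bin and are never touched.  For
   example, bin_at(m, 1) -- the unsorted bin -- overlays bins[0] and
   bins[1]. */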
1536 /* analog of ++bin */
1537 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1538
1539 /* Reminders about list directionality within bins */
1540 #define first(b) ((b)->fd)
1541 #define last(b) ((b)->bk)
1542
1543 /* Take a chunk off a bin list */
1544 #define unlink(P, BK, FD) { \
1545 FD = P->fd; \
1546 BK = P->bk; \
1547 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1548 malloc_printerr (check_action, "corrupted double-linked list", P); \
1549 else { \
1550 FD->bk = BK; \
1551 BK->fd = FD; \
1552 if (!in_smallbin_range (P->size) \
1553 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
1554 assert (P->fd_nextsize->bk_nextsize == P); \
1555 assert (P->bk_nextsize->fd_nextsize == P); \
1556 if (FD->fd_nextsize == NULL) { \
1557 if (P->fd_nextsize == P) \
1558 FD->fd_nextsize = FD->bk_nextsize = FD; \
1559 else { \
1560 FD->fd_nextsize = P->fd_nextsize; \
1561 FD->bk_nextsize = P->bk_nextsize; \
1562 P->fd_nextsize->bk_nextsize = FD; \
1563 P->bk_nextsize->fd_nextsize = FD; \
1564 } \
1565 } else { \
1566 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1567 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1568 } \
1569 } \
1570 } \
1571 }
1572
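/* Editor's note (not part of the original source): the
   FD->bk != P || BK->fd != P test above is the list-integrity check --
   both neighbours must still point back at P before it is spliced out.
   This defeats the classic "unlink" heap-overflow technique, which relied
   on free() writing attacker-controlled fd/bk values through an unchecked
   unlink. */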
1573 /*
1574 Indexing
1575
1576 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1577 8 bytes apart. Larger bins are approximately logarithmically spaced:
1578
1579 64 bins of size 8
1580 32 bins of size 64
1581 16 bins of size 512
1582 8 bins of size 4096
1583 4 bins of size 32768
1584 2 bins of size 262144
1585 1 bin of size what's left
1586
1587 There is actually a little bit of slop in the numbers in bin_index
1588 for the sake of speed. This makes no difference elsewhere.
1589
1590 The bins top out around 1MB because we expect to service large
1591 requests via mmap.
1592 */
1593
1594 #define NBINS 128
1595 #define NSMALLBINS 64
1596 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1597 #define MIN_LARGE_SIZE (NSMALLBINS * SMALLBIN_WIDTH)
1598
1599 #define in_smallbin_range(sz) \
1600 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
1601
1602 #define smallbin_index(sz) \
1603 (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
1604
1605 #define largebin_index_32(sz) \
1606 (((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
1607 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1608 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1609 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1610 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
1611 126)
1612
1613 // XXX It remains to be seen whether it is good to keep the widths of
1614 // XXX the buckets the same or whether it should be scaled by a factor
1615 // XXX of two as well.
1616 #define largebin_index_64(sz) \
1617 (((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \
1618 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1619 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1620 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1621 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
1622 126)
1623
1624 #define largebin_index(sz) \
1625 (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
1626
1627 #define bin_index(sz) \
1628 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
1629
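/* Editor's illustration (not part of the original source), assuming a
   64-bit build (SIZE_SZ == 8, SMALLBIN_WIDTH == 16, MIN_LARGE_SIZE == 1024):

     bin_index(144)  -> smallbin_index:  144 >> 4                 ==  9
     bin_index(1600) -> largebin_index_64: 1600 >> 6 == 25 <= 48,
                        so the bin is 48 + 25                     == 73
     bin_index(0x60000) matches the ">> 18" row: 0x60000 >> 18 == 1,
                        so the bin is 124 + 1                     == 125

   A 32-bit build would instead use SMALLBIN_WIDTH == 8,
   MIN_LARGE_SIZE == 512 and largebin_index_32. */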
1630
1631 /*
1632 Unsorted chunks
1633
1634 All remainders from chunk splits, as well as all returned chunks,
1635 are first placed in the "unsorted" bin. They are then placed
1636 in regular bins after malloc gives them ONE chance to be used before
1637 binning. So, basically, the unsorted_chunks list acts as a queue,
1638 with chunks being placed on it in free (and malloc_consolidate),
1639 and taken off (to be either used or placed in bins) in malloc.
1640
1641 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1642 does not have to be taken into account in size comparisons.
1643 */
1644
1645 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1646 #define unsorted_chunks(M) (bin_at(M, 1))
1647
1648 /*
1649 Top
1650
1651 The top-most available chunk (i.e., the one bordering the end of
1652 available memory) is treated specially. It is never included in
1653 any bin, is used only if no other chunk is available, and is
1654 released back to the system if it is very large (see
1655 M_TRIM_THRESHOLD). Because top initially
1656 points to its own bin with initial zero size, thus forcing
1657 extension on the first malloc request, we avoid having any special
1658 code in malloc to check whether it even exists yet. But we still
1659 need to do so when getting memory from system, so we make
1660 initial_top treat the bin as a legal but unusable chunk during the
1661 interval between initialization and the first call to
1662 sYSMALLOc. (This is somewhat delicate, since it relies on
1663 the 2 preceding words to be zero during this interval as well.)
1664 */
1665
1666 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1667 #define initial_top(M) (unsorted_chunks(M))
1668
1669 /*
1670 Binmap
1671
1672 To help compensate for the large number of bins, a one-level index
1673 structure is used for bin-by-bin searching. `binmap' is a
1674 bitvector recording whether bins are definitely empty so they can
1675 be skipped over during traversals. The bits are NOT always
1676 cleared as soon as bins are empty, but instead only
1677 when they are noticed to be empty during traversal in malloc.
1678 */
1679
1680 /* Conservatively use 32 bits per map word, even if on 64bit system */
1681 #define BINMAPSHIFT 5
1682 #define BITSPERMAP (1U << BINMAPSHIFT)
1683 #define BINMAPSIZE (NBINS / BITSPERMAP)
1684
1685 #define idx2block(i) ((i) >> BINMAPSHIFT)
1686 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
1687
1688 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
1689 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
1690 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
1691
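/* Editor's illustration (not part of the original source): with
   BINMAPSHIFT == 5 each binmap word covers 32 bins, so for bin 73

     idx2block(73) == 73 >> 5         == 2
     idx2bit(73)   == 1U << (73 & 31) == 1U << 9

   and mark_bin(m, 73) therefore sets bit 9 of m->binmap[2]. */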
1692 /*
1693 Fastbins
1694
1695 An array of lists holding recently freed small chunks. Fastbins
1696 are not doubly linked. It is faster to single-link them, and
1697 since chunks are never removed from the middles of these lists,
1698 double linking is not necessary. Also, unlike regular bins, they
1699 are not even processed in FIFO order (they use faster LIFO) since
1700 ordering doesn't much matter in the transient contexts in which
1701 fastbins are normally used.
1702
1703 Chunks in fastbins keep their inuse bit set, so they cannot
1704 be consolidated with other free chunks. malloc_consolidate
1705 releases all chunks in fastbins and consolidates them with
1706 other free chunks.
1707 */
1708
1709 typedef struct malloc_chunk* mfastbinptr;
1710 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1711
1712 /* offset 2 to use otherwise unindexable first 2 bins */
1713 #define fastbin_index(sz) \
1714 ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1715
1716
1717 /* The maximum fastbin request size we support */
1718 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1719
1720 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
1721
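/* Editor's illustration (not part of the original source), again assuming
   SIZE_SZ == 8 so that chunk sizes are spaced 16 bytes apart:

     fastbin_index(32)  == (32  >> 4) - 2 == 0   (the smallest chunk size)
     fastbin_index(128) == (128 >> 4) - 2 == 6

   MAX_FAST_SIZE is then 160, request2size(160) == 176,
   fastbin_index(176) == 9, and hence NFASTBINS == 10 on such a build. */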
1722 /*
1723 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1724 that triggers automatic consolidation of possibly-surrounding
1725 fastbin chunks. This is a heuristic, so the exact value should not
1726 matter too much. It is defined at half the default trim threshold as a
1727 compromise heuristic to only attempt consolidation if it is likely
1728 to lead to trimming. However, it is not dynamically tunable, since
1729 consolidation reduces fragmentation surrounding large chunks even
1730 if trimming is not used.
1731 */
1732
1733 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1734
1735 /*
1736 Since the lowest 2 bits in max_fast don't matter in size comparisons,
1737 they are used as flags.
1738 */
1739
1740 /*
1741 FASTCHUNKS_BIT held in max_fast indicates that there are probably
1742 some fastbin chunks. It is set true on entering a chunk into any
1743 fastbin, and cleared only in malloc_consolidate.
1744
1745 The truth value is inverted so that have_fastchunks will be true
1746 upon startup (since statics are zero-filled), simplifying
1747 initialization checks.
1748 */
1749
1750 #define FASTCHUNKS_BIT (1U)
1751
1752 #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
1753 #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
1754 #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
1755
1756 /*
1757 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1758 regions. Otherwise, contiguity is exploited in merging together,
1759 when possible, results from consecutive MORECORE calls.
1760
1761 The initial value comes from MORECORE_CONTIGUOUS, but is
1762 changed dynamically if mmap is ever used as an sbrk substitute.
1763 */
1764
1765 #define NONCONTIGUOUS_BIT (2U)
1766
1767 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1768 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1769 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1770 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1771
1772 /*
1773 Set value of max_fast.
1774 Use impossibly small value if 0.
1775 Precondition: there are no existing fastbin chunks.
1776 Setting the value clears fastchunk bit but preserves noncontiguous bit.
1777 */
1778
1779 #define set_max_fast(s) \
1780 global_max_fast = (((s) == 0) \
1781 ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1782 #define get_max_fast() global_max_fast
1783
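/* Editor's illustration (not part of the original source), assuming
   SIZE_SZ == 8 and MALLOC_ALIGN_MASK == 15: set_max_fast(64) stores
   (64 + 8) & ~15 == 64, so chunks of up to 64 bytes are served from
   fastbins; set_max_fast(0) stores SMALLBIN_WIDTH (16), which is smaller
   than MINSIZE and therefore matches no real chunk -- the "impossibly
   small value" mentioned above. */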
1784
1785 /*
1786 ----------- Internal state representation and initialization -----------
1787 */
1788
1789 struct malloc_state {
1790 /* Serialize access. */
1791 mutex_t mutex;
1792
1793 /* Flags (formerly in max_fast). */
1794 int flags;
1795
1796 #if THREAD_STATS
1797 /* Statistics for locking. Only used if THREAD_STATS is defined. */
1798 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
1799 #endif
1800
1801 /* Fastbins */
1802 mfastbinptr fastbinsY[NFASTBINS];
1803
1804 /* Base of the topmost chunk -- not otherwise kept in a bin */
1805 mchunkptr top;
1806
1807 /* The remainder from the most recent split of a small request */
1808 mchunkptr last_remainder;
1809
1810 /* Normal bins packed as described above */
1811 mchunkptr bins[NBINS * 2 - 2];
1812
1813 /* Bitmap of bins */
1814 unsigned int binmap[BINMAPSIZE];
1815
1816 /* Linked list */
1817 struct malloc_state *next;
1818
1819 #ifdef PER_THREAD
1820 /* Linked list for free arenas. */
1821 struct malloc_state *next_free;
1822 #endif
1823
1824 /* Memory allocated from the system in this arena. */
1825 INTERNAL_SIZE_T system_mem;
1826 INTERNAL_SIZE_T max_system_mem;
1827 };
1828
1829 struct malloc_par {
1830 /* Tunable parameters */
1831 unsigned long trim_threshold;
1832 INTERNAL_SIZE_T top_pad;
1833 INTERNAL_SIZE_T mmap_threshold;
1834 #ifdef PER_THREAD
1835 INTERNAL_SIZE_T arena_test;
1836 INTERNAL_SIZE_T arena_max;
1837 #endif
1838
1839 /* Memory map support */
1840 int n_mmaps;
1841 int n_mmaps_max;
1842 int max_n_mmaps;
1843 /* the mmap_threshold is dynamic, until the user sets
1844 it manually, at which point we need to disable any
1845 dynamic behavior. */
1846 int no_dyn_threshold;
1847
1848 /* Statistics */
1849 INTERNAL_SIZE_T mmapped_mem;
1850 /*INTERNAL_SIZE_T sbrked_mem;*/
1851 /*INTERNAL_SIZE_T max_sbrked_mem;*/
1852 INTERNAL_SIZE_T max_mmapped_mem;
1853 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
1854
1855 /* First address handed out by MORECORE/sbrk. */
1856 char* sbrk_base;
1857 };
1858
1859 /* There are several instances of this struct ("arenas") in this
1860 malloc. If you are adapting this malloc in a way that does NOT use
1861 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1862 before using. This malloc relies on the property that malloc_state
1863 is initialized to all zeroes (as is true of C statics). */
1864
1865 static struct malloc_state main_arena =
1866 {
1867 .mutex = MUTEX_INITIALIZER,
1868 .next = &main_arena
1869 };
1870
1871 /* There is only one instance of the malloc parameters. */
1872
1873 static struct malloc_par mp_ =
1874 {
1875 .top_pad = DEFAULT_TOP_PAD,
1876 .n_mmaps_max = DEFAULT_MMAP_MAX,
1877 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1878 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1879 #ifdef PER_THREAD
1880 # define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
1881 .arena_test = NARENAS_FROM_NCORES (1)
1882 #endif
1883 };
1884
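/* Editor's note (not part of the original source): with PER_THREAD
   enabled, NARENAS_FROM_NCORES(n) evaluates to 2*n on builds where long
   is 4 bytes and to 8*n where it is 8 bytes, so the default arena_test
   above is 2 or 8 respectively.  The non-public mallopt parameters
   M_ARENA_TEST and M_ARENA_MAX defined below can be used to adjust these
   limits at run time. */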
1885
1886 #ifdef PER_THREAD
1887 /* Non public mallopt parameters. */
1888 #define M_ARENA_TEST -7
1889 #define M_ARENA_MAX -8
1890 #endif
1891
1892
1893 /* Maximum size of memory handled in fastbins. */
1894 static INTERNAL_SIZE_T global_max_fast;
1895
1896 /*
1897 Initialize a malloc_state struct.
1898
1899 This is called only from within malloc_consolidate, which needs
1900 be called in the same contexts anyway. It is never called directly
1901 outside of malloc_consolidate because some optimizing compilers try
1902 to inline it at all call points, which turns out not to be an
1903 optimization at all. (Inlining it in malloc_consolidate is fine though.)
1904 */
1905
1906 static void malloc_init_state(mstate av)
1907 {
1908 int i;
1909 mbinptr bin;
1910
1911 /* Establish circular links for normal bins */
1912 for (i = 1; i < NBINS; ++i) {
1913 bin = bin_at(av,i);
1914 bin->fd = bin->bk = bin;
1915 }
1916
1917 #if MORECORE_CONTIGUOUS
1918 if (av != &main_arena)
1919 #endif
1920 set_noncontiguous(av);
1921 if (av == &main_arena)
1922 set_max_fast(DEFAULT_MXFAST);
1923 av->flags |= FASTCHUNKS_BIT;
1924
1925 av->top = initial_top(av);
1926 }
1927
1928 /*
1929 Other internal utilities operating on mstates
1930 */
1931
1932 static void* sYSMALLOc(INTERNAL_SIZE_T, mstate);
1933 static int sYSTRIm(size_t, mstate);
1934 static void malloc_consolidate(mstate);
1935
1936
1937 /* -------------- Early definitions for debugging hooks ---------------- */
1938
1939 /* Define and initialize the hook variables. These weak definitions must
1940 appear before any use of the variables in a function (arena.c uses one). */
1941 #ifndef weak_variable
1942 /* In GNU libc we want the hook variables to be weak definitions to
1943 avoid a problem with Emacs. */
1944 # define weak_variable weak_function
1945 #endif
1946
1947 /* Forward declarations. */
1948 static void* malloc_hook_ini __MALLOC_P ((size_t sz,
1949 const __malloc_ptr_t caller));
1950 static void* realloc_hook_ini __MALLOC_P ((void* ptr, size_t sz,
1951 const __malloc_ptr_t caller));
1952 static void* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
1953 const __malloc_ptr_t caller));
1954
1955 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1956 void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
1957 const __malloc_ptr_t) = NULL;
1958 __malloc_ptr_t weak_variable (*__malloc_hook)
1959 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
1960 __malloc_ptr_t weak_variable (*__realloc_hook)
1961 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
1962 = realloc_hook_ini;
1963 __malloc_ptr_t weak_variable (*__memalign_hook)
1964 (size_t __alignment, size_t __size, const __malloc_ptr_t)
1965 = memalign_hook_ini;
1966 void weak_variable (*__after_morecore_hook) (void) = NULL;
1967
1968
1969 /* ---------------- Error behavior ------------------------------------ */
1970
1971 #ifndef DEFAULT_CHECK_ACTION
1972 #define DEFAULT_CHECK_ACTION 3
1973 #endif
1974
1975 static int check_action = DEFAULT_CHECK_ACTION;
1976
1977
1978 /* ------------------ Testing support ----------------------------------*/
1979
1980 static int perturb_byte;
1981
1982 #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
1983 #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
1984
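/* Editor's note (not part of the original source): if perturb_byte is set
   to a non-zero value such as 0xAB (for instance via the M_PERTURB
   mallopt parameter or the MALLOC_PERTURB_ environment variable), newly
   allocated memory is filled with 0xAB ^ 0xff == 0x54 and freed memory
   with 0xAB, which makes use-after-free and use-of-uninitialised-memory
   bugs much easier to spot. */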
1985
1986 /* ------------------- Support for multiple arenas -------------------- */
1987 #include "arena.c"
1988
1989 /*
1990 Debugging support
1991
1992 These routines make a number of assertions about the states
1993 of data structures that should be true at all times. If any
1994 are not true, it's very likely that a user program has somehow
1995 trashed memory. (It's also possible that there is a coding error
1996 in malloc, in which case please report it!)
1997 */
1998
1999 #if ! MALLOC_DEBUG
2000
2001 #define check_chunk(A,P)
2002 #define check_free_chunk(A,P)
2003 #define check_inuse_chunk(A,P)
2004 #define check_remalloced_chunk(A,P,N)
2005 #define check_malloced_chunk(A,P,N)
2006 #define check_malloc_state(A)
2007
2008 #else
2009
2010 #define check_chunk(A,P) do_check_chunk(A,P)
2011 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2012 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2013 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2014 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2015 #define check_malloc_state(A) do_check_malloc_state(A)
2016
2017 /*
2018 Properties of all chunks
2019 */
2020
2021 static void do_check_chunk(mstate av, mchunkptr p)
2022 {
2023 unsigned long sz = chunksize(p);
2024 /* min and max possible addresses assuming contiguous allocation */
2025 char* max_address = (char*)(av->top) + chunksize(av->top);
2026 char* min_address = max_address - av->system_mem;
2027
2028 if (!chunk_is_mmapped(p)) {
2029
2030 /* Has legal address ... */
2031 if (p != av->top) {
2032 if (contiguous(av)) {
2033 assert(((char*)p) >= min_address);
2034 assert(((char*)p + sz) <= ((char*)(av->top)));
2035 }
2036 }
2037 else {
2038 /* top size is always at least MINSIZE */
2039 assert((unsigned long)(sz) >= MINSIZE);
2040 /* top predecessor always marked inuse */
2041 assert(prev_inuse(p));
2042 }
2043
2044 }
2045 else {
2046 /* address is outside main heap */
2047 if (contiguous(av) && av->top != initial_top(av)) {
2048 assert(((char*)p) < min_address || ((char*)p) >= max_address);
2049 }
2050 /* chunk is page-aligned */
2051 assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0);
2052 /* mem is aligned */
2053 assert(aligned_OK(chunk2mem(p)));
2054 }
2055 }
2056
2057 /*
2058 Properties of free chunks
2059 */
2060
2061 static void do_check_free_chunk(mstate av, mchunkptr p)
2062 {
2063 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2064 mchunkptr next = chunk_at_offset(p, sz);
2065
2066 do_check_chunk(av, p);
2067
2068 /* Chunk must claim to be free ... */
2069 assert(!inuse(p));
2070 assert (!chunk_is_mmapped(p));
2071
2072 /* Unless a special marker, must have OK fields */
2073 if ((unsigned long)(sz) >= MINSIZE)
2074 {
2075 assert((sz & MALLOC_ALIGN_MASK) == 0);
2076 assert(aligned_OK(chunk2mem(p)));
2077 /* ... matching footer field */
2078 assert(next->prev_size == sz);
2079 /* ... and is fully consolidated */
2080 assert(prev_inuse(p));
2081 assert (next == av->top || inuse(next));
2082
2083 /* ... and has minimally sane links */
2084 assert(p->fd->bk == p);
2085 assert(p->bk->fd == p);
2086 }
2087 else /* markers are always of size SIZE_SZ */
2088 assert(sz == SIZE_SZ);
2089 }
2090
2091 /*
2092 Properties of inuse chunks
2093 */
2094
2095 static void do_check_inuse_chunk(mstate av, mchunkptr p)
2096 {
2097 mchunkptr next;
2098
2099 do_check_chunk(av, p);
2100
2101 if (chunk_is_mmapped(p))
2102 return; /* mmapped chunks have no next/prev */
2103
2104 /* Check whether it claims to be in use ... */
2105 assert(inuse(p));
2106
2107 next = next_chunk(p);
2108
2109 /* ... and is surrounded by OK chunks.
2110 Since more things can be checked with free chunks than inuse ones,
2111 if an inuse chunk borders them and debug is on, it's worth doing them.
2112 */
2113 if (!prev_inuse(p)) {
2114 /* Note that we cannot even look at prev unless it is not inuse */
2115 mchunkptr prv = prev_chunk(p);
2116 assert(next_chunk(prv) == p);
2117 do_check_free_chunk(av, prv);
2118 }
2119
2120 if (next == av->top) {
2121 assert(prev_inuse(next));
2122 assert(chunksize(next) >= MINSIZE);
2123 }
2124 else if (!inuse(next))
2125 do_check_free_chunk(av, next);
2126 }
2127
2128 /*
2129 Properties of chunks recycled from fastbins
2130 */
2131
2132 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2133 {
2134 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2135
2136 if (!chunk_is_mmapped(p)) {
2137 assert(av == arena_for_chunk(p));
2138 if (chunk_non_main_arena(p))
2139 assert(av != &main_arena);
2140 else
2141 assert(av == &main_arena);
2142 }
2143
2144 do_check_inuse_chunk(av, p);
2145
2146 /* Legal size ... */
2147 assert((sz & MALLOC_ALIGN_MASK) == 0);
2148 assert((unsigned long)(sz) >= MINSIZE);
2149 /* ... and alignment */
2150 assert(aligned_OK(chunk2mem(p)));
2151 /* chunk is less than MINSIZE more than request */
2152 assert((long)(sz) - (long)(s) >= 0);
2153 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2154 }
2155
2156 /*
2157 Properties of nonrecycled chunks at the point they are malloced
2158 */
2159
2160 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2161 {
2162 /* same as recycled case ... */
2163 do_check_remalloced_chunk(av, p, s);
2164
2165 /*
2166 ... plus, must obey implementation invariant that prev_inuse is
2167 always true of any allocated chunk; i.e., that each allocated
2168 chunk borders either a previously allocated and still in-use
2169 chunk, or the base of its memory arena. This is ensured
2170 by making all allocations from the `lowest' part of any found
2171 chunk. This does not necessarily hold however for chunks
2172 recycled via fastbins.
2173 */
2174
2175 assert(prev_inuse(p));
2176 }
2177
2178
2179 /*
2180 Properties of malloc_state.
2181
2182 This may be useful for debugging malloc, as well as detecting user
2183 programmer errors that somehow write into malloc_state.
2184
2185 If you are extending or experimenting with this malloc, you can
2186 probably figure out how to hack this routine to print out or
2187 display chunk addresses, sizes, bins, and other instrumentation.
2188 */
2189
2190 static void do_check_malloc_state(mstate av)
2191 {
2192 int i;
2193 mchunkptr p;
2194 mchunkptr q;
2195 mbinptr b;
2196 unsigned int idx;
2197 INTERNAL_SIZE_T size;
2198 unsigned long total = 0;
2199 int max_fast_bin;
2200
2201 /* internal size_t must be no wider than pointer type */
2202 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2203
2204 /* alignment is a power of 2 */
2205 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2206
2207 /* cannot run remaining checks until fully initialized */
2208 if (av->top == 0 || av->top == initial_top(av))
2209 return;
2210
2211 /* pagesize is a power of 2 */
2212 assert((GLRO(dl_pagesize) & (GLRO(dl_pagesize)-1)) == 0);
2213
2214 /* A contiguous main_arena is consistent with sbrk_base. */
2215 if (av == &main_arena && contiguous(av))
2216 assert((char*)mp_.sbrk_base + av->system_mem ==
2217 (char*)av->top + chunksize(av->top));
2218
2219 /* properties of fastbins */
2220
2221 /* max_fast is in allowed range */
2222 assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
2223
2224 max_fast_bin = fastbin_index(get_max_fast ());
2225
2226 for (i = 0; i < NFASTBINS; ++i) {
2227 p = fastbin (av, i);
2228
2229 /* The following test can only be performed for the main arena.
2230 While mallopt calls malloc_consolidate to get rid of all fast
2231 bins (especially those larger than the new maximum), it does
2232 so only for the main arena. Doing this for any
2233 other arena would mean those arenas have to be locked and
2234 malloc_consolidate called for them. This is excessive. And
2235 even if this were acceptable to somebody, it still could not solve
2236 the problem completely, since if the arena is locked a
2237 concurrent malloc call might create a new arena which then
2238 could use the newly invalid fast bins. */
2239
2240 /* all bins past max_fast are empty */
2241 if (av == &main_arena && i > max_fast_bin)
2242 assert(p == 0);
2243
2244 while (p != 0) {
2245 /* each chunk claims to be inuse */
2246 do_check_inuse_chunk(av, p);
2247 total += chunksize(p);
2248 /* chunk belongs in this bin */
2249 assert(fastbin_index(chunksize(p)) == i);
2250 p = p->fd;
2251 }
2252 }
2253
2254 if (total != 0)
2255 assert(have_fastchunks(av));
2256 else if (!have_fastchunks(av))
2257 assert(total == 0);
2258
2259 /* check normal bins */
2260 for (i = 1; i < NBINS; ++i) {
2261 b = bin_at(av,i);
2262
2263 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2264 if (i >= 2) {
2265 unsigned int binbit = get_binmap(av,i);
2266 int empty = last(b) == b;
2267 if (!binbit)
2268 assert(empty);
2269 else if (!empty)
2270 assert(binbit);
2271 }
2272
2273 for (p = last(b); p != b; p = p->bk) {
2274 /* each chunk claims to be free */
2275 do_check_free_chunk(av, p);
2276 size = chunksize(p);
2277 total += size;
2278 if (i >= 2) {
2279 /* chunk belongs in bin */
2280 idx = bin_index(size);
2281 assert(idx == i);
2282 /* lists are sorted */
2283 assert(p->bk == b ||
2284 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2285
2286 if (!in_smallbin_range(size))
2287 {
2288 if (p->fd_nextsize != NULL)
2289 {
2290 if (p->fd_nextsize == p)
2291 assert (p->bk_nextsize == p);
2292 else
2293 {
2294 if (p->fd_nextsize == first (b))
2295 assert (chunksize (p) < chunksize (p->fd_nextsize));
2296 else
2297 assert (chunksize (p) > chunksize (p->fd_nextsize));
2298
2299 if (p == first (b))
2300 assert (chunksize (p) > chunksize (p->bk_nextsize));
2301 else
2302 assert (chunksize (p) < chunksize (p->bk_nextsize));
2303 }
2304 }
2305 else
2306 assert (p->bk_nextsize == NULL);
2307 }
2308 } else if (!in_smallbin_range(size))
2309 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2310 /* chunk is followed by a legal chain of inuse chunks */
2311 for (q = next_chunk(p);
2312 (q != av->top && inuse(q) &&
2313 (unsigned long)(chunksize(q)) >= MINSIZE);
2314 q = next_chunk(q))
2315 do_check_inuse_chunk(av, q);
2316 }
2317 }
2318
2319 /* top chunk is OK */
2320 check_chunk(av, av->top);
2321
2322 /* sanity checks for statistics */
2323
2324 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2325
2326 assert((unsigned long)(av->system_mem) <=
2327 (unsigned long)(av->max_system_mem));
2328
2329 assert((unsigned long)(mp_.mmapped_mem) <=
2330 (unsigned long)(mp_.max_mmapped_mem));
2331 }
2332 #endif
2333
2334
2335 /* ----------------- Support for debugging hooks -------------------- */
2336 #include "hooks.c"
2337
2338
2339 /* ----------- Routines dealing with system allocation -------------- */
2340
2341 /*
2342 sysmalloc handles malloc cases requiring more memory from the system.
2343 On entry, it is assumed that av->top does not have enough
2344 space to service request for nb bytes, thus requiring that av->top
2345 be extended or replaced.
2346 */
2347
2348 static void* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2349 {
2350 mchunkptr old_top; /* incoming value of av->top */
2351 INTERNAL_SIZE_T old_size; /* its size */
2352 char* old_end; /* its end address */
2353
2354 long size; /* arg to first MORECORE or mmap call */
2355 char* brk; /* return value from MORECORE */
2356
2357 long correction; /* arg to 2nd MORECORE call */
2358 char* snd_brk; /* 2nd return val */
2359
2360 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2361 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2362 char* aligned_brk; /* aligned offset into brk */
2363
2364 mchunkptr p; /* the allocated/returned chunk */
2365 mchunkptr remainder; /* remainder from allocation */
2366 unsigned long remainder_size; /* its size */
2367
2368 unsigned long sum; /* for updating stats */
2369
2370 size_t pagemask = GLRO(dl_pagesize) - 1;
2371 bool tried_mmap = false;
2372
2373
2374 /*
2375 If have mmap, and the request size meets the mmap threshold, and
2376 the system supports mmap, and there are few enough currently
2377 allocated mmapped regions, try to directly map this request
2378 rather than expanding top.
2379 */
2380
2381 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2382 (mp_.n_mmaps < mp_.n_mmaps_max)) {
2383
2384 char* mm; /* return value from mmap call*/
2385
2386 try_mmap:
2387 /*
2388 Round up size to nearest page. For mmapped chunks, the overhead
2389 is one SIZE_SZ unit larger than for normal chunks, because there
2390 is no following chunk whose prev_size field could be used.
2391
2392 See the front_misalign handling below, for glibc there is no
2393 need for further alignments. */
2394 size = (nb + SIZE_SZ + pagemask) & ~pagemask;
2395 tried_mmap = true;
2396
2397 /* Don't try if size wraps around 0 */
2398 if ((unsigned long)(size) > (unsigned long)(nb)) {
2399
2400 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2401
2402 if (mm != MAP_FAILED) {
2403
2404 /*
2405 The offset to the start of the mmapped region is stored
2406 in the prev_size field of the chunk. This allows us to adjust
2407 returned start address to meet alignment requirements here
2408 and in memalign(), and still be able to compute proper
2409 address argument for later munmap in free() and realloc().
2410
2411 For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2412 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2413 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2414 assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
2415
2416 p = (mchunkptr)mm;
2417 set_head(p, size|IS_MMAPPED);
2418
2419 /* update statistics */
2420
2421 if (++mp_.n_mmaps > mp_.max_n_mmaps)
2422 mp_.max_n_mmaps = mp_.n_mmaps;
2423
2424 sum = mp_.mmapped_mem += size;
2425 if (sum > (unsigned long)(mp_.max_mmapped_mem))
2426 mp_.max_mmapped_mem = sum;
2427
2428 check_chunk(av, p);
2429
2430 return chunk2mem(p);
2431 }
2432 }
2433 }
2434
2435 /* Record incoming configuration of top */
2436
2437 old_top = av->top;
2438 old_size = chunksize(old_top);
2439 old_end = (char*)(chunk_at_offset(old_top, old_size));
2440
2441 brk = snd_brk = (char*)(MORECORE_FAILURE);
2442
2443 /*
2444 If not the first time through, we require old_size to be
2445 at least MINSIZE and to have prev_inuse set.
2446 */
2447
2448 assert((old_top == initial_top(av) && old_size == 0) ||
2449 ((unsigned long) (old_size) >= MINSIZE &&
2450 prev_inuse(old_top) &&
2451 ((unsigned long)old_end & pagemask) == 0));
2452
2453 /* Precondition: not enough current space to satisfy nb request */
2454 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2455
2456
2457 if (av != &main_arena) {
2458
2459 heap_info *old_heap, *heap;
2460 size_t old_heap_size;
2461
2462 /* First try to extend the current heap. */
2463 old_heap = heap_for_ptr(old_top);
2464 old_heap_size = old_heap->size;
2465 if ((long) (MINSIZE + nb - old_size) > 0
2466 && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
2467 av->system_mem += old_heap->size - old_heap_size;
2468 arena_mem += old_heap->size - old_heap_size;
2469 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2470 | PREV_INUSE);
2471 }
2472 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2473 /* Use a newly allocated heap. */
2474 heap->ar_ptr = av;
2475 heap->prev = old_heap;
2476 av->system_mem += heap->size;
2477 arena_mem += heap->size;
2478 /* Set up the new top. */
2479 top(av) = chunk_at_offset(heap, sizeof(*heap));
2480 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2481
2482 /* Setup fencepost and free the old top chunk. */
2483 /* The fencepost takes at least MINSIZE bytes, because it might
2484 become the top chunk again later. Note that a footer is set
2485 up, too, although the chunk is marked in use. */
2486 old_size -= MINSIZE;
2487 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2488 if (old_size >= MINSIZE) {
2489 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2490 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2491 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
2492 _int_free(av, old_top, 1);
2493 } else {
2494 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2495 set_foot(old_top, (old_size + 2*SIZE_SZ));
2496 }
2497 }
2498 else if (!tried_mmap)
2499 /* We can at least try to use mmap to get memory. */
2500 goto try_mmap;
2501
2502 } else { /* av == main_arena */
2503
2504
2505 /* Request enough space for nb + pad + overhead */
2506
2507 size = nb + mp_.top_pad + MINSIZE;
2508
2509 /*
2510 If contiguous, we can subtract out existing space that we hope to
2511 combine with new space. We add it back later only if
2512 we don't actually get contiguous space.
2513 */
2514
2515 if (contiguous(av))
2516 size -= old_size;
2517
2518 /*
2519 Round to a multiple of page size.
2520 If MORECORE is not contiguous, this ensures that we only call it
2521 with whole-page arguments. And if MORECORE is contiguous and
2522 this is not first time through, this preserves page-alignment of
2523 previous calls. Otherwise, we correct to page-align below.
2524 */
2525
2526 size = (size + pagemask) & ~pagemask;
2527
2528 /*
2529 Don't try to call MORECORE if argument is so big as to appear
2530 negative. Note that since mmap takes size_t arg, it may succeed
2531 below even if we cannot call MORECORE.
2532 */
2533
2534 if (size > 0)
2535 brk = (char*)(MORECORE(size));
2536
2537 if (brk != (char*)(MORECORE_FAILURE)) {
2538 /* Call the `morecore' hook if necessary. */
2539 void (*hook) (void) = force_reg (__after_morecore_hook);
2540 if (__builtin_expect (hook != NULL, 0))
2541 (*hook) ();
2542 } else {
2543 /*
2544 If have mmap, try using it as a backup when MORECORE fails or
2545 cannot be used. This is worth doing on systems that have "holes" in
2546 address space, so sbrk cannot extend to give contiguous space, but
2547 space is available elsewhere. Note that we ignore mmap max count
2548 and threshold limits, since the space will not be used as a
2549 segregated mmap region.
2550 */
2551
2552 /* Cannot merge with old top, so add its size back in */
2553 if (contiguous(av))
2554 size = (size + old_size + pagemask) & ~pagemask;
2555
2556 /* If we are relying on mmap as backup, then use larger units */
2557 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2558 size = MMAP_AS_MORECORE_SIZE;
2559
2560 /* Don't try if size wraps around 0 */
2561 if ((unsigned long)(size) > (unsigned long)(nb)) {
2562
2563 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2564
2565 if (mbrk != MAP_FAILED) {
2566
2567 /* We do not need, and cannot use, another sbrk call to find end */
2568 brk = mbrk;
2569 snd_brk = brk + size;
2570
2571 /*
2572 Record that we no longer have a contiguous sbrk region.
2573 After the first time mmap is used as backup, we do not
2574 ever rely on contiguous space since this could incorrectly
2575 bridge regions.
2576 */
2577 set_noncontiguous(av);
2578 }
2579 }
2580 }
2581
2582 if (brk != (char*)(MORECORE_FAILURE)) {
2583 if (mp_.sbrk_base == 0)
2584 mp_.sbrk_base = brk;
2585 av->system_mem += size;
2586
2587 /*
2588 If MORECORE extends previous space, we can likewise extend top size.
2589 */
2590
2591 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
2592 set_head(old_top, (size + old_size) | PREV_INUSE);
2593
2594 else if (contiguous(av) && old_size && brk < old_end) {
2595 /* Oops! Someone else killed our space. Can't touch anything. */
2596 malloc_printerr (3, "break adjusted to free malloc space", brk);
2597 }
2598
2599 /*
2600 Otherwise, make adjustments:
2601
2602 * If the first time through or noncontiguous, we need to call sbrk
2603 just to find out where the end of memory lies.
2604
2605 * We need to ensure that all returned chunks from malloc will meet
2606 MALLOC_ALIGNMENT
2607
2608 * If there was an intervening foreign sbrk, we need to adjust sbrk
2609 request size to account for fact that we will not be able to
2610 combine new space with existing space in old_top.
2611
2612 * Almost all systems internally allocate whole pages at a time, in
2613 which case we might as well use the whole last page of request.
2614 So we allocate enough more memory to hit a page boundary now,
2615 which in turn causes future contiguous calls to page-align.
2616 */
2617
2618 else {
2619 front_misalign = 0;
2620 end_misalign = 0;
2621 correction = 0;
2622 aligned_brk = brk;
2623
2624 /* handle contiguous cases */
2625 if (contiguous(av)) {
2626
2627 /* Count foreign sbrk as system_mem. */
2628 if (old_size)
2629 av->system_mem += brk - old_end;
2630
2631 /* Guarantee alignment of first new chunk made from this space */
2632
2633 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2634 if (front_misalign > 0) {
2635
2636 /*
2637 Skip over some bytes to arrive at an aligned position.
2638 We don't need to specially mark these wasted front bytes.
2639 They will never be accessed anyway because
2640 prev_inuse of av->top (and any chunk created from its start)
2641 is always true after initialization.
2642 */
2643
2644 correction = MALLOC_ALIGNMENT - front_misalign;
2645 aligned_brk += correction;
2646 }
2647
2648 /*
2649 If this isn't adjacent to existing space, then we will not
2650 be able to merge with old_top space, so must add to 2nd request.
2651 */
2652
2653 correction += old_size;
2654
2655 /* Extend the end address to hit a page boundary */
2656 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
2657 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
2658
2659 assert(correction >= 0);
2660 snd_brk = (char*)(MORECORE(correction));
2661
2662 /*
2663 If we can't allocate the correction, try to at least find out the
2664 current brk. It might be enough to proceed without failing.
2665
2666 Note that if second sbrk did NOT fail, we assume that space
2667 is contiguous with first sbrk. This is a safe assumption unless
2668 program is multithreaded but doesn't use locks and a foreign sbrk
2669 occurred between our first and second calls.
2670 */
2671
2672 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2673 correction = 0;
2674 snd_brk = (char*)(MORECORE(0));
2675 } else {
2676 /* Call the `morecore' hook if necessary. */
2677 void (*hook) (void) = force_reg (__after_morecore_hook);
2678 if (__builtin_expect (hook != NULL, 0))
2679 (*hook) ();
2680 }
2681 }
2682
2683 /* handle non-contiguous cases */
2684 else {
2685 /* MORECORE/mmap must correctly align */
2686 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
2687
2688 /* Find out current end of memory */
2689 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2690 snd_brk = (char*)(MORECORE(0));
2691 }
2692 }
2693
2694 /* Adjust top based on results of second sbrk */
2695 if (snd_brk != (char*)(MORECORE_FAILURE)) {
2696 av->top = (mchunkptr)aligned_brk;
2697 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2698 av->system_mem += correction;
2699
2700 /*
2701 If not the first time through, we either have a
2702 gap due to foreign sbrk or a non-contiguous region. Insert a
2703 double fencepost at old_top to prevent consolidation with space
2704 we don't own. These fenceposts are artificial chunks that are
2705 marked as inuse and are in any case too small to use. We need
2706 two to make sizes and alignments work out.
2707 */
2708
2709 if (old_size != 0) {
2710 /*
2711 Shrink old_top to insert fenceposts, keeping size a
2712 multiple of MALLOC_ALIGNMENT. We know there is at least
2713 enough space in old_top to do this.
2714 */
2715 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2716 set_head(old_top, old_size | PREV_INUSE);
2717
2718 /*
2719 Note that the following assignments completely overwrite
2720 old_top when old_size was previously MINSIZE. This is
2721 intentional. We need the fencepost, even if old_top otherwise gets
2722 lost.
2723 */
2724 chunk_at_offset(old_top, old_size )->size =
2725 (2*SIZE_SZ)|PREV_INUSE;
2726
2727 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
2728 (2*SIZE_SZ)|PREV_INUSE;
2729
2730 /* If possible, release the rest. */
2731 if (old_size >= MINSIZE) {
2732 _int_free(av, old_top, 1);
2733 }
2734
2735 }
2736 }
2737 }
2738 }
2739
2740 } /* if (av != &main_arena) */
2741
2742 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
2743 av->max_system_mem = av->system_mem;
2744 check_malloc_state(av);
2745
2746 /* finally, do the allocation */
2747 p = av->top;
2748 size = chunksize(p);
2749
2750 /* check that one of the above allocation paths succeeded */
2751 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
2752 remainder_size = size - nb;
2753 remainder = chunk_at_offset(p, nb);
2754 av->top = remainder;
2755 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2756 set_head(remainder, remainder_size | PREV_INUSE);
2757 check_malloced_chunk(av, p, nb);
2758 return chunk2mem(p);
2759 }
2760
2761 /* catch all failure paths */
2762 __set_errno (ENOMEM);
2763 return 0;
2764 }
2765
2766
2767 /*
2768 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
2769 to the system (via negative arguments to sbrk) if there is unused
2770 memory at the `high' end of the malloc pool. It is called
2771 automatically by free() when top space exceeds the trim
2772 threshold. It is also called by the public malloc_trim routine. It
2773 returns 1 if it actually released any memory, else 0.
2774 */
2775
2776 static int sYSTRIm(size_t pad, mstate av)
2777 {
2778 long top_size; /* Amount of top-most memory */
2779 long extra; /* Amount to release */
2780 long released; /* Amount actually released */
2781 char* current_brk; /* address returned by pre-check sbrk call */
2782 char* new_brk; /* address returned by post-check sbrk call */
2783 size_t pagesz;
2784
2785 pagesz = GLRO(dl_pagesize);
2786 top_size = chunksize(av->top);
2787
2788 /* Release in pagesize units, keeping at least one page */
2789 extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
2790
2791 if (extra > 0) {
2792
2793 /*
2794 Only proceed if end of memory is where we last set it.
2795 This avoids problems if there were foreign sbrk calls.
2796 */
2797 current_brk = (char*)(MORECORE(0));
2798 if (current_brk == (char*)(av->top) + top_size) {
2799
2800 /*
2801 Attempt to release memory. We ignore MORECORE return value,
2802 and instead call again to find out where new end of memory is.
2803 This avoids problems if first call releases less than we asked,
2804 or if failure somehow altered the brk value. (We could still
2805 encounter problems if it altered brk in some very bad way,
2806 but the only thing we can do is adjust anyway, which will cause
2807 some downstream failure.)
2808 */
2809
2810 MORECORE(-extra);
2811 /* Call the `morecore' hook if necessary. */
2812 void (*hook) (void) = force_reg (__after_morecore_hook);
2813 if (__builtin_expect (hook != NULL, 0))
2814 (*hook) ();
2815 new_brk = (char*)(MORECORE(0));
2816
2817 if (new_brk != (char*)MORECORE_FAILURE) {
2818 released = (long)(current_brk - new_brk);
2819
2820 if (released != 0) {
2821 /* Success. Adjust top. */
2822 av->system_mem -= released;
2823 set_head(av->top, (top_size - released) | PREV_INUSE);
2824 check_malloc_state(av);
2825 return 1;
2826 }
2827 }
2828 }
2829 }
2830 return 0;
2831 }
2832
2833 static void
2834 internal_function
2835 munmap_chunk(mchunkptr p)
2836 {
2837 INTERNAL_SIZE_T size = chunksize(p);
2838
2839 assert (chunk_is_mmapped(p));
2840
2841 uintptr_t block = (uintptr_t) p - p->prev_size;
2842 size_t total_size = p->prev_size + size;
2843 /* Unfortunately we have to do the compiler's job by hand here. Normally
2844 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2845 page size. But gcc does not recognize the optimization possibility
2846 (at the moment, at least) so we combine the two values into one before
2847 the bit test. */
2848 if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
2849 {
2850 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
2851 chunk2mem (p));
2852 return;
2853 }
2854
2855 mp_.n_mmaps--;
2856 mp_.mmapped_mem -= total_size;
2857
2858 /* If munmap fails, the process's virtual memory address space is in
2859 bad shape. Just leave the block hanging around; the process will
2860 terminate shortly anyway, since not much can be done. */
2861 munmap((char *)block, total_size);
2862 }
2863
2864 #if HAVE_MREMAP
2865
2866 static mchunkptr
2867 internal_function
2868 mremap_chunk(mchunkptr p, size_t new_size)
2869 {
2870 size_t page_mask = GLRO(dl_pagesize) - 1;
2871 INTERNAL_SIZE_T offset = p->prev_size;
2872 INTERNAL_SIZE_T size = chunksize(p);
2873 char *cp;
2874
2875 assert (chunk_is_mmapped(p));
2876 assert(((size + offset) & (GLRO(dl_pagesize)-1)) == 0);
2877
2878 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2879 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
2880
2881 /* No need to remap if the number of pages does not change. */
2882 if (size + offset == new_size)
2883 return p;
2884
2885 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
2886 MREMAP_MAYMOVE);
2887
2888 if (cp == MAP_FAILED) return 0;
2889
2890 p = (mchunkptr)(cp + offset);
2891
2892 assert(aligned_OK(chunk2mem(p)));
2893
2894 assert((p->prev_size == offset));
2895 set_head(p, (new_size - offset)|IS_MMAPPED);
2896
2897 mp_.mmapped_mem -= size + offset;
2898 mp_.mmapped_mem += new_size;
2899 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
2900 mp_.max_mmapped_mem = mp_.mmapped_mem;
2901 return p;
2902 }
2903
2904 #endif /* HAVE_MREMAP */
2905
2906 /*------------------------ Public wrappers. --------------------------------*/
2907
2908 void*
2909 public_mALLOc(size_t bytes)
2910 {
2911 mstate ar_ptr;
2912 void *victim;
2913
2914 __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t)
2915 = force_reg (__malloc_hook);
2916 if (__builtin_expect (hook != NULL, 0))
2917 return (*hook)(bytes, RETURN_ADDRESS (0));
2918
2919 arena_lookup(ar_ptr);
2920
2921 arena_lock(ar_ptr, bytes);
2922 if(!ar_ptr)
2923 return 0;
2924 victim = _int_malloc(ar_ptr, bytes);
2925 if(!victim) {
2926 /* Maybe the failure is due to running out of mmapped areas. */
2927 if(ar_ptr != &main_arena) {
2928 (void)mutex_unlock(&ar_ptr->mutex);
2929 ar_ptr = &main_arena;
2930 (void)mutex_lock(&ar_ptr->mutex);
2931 victim = _int_malloc(ar_ptr, bytes);
2932 (void)mutex_unlock(&ar_ptr->mutex);
2933 } else {
2934 /* ... or sbrk() has failed and there is still a chance to mmap() */
2935 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
2936 (void)mutex_unlock(&main_arena.mutex);
2937 if(ar_ptr) {
2938 victim = _int_malloc(ar_ptr, bytes);
2939 (void)mutex_unlock(&ar_ptr->mutex);
2940 }
2941 }
2942 } else
2943 (void)mutex_unlock(&ar_ptr->mutex);
2944 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
2945 ar_ptr == arena_for_chunk(mem2chunk(victim)));
2946 return victim;
2947 }
2948 libc_hidden_def(public_mALLOc)
2949
2950 void
2951 public_fREe(void* mem)
2952 {
2953 mstate ar_ptr;
2954 mchunkptr p; /* chunk corresponding to mem */
2955
2956 void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t)
2957 = force_reg (__free_hook);
2958 if (__builtin_expect (hook != NULL, 0)) {
2959 (*hook)(mem, RETURN_ADDRESS (0));
2960 return;
2961 }
2962
2963 if (mem == 0) /* free(0) has no effect */
2964 return;
2965
2966 p = mem2chunk(mem);
2967
2968 if (chunk_is_mmapped(p)) /* release mmapped memory. */
2969 {
2970 /* see if the dynamic brk/mmap threshold needs adjusting */
2971 if (!mp_.no_dyn_threshold
2972 && p->size > mp_.mmap_threshold
2973 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
2974 {
2975 mp_.mmap_threshold = chunksize (p);
2976 mp_.trim_threshold = 2 * mp_.mmap_threshold;
2977 }
2978 munmap_chunk(p);
2979 return;
2980 }
2981
2982 ar_ptr = arena_for_chunk(p);
2983 _int_free(ar_ptr, p, 0);
2984 }
2985 libc_hidden_def (public_fREe)
2986
2987 void*
2988 public_rEALLOc(void* oldmem, size_t bytes)
2989 {
2990 mstate ar_ptr;
2991 INTERNAL_SIZE_T nb; /* padded request size */
2992
2993 void* newp; /* chunk to return */
2994
2995 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
2996 force_reg (__realloc_hook);
2997 if (__builtin_expect (hook != NULL, 0))
2998 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
2999
3000 #if REALLOC_ZERO_BYTES_FREES
3001 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
3002 #endif
3003
3004 /* realloc of null is supposed to be same as malloc */
3005 if (oldmem == 0) return public_mALLOc(bytes);
3006
3007 /* chunk corresponding to oldmem */
3008 const mchunkptr oldp = mem2chunk(oldmem);
3009 /* its size */
3010 const INTERNAL_SIZE_T oldsize = chunksize(oldp);
3011
3012 /* Little security check which won't hurt performance: the
3013 allocator never wraps around at the end of the address space.
3014 Therefore we can exclude some size values which might appear
3015 here by accident or by "design" from some intruder. */
3016 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3017 || __builtin_expect (misaligned_chunk (oldp), 0))
3018 {
3019 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
3020 return NULL;
3021 }
3022
3023 checked_request2size(bytes, nb);
3024
3025 if (chunk_is_mmapped(oldp))
3026 {
3027 void* newmem;
3028
3029 #if HAVE_MREMAP
3030 newp = mremap_chunk(oldp, nb);
3031 if(newp) return chunk2mem(newp);
3032 #endif
3033 /* Note the extra SIZE_SZ overhead. */
3034 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3035 /* Must alloc, copy, free. */
3036 newmem = public_mALLOc(bytes);
3037 if (newmem == 0) return 0; /* propagate failure */
3038 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3039 munmap_chunk(oldp);
3040 return newmem;
3041 }
3042
3043 ar_ptr = arena_for_chunk(oldp);
3044 #if THREAD_STATS
3045 if(!mutex_trylock(&ar_ptr->mutex))
3046 ++(ar_ptr->stat_lock_direct);
3047 else {
3048 (void)mutex_lock(&ar_ptr->mutex);
3049 ++(ar_ptr->stat_lock_wait);
3050 }
3051 #else
3052 (void)mutex_lock(&ar_ptr->mutex);
3053 #endif
3054
3055 #if !defined PER_THREAD
3056 /* As in malloc(), remember this arena for the next allocation. */
3057 tsd_setspecific(arena_key, (void *)ar_ptr);
3058 #endif
3059
3060 newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
3061
3062 (void)mutex_unlock(&ar_ptr->mutex);
3063 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3064 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3065
3066 if (newp == NULL)
3067 {
3068 /* Try harder to allocate memory in other arenas. */
3069 newp = public_mALLOc(bytes);
3070 if (newp != NULL)
3071 {
3072 MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ);
3073 _int_free(ar_ptr, oldp, 0);
3074 }
3075 }
3076
3077 return newp;
3078 }
3079 libc_hidden_def (public_rEALLOc)
3080
3081 void*
3082 public_mEMALIGn(size_t alignment, size_t bytes)
3083 {
3084 mstate ar_ptr;
3085 void *p;
3086
3087 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3088 __const __malloc_ptr_t)) =
3089 force_reg (__memalign_hook);
3090 if (__builtin_expect (hook != NULL, 0))
3091 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3092
3093 /* If we need less alignment than we provide anyway, just relay to malloc */
3094 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
3095
3096 /* Otherwise, ensure that it is at least a minimum chunk size */
3097 if (alignment < MINSIZE) alignment = MINSIZE;
3098
3099 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3100 if(!ar_ptr)
3101 return 0;
3102 p = _int_memalign(ar_ptr, alignment, bytes);
3103 if(!p) {
3104 /* Maybe the failure is due to running out of mmapped areas. */
3105 if(ar_ptr != &main_arena) {
3106 (void)mutex_unlock(&ar_ptr->mutex);
3107 ar_ptr = &main_arena;
3108 (void)mutex_lock(&ar_ptr->mutex);
3109 p = _int_memalign(ar_ptr, alignment, bytes);
3110 (void)mutex_unlock(&ar_ptr->mutex);
3111 } else {
3112 /* ... or sbrk() has failed and there is still a chance to mmap() */
3113 mstate prev = ar_ptr->next ? ar_ptr : 0;
3114 (void)mutex_unlock(&ar_ptr->mutex);
3115 ar_ptr = arena_get2(prev, bytes);
3116 if(ar_ptr) {
3117 p = _int_memalign(ar_ptr, alignment, bytes);
3118 (void)mutex_unlock(&ar_ptr->mutex);
3119 }
3120 }
3121 } else
3122 (void)mutex_unlock(&ar_ptr->mutex);
3123 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3124 ar_ptr == arena_for_chunk(mem2chunk(p)));
3125 return p;
3126 }
3127 /* For ISO C11. */
3128 weak_alias (public_mEMALIGn, aligned_alloc)
3129 libc_hidden_def (public_mEMALIGn)
3130
3131 void*
3132 public_vALLOc(size_t bytes)
3133 {
3134 mstate ar_ptr;
3135 void *p;
3136
3137 if(__malloc_initialized < 0)
3138 ptmalloc_init ();
3139
3140 size_t pagesz = GLRO(dl_pagesize);
3141
3142 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3143 __const __malloc_ptr_t)) =
3144 force_reg (__memalign_hook);
3145 if (__builtin_expect (hook != NULL, 0))
3146 return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
3147
3148 arena_get(ar_ptr, bytes + pagesz + MINSIZE);
3149 if(!ar_ptr)
3150 return 0;
3151 p = _int_valloc(ar_ptr, bytes);
3152 (void)mutex_unlock(&ar_ptr->mutex);
3153 if(!p) {
3154 /* Maybe the failure is due to running out of mmapped areas. */
3155 if(ar_ptr != &main_arena) {
3156 ar_ptr = &main_arena;
3157 (void)mutex_lock(&ar_ptr->mutex);
3158 p = _int_memalign(ar_ptr, pagesz, bytes);
3159 (void)mutex_unlock(&ar_ptr->mutex);
3160 } else {
3161 /* ... or sbrk() has failed and there is still a chance to mmap() */
3162 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3163 if(ar_ptr) {
3164 p = _int_memalign(ar_ptr, pagesz, bytes);
3165 (void)mutex_unlock(&ar_ptr->mutex);
3166 }
3167 }
3168 }
3169 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3170 ar_ptr == arena_for_chunk(mem2chunk(p)));
3171
3172 return p;
3173 }
3174
3175 void*
3176 public_pVALLOc(size_t bytes)
3177 {
3178 mstate ar_ptr;
3179 void *p;
3180
3181 if(__malloc_initialized < 0)
3182 ptmalloc_init ();
3183
3184 size_t pagesz = GLRO(dl_pagesize);
3185 size_t page_mask = GLRO(dl_pagesize) - 1;
3186 size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
3187
3188 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3189 __const __malloc_ptr_t)) =
3190 force_reg (__memalign_hook);
3191 if (__builtin_expect (hook != NULL, 0))
3192 return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
3193
3194 arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
3195 p = _int_pvalloc(ar_ptr, bytes);
3196 (void)mutex_unlock(&ar_ptr->mutex);
3197 if(!p) {
3198 /* Maybe the failure is due to running out of mmapped areas. */
3199 if(ar_ptr != &main_arena) {
3200 ar_ptr = &main_arena;
3201 (void)mutex_lock(&ar_ptr->mutex);
3202 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
3203 (void)mutex_unlock(&ar_ptr->mutex);
3204 } else {
3205 /* ... or sbrk() has failed and there is still a chance to mmap() */
3206 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
3207 bytes + 2*pagesz + MINSIZE);
3208 if(ar_ptr) {
3209 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
3210 (void)mutex_unlock(&ar_ptr->mutex);
3211 }
3212 }
3213 }
3214 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3215 ar_ptr == arena_for_chunk(mem2chunk(p)));
3216
3217 return p;
3218 }
3219
3220 void*
3221 public_cALLOc(size_t n, size_t elem_size)
3222 {
3223 mstate av;
3224 mchunkptr oldtop, p;
3225 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3226 void* mem;
3227 unsigned long clearsize;
3228 unsigned long nclears;
3229 INTERNAL_SIZE_T* d;
3230
3231 /* size_t is unsigned so the behavior on overflow is defined. */
3232 bytes = n * elem_size;
3233 #define HALF_INTERNAL_SIZE_T \
3234 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3235 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
3236 if (elem_size != 0 && bytes / elem_size != n) {
3237 __set_errno (ENOMEM);
3238 return 0;
3239 }
3240 }
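
/*
   Illustration only: the test above first checks whether either operand has
   a bit set in its upper half (HALF_INTERNAL_SIZE_T).  Only in that rare
   case can n * elem_size overflow, and only then is the more expensive
   division check performed.  A standalone sketch of the same idea, using a
   hypothetical helper that is not part of this file:

   #include <stddef.h>
   #include <stdbool.h>

   static bool
   ex_mul_overflows (size_t n, size_t elem_size, size_t *out)
   {
     size_t bytes = n * elem_size;                  // wraps modulo SIZE_MAX + 1 on overflow
     size_t half = (size_t) 1 << (8 * sizeof (size_t) / 2);
     if ((n | elem_size) >= half                    // upper-half bits set: overflow possible
         && elem_size != 0 && bytes / elem_size != n)
       return true;                                 // product did not fit in size_t
     *out = bytes;
     return false;
   }
*/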
3241
3242 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
3243 force_reg (__malloc_hook);
3244 if (__builtin_expect (hook != NULL, 0)) {
3245 sz = bytes;
3246 mem = (*hook)(sz, RETURN_ADDRESS (0));
3247 if(mem == 0)
3248 return 0;
3249 return memset(mem, 0, sz);
3250 }
3251
3252 sz = bytes;
3253
3254 arena_get(av, sz);
3255 if(!av)
3256 return 0;
3257
3258 /* Check if we hand out the top chunk, in which case there may be no
3259 need to clear. */
3260 #if MORECORE_CLEARS
3261 oldtop = top(av);
3262 oldtopsize = chunksize(top(av));
3263 #if MORECORE_CLEARS < 2
3264 /* Only newly allocated memory is guaranteed to be cleared. */
3265 if (av == &main_arena &&
3266 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3267 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3268 #endif
3269 if (av != &main_arena)
3270 {
3271 heap_info *heap = heap_for_ptr (oldtop);
3272 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3273 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3274 }
3275 #endif
3276 mem = _int_malloc(av, sz);
3277
3278 /* Only clearing follows, so we can unlock early. */
3279 (void)mutex_unlock(&av->mutex);
3280
3281 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3282 av == arena_for_chunk(mem2chunk(mem)));
3283
3284 if (mem == 0) {
3285 /* Maybe the failure is due to running out of mmapped areas. */
3286 if(av != &main_arena) {
3287 (void)mutex_lock(&main_arena.mutex);
3288 mem = _int_malloc(&main_arena, sz);
3289 (void)mutex_unlock(&main_arena.mutex);
3290 } else {
3291 /* ... or sbrk() has failed and there is still a chance to mmap() */
3292 (void)mutex_lock(&main_arena.mutex);
3293 av = arena_get2(av->next ? av : 0, sz);
3294 (void)mutex_unlock(&main_arena.mutex);
3295 if(av) {
3296 mem = _int_malloc(av, sz);
3297 (void)mutex_unlock(&av->mutex);
3298 }
3299 }
3300 if (mem == 0) return 0;
3301 }
3302 p = mem2chunk(mem);
3303
3304   /* Two optional cases in which clearing is not necessary */
3305 if (chunk_is_mmapped (p))
3306 {
3307 if (__builtin_expect (perturb_byte, 0))
3308 MALLOC_ZERO (mem, sz);
3309 return mem;
3310 }
3311
3312 csz = chunksize(p);
3313
3314 #if MORECORE_CLEARS
3315 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
3316 /* clear only the bytes from non-freshly-sbrked memory */
3317 csz = oldtopsize;
3318 }
3319 #endif
3320
3321   /* Unroll clear of <= 36 bytes (72 if 8-byte sizes).  We know that
3322 contents have an odd number of INTERNAL_SIZE_T-sized words;
3323 minimally 3. */
3324 d = (INTERNAL_SIZE_T*)mem;
3325 clearsize = csz - SIZE_SZ;
3326 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3327 assert(nclears >= 3);
3328
3329 if (nclears > 9)
3330 MALLOC_ZERO(d, clearsize);
3331
3332 else {
3333 *(d+0) = 0;
3334 *(d+1) = 0;
3335 *(d+2) = 0;
3336 if (nclears > 4) {
3337 *(d+3) = 0;
3338 *(d+4) = 0;
3339 if (nclears > 6) {
3340 *(d+5) = 0;
3341 *(d+6) = 0;
3342 if (nclears > 8) {
3343 *(d+7) = 0;
3344 *(d+8) = 0;
3345 }
3346 }
3347 }
3348 }
3349
3350 return mem;
3351 }
3352
3353
3354 int
3355 public_mTRIm(size_t s)
3356 {
3357 int result = 0;
3358
3359 if(__malloc_initialized < 0)
3360 ptmalloc_init ();
3361
3362 mstate ar_ptr = &main_arena;
3363 do
3364 {
3365 (void) mutex_lock (&ar_ptr->mutex);
3366 result |= mTRIm (ar_ptr, s);
3367 (void) mutex_unlock (&ar_ptr->mutex);
3368
3369 ar_ptr = ar_ptr->next;
3370 }
3371 while (ar_ptr != &main_arena);
3372
3373 return result;
3374 }
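
/*
   Usage sketch (illustration only, assuming the usual exported name
   malloc_trim declared in <malloc.h>): the wrapper above walks every arena
   and returns nonzero if any memory could be released back to the system.
   The argument is the amount of slack, in bytes, to leave at the top.

   #include <malloc.h>

   static void
   ex_release_free_memory (void)
   {
     int released = malloc_trim (128 * 1024);   // keep 128 KiB of padding at the top
     (void) released;                           // nonzero if memory was given back
   }
*/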
3375
3376 size_t
3377 public_mUSABLe(void* m)
3378 {
3379 size_t result;
3380
3381 result = mUSABLe(m);
3382 return result;
3383 }
3384
3385 void
3386 public_mSTATs()
3387 {
3388 mSTATs();
3389 }
3390
3391 struct mallinfo public_mALLINFo()
3392 {
3393 struct mallinfo m;
3394
3395 if(__malloc_initialized < 0)
3396 ptmalloc_init ();
3397 (void)mutex_lock(&main_arena.mutex);
3398 m = mALLINFo(&main_arena);
3399 (void)mutex_unlock(&main_arena.mutex);
3400 return m;
3401 }
3402
3403 int
3404 public_mALLOPt(int p, int v)
3405 {
3406 int result;
3407 result = mALLOPt(p, v);
3408 return result;
3409 }
3410
3411 /*
3412 ------------------------------ malloc ------------------------------
3413 */
3414
3415 static void*
3416 _int_malloc(mstate av, size_t bytes)
3417 {
3418 INTERNAL_SIZE_T nb; /* normalized request size */
3419 unsigned int idx; /* associated bin index */
3420 mbinptr bin; /* associated bin */
3421
3422 mchunkptr victim; /* inspected/selected chunk */
3423 INTERNAL_SIZE_T size; /* its size */
3424 int victim_index; /* its bin index */
3425
3426 mchunkptr remainder; /* remainder from a split */
3427 unsigned long remainder_size; /* its size */
3428
3429 unsigned int block; /* bit map traverser */
3430 unsigned int bit; /* bit map traverser */
3431 unsigned int map; /* current word of binmap */
3432
3433 mchunkptr fwd; /* misc temp for linking */
3434 mchunkptr bck; /* misc temp for linking */
3435
3436 const char *errstr = NULL;
3437
3438 /*
3439 Convert request size to internal form by adding SIZE_SZ bytes
3440 overhead plus possibly more to obtain necessary alignment and/or
3441 to obtain a size of at least MINSIZE, the smallest allocatable
3442 size. Also, checked_request2size traps (returning 0) request sizes
3443 that are so large that they wrap around zero when padded and
3444 aligned.
3445 */
3446
3447 checked_request2size(bytes, nb);
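
/*
   Illustration only: a standalone sketch of the request-to-chunk-size
   rounding described above.  The constants are assumptions for a typical
   64-bit configuration (one INTERNAL_SIZE_T word of overhead, sizes rounded
   up to MALLOC_ALIGNMENT), not definitions used by this file, and the
   overflow trap of checked_request2size is omitted.

   #include <stddef.h>

   #define EX_SIZE_SZ    8                       // sizeof (INTERNAL_SIZE_T), assumed
   #define EX_ALIGNMENT  (2 * EX_SIZE_SZ)        // MALLOC_ALIGNMENT, assumed
   #define EX_ALIGN_MASK (EX_ALIGNMENT - 1)
   #define EX_MINSIZE    (4 * EX_SIZE_SZ)        // smallest allocatable chunk, assumed

   static size_t
   ex_request2size (size_t req)
   {
     if (req + EX_SIZE_SZ + EX_ALIGN_MASK < EX_MINSIZE)
       return EX_MINSIZE;
     return (req + EX_SIZE_SZ + EX_ALIGN_MASK) & ~(size_t) EX_ALIGN_MASK;
   }
   // ex_request2size (24) == 32, ex_request2size (25) == 48.
*/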
3448
3449 /*
3450 If the size qualifies as a fastbin, first check corresponding bin.
3451 This code is safe to execute even if av is not yet initialized, so we
3452 can try it without checking, which saves some time on this fast path.
3453 */
3454
3455 if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
3456 idx = fastbin_index(nb);
3457 mfastbinptr* fb = &fastbin (av, idx);
3458 mchunkptr pp = *fb;
3459 do
3460 {
3461 victim = pp;
3462 if (victim == NULL)
3463 break;
3464 }
3465 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
3466 != victim);
3467 if (victim != 0) {
3468 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3469 {
3470 errstr = "malloc(): memory corruption (fast)";
3471 errout:
3472 malloc_printerr (check_action, errstr, chunk2mem (victim));
3473 return NULL;
3474 }
3475 check_remalloced_chunk(av, victim, nb);
3476 void *p = chunk2mem(victim);
3477 if (__builtin_expect (perturb_byte, 0))
3478 alloc_perturb (p, bytes);
3479 return p;
3480 }
3481 }
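
/*
   Illustration only: the loop above pops the bin head with an atomic
   compare-and-exchange so that a concurrent free pushing onto the same
   fastbin cannot be lost.  A rough standalone analogue using C11
   <stdatomic.h> (not the catomic_* macros this file actually uses); it
   omits the fastbin_index sanity check above and ignores the ABA hazard.

   #include <stdatomic.h>
   #include <stddef.h>

   struct ex_chunk { struct ex_chunk *fd; };

   static struct ex_chunk *
   ex_fastbin_pop (struct ex_chunk *_Atomic *bin)
   {
     struct ex_chunk *head = atomic_load_explicit (bin, memory_order_acquire);
     while (head != NULL
            && !atomic_compare_exchange_weak_explicit (bin, &head, head->fd,
                                                       memory_order_acquire,
                                                       memory_order_acquire))
       ;                                  // a failed CAS reloads head; retry
     return head;                         // NULL if the bin was empty
   }
*/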
3482
3483 /*
3484 If a small request, check regular bin. Since these "smallbins"
3485 hold one size each, no searching within bins is necessary.
3486 (For a large request, we need to wait until unsorted chunks are
3487 processed to find best fit. But for small ones, fits are exact
3488 anyway, so we can check now, which is faster.)
3489 */
3490
3491 if (in_smallbin_range(nb)) {
3492 idx = smallbin_index(nb);
3493 bin = bin_at(av,idx);
3494
3495 if ( (victim = last(bin)) != bin) {
3496 if (victim == 0) /* initialization check */
3497 malloc_consolidate(av);
3498 else {
3499 bck = victim->bk;
3500 if (__builtin_expect (bck->fd != victim, 0))
3501 {
3502 errstr = "malloc(): smallbin double linked list corrupted";
3503 goto errout;
3504 }
3505 set_inuse_bit_at_offset(victim, nb);
3506 bin->bk = bck;
3507 bck->fd = bin;
3508
3509 if (av != &main_arena)
3510 victim->size |= NON_MAIN_ARENA;
3511 check_malloced_chunk(av, victim, nb);
3512 void *p = chunk2mem(victim);
3513 if (__builtin_expect (perturb_byte, 0))
3514 alloc_perturb (p, bytes);
3515 return p;
3516 }
3517 }
3518 }
3519
3520 /*
3521 If this is a large request, consolidate fastbins before continuing.
3522 While it might look excessive to kill all fastbins before
3523 even seeing if there is space available, this avoids
3524 fragmentation problems normally associated with fastbins.
3525 Also, in practice, programs tend to have runs of either small or
3526 large requests, but less often mixtures, so consolidation is not
3527     invoked all that often in most programs.  And the programs in
3528     which it is called frequently would otherwise tend to fragment.
3529 */
3530
3531 else {
3532 idx = largebin_index(nb);
3533 if (have_fastchunks(av))
3534 malloc_consolidate(av);
3535 }
3536
3537 /*
3538 Process recently freed or remaindered chunks, taking one only if
3539     it is an exact fit, or, if this is a small request, the chunk is the remainder from
3540 the most recent non-exact fit. Place other traversed chunks in
3541 bins. Note that this step is the only place in any routine where
3542 chunks are placed in bins.
3543
3544 The outer loop here is needed because we might not realize until
3545 near the end of malloc that we should have consolidated, so must
3546 do so and retry. This happens at most once, and only when we would
3547 otherwise need to expand memory to service a "small" request.
3548 */
3549
3550 for(;;) {
3551
3552 int iters = 0;
3553 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3554 bck = victim->bk;
3555 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
3556 || __builtin_expect (victim->size > av->system_mem, 0))
3557 malloc_printerr (check_action, "malloc(): memory corruption",
3558 chunk2mem (victim));
3559 size = chunksize(victim);
3560
3561 /*
3562 If a small request, try to use last remainder if it is the
3563 only chunk in unsorted bin. This helps promote locality for
3564 runs of consecutive small requests. This is the only
3565 exception to best-fit, and applies only when there is
3566 no exact fit for a small chunk.
3567 */
3568
3569 if (in_smallbin_range(nb) &&
3570 bck == unsorted_chunks(av) &&
3571 victim == av->last_remainder &&
3572 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3573
3574 /* split and reattach remainder */
3575 remainder_size = size - nb;
3576 remainder = chunk_at_offset(victim, nb);
3577 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3578 av->last_remainder = remainder;
3579 remainder->bk = remainder->fd = unsorted_chunks(av);
3580 if (!in_smallbin_range(remainder_size))
3581 {
3582 remainder->fd_nextsize = NULL;
3583 remainder->bk_nextsize = NULL;
3584 }
3585
3586 set_head(victim, nb | PREV_INUSE |
3587 (av != &main_arena ? NON_MAIN_ARENA : 0));
3588 set_head(remainder, remainder_size | PREV_INUSE);
3589 set_foot(remainder, remainder_size);
3590
3591 check_malloced_chunk(av, victim, nb);
3592 void *p = chunk2mem(victim);
3593 if (__builtin_expect (perturb_byte, 0))
3594 alloc_perturb (p, bytes);
3595 return p;
3596 }
3597
3598 /* remove from unsorted list */
3599 unsorted_chunks(av)->bk = bck;
3600 bck->fd = unsorted_chunks(av);
3601
3602 /* Take now instead of binning if exact fit */
3603
3604 if (size == nb) {
3605 set_inuse_bit_at_offset(victim, size);
3606 if (av != &main_arena)
3607 victim->size |= NON_MAIN_ARENA;
3608 check_malloced_chunk(av, victim, nb);
3609 void *p = chunk2mem(victim);
3610 if (__builtin_expect (perturb_byte, 0))
3611 alloc_perturb (p, bytes);
3612 return p;
3613 }
3614
3615 /* place chunk in bin */
3616
3617 if (in_smallbin_range(size)) {
3618 victim_index = smallbin_index(size);
3619 bck = bin_at(av, victim_index);
3620 fwd = bck->fd;
3621 }
3622 else {
3623 victim_index = largebin_index(size);
3624 bck = bin_at(av, victim_index);
3625 fwd = bck->fd;
3626
3627 /* maintain large bins in sorted order */
3628 if (fwd != bck) {
3629 /* Or with inuse bit to speed comparisons */
3630 size |= PREV_INUSE;
3631 /* if smaller than smallest, bypass loop below */
3632 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
3633 if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
3634 fwd = bck;
3635 bck = bck->bk;
3636
3637 victim->fd_nextsize = fwd->fd;
3638 victim->bk_nextsize = fwd->fd->bk_nextsize;
3639 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3640 }
3641 else {
3642 assert((fwd->size & NON_MAIN_ARENA) == 0);
3643 while ((unsigned long) size < fwd->size)
3644 {
3645 fwd = fwd->fd_nextsize;
3646 assert((fwd->size & NON_MAIN_ARENA) == 0);
3647 }
3648
3649 if ((unsigned long) size == (unsigned long) fwd->size)
3650 /* Always insert in the second position. */
3651 fwd = fwd->fd;
3652 else
3653 {
3654 victim->fd_nextsize = fwd;
3655 victim->bk_nextsize = fwd->bk_nextsize;
3656 fwd->bk_nextsize = victim;
3657 victim->bk_nextsize->fd_nextsize = victim;
3658 }
3659 bck = fwd->bk;
3660 }
3661 } else
3662 victim->fd_nextsize = victim->bk_nextsize = victim;
3663 }
3664
3665 mark_bin(av, victim_index);
3666 victim->bk = bck;
3667 victim->fd = fwd;
3668 fwd->bk = victim;
3669 bck->fd = victim;
3670
3671 #define MAX_ITERS 10000
3672 if (++iters >= MAX_ITERS)
3673 break;
3674 }
3675
3676 /*
3677 If a large request, scan through the chunks of current bin in
3678 sorted order to find smallest that fits. Use the skip list for this.
3679 */
3680
3681 if (!in_smallbin_range(nb)) {
3682 bin = bin_at(av, idx);
3683
3684 /* skip scan if empty or largest chunk is too small */
3685 if ((victim = first(bin)) != bin &&
3686 (unsigned long)(victim->size) >= (unsigned long)(nb)) {
3687
3688 victim = victim->bk_nextsize;
3689 while (((unsigned long)(size = chunksize(victim)) <
3690 (unsigned long)(nb)))
3691 victim = victim->bk_nextsize;
3692
3693 /* Avoid removing the first entry for a size so that the skip
3694 list does not have to be rerouted. */
3695 if (victim != last(bin) && victim->size == victim->fd->size)
3696 victim = victim->fd;
3697
3698 remainder_size = size - nb;
3699 unlink(victim, bck, fwd);
3700
3701 /* Exhaust */
3702 if (remainder_size < MINSIZE) {
3703 set_inuse_bit_at_offset(victim, size);
3704 if (av != &main_arena)
3705 victim->size |= NON_MAIN_ARENA;
3706 }
3707 /* Split */
3708 else {
3709 remainder = chunk_at_offset(victim, nb);
3710 /* We cannot assume the unsorted list is empty and therefore
3711 have to perform a complete insert here. */
3712 bck = unsorted_chunks(av);
3713 fwd = bck->fd;
3714 if (__builtin_expect (fwd->bk != bck, 0))
3715 {
3716 errstr = "malloc(): corrupted unsorted chunks";
3717 goto errout;
3718 }
3719 remainder->bk = bck;
3720 remainder->fd = fwd;
3721 bck->fd = remainder;
3722 fwd->bk = remainder;
3723 if (!in_smallbin_range(remainder_size))
3724 {
3725 remainder->fd_nextsize = NULL;
3726 remainder->bk_nextsize = NULL;
3727 }
3728 set_head(victim, nb | PREV_INUSE |
3729 (av != &main_arena ? NON_MAIN_ARENA : 0));
3730 set_head(remainder, remainder_size | PREV_INUSE);
3731 set_foot(remainder, remainder_size);
3732 }
3733 check_malloced_chunk(av, victim, nb);
3734 void *p = chunk2mem(victim);
3735 if (__builtin_expect (perturb_byte, 0))
3736 alloc_perturb (p, bytes);
3737 return p;
3738 }
3739 }
3740
3741 /*
3742 Search for a chunk by scanning bins, starting with next largest
3743 bin. This search is strictly by best-fit; i.e., the smallest
3744 (with ties going to approximately the least recently used) chunk
3745 that fits is selected.
3746
3747 The bitmap avoids needing to check that most blocks are nonempty.
3748 The particular case of skipping all bins during warm-up phases
3749 when no chunks have been returned yet is faster than it might look.
3750 */
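
/*
   Illustration only: the binmap is a small bitset with one bit per bin, so
   the scan below can skip whole groups of empty bins one word at a time.
   A standalone sketch of the index-to-(word, bit) mapping, assuming 32 bins
   per map word purely for the example (the real parameters come from
   BINMAPSHIFT/BINMAPSIZE defined elsewhere in this file):

   #include <stdint.h>

   #define EX_BINS_PER_WORD 32                         // assumption for the sketch

   static unsigned int
   ex_idx2block (unsigned int idx)
   { return idx / EX_BINS_PER_WORD; }                  // which bitmap word

   static uint32_t
   ex_idx2bit (unsigned int idx)
   { return (uint32_t) 1 << (idx % EX_BINS_PER_WORD); } // bit within that word

   // A zero word means all 32 corresponding bins are empty and can be
   // skipped without touching the bins themselves.
*/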
3751
3752 ++idx;
3753 bin = bin_at(av,idx);
3754 block = idx2block(idx);
3755 map = av->binmap[block];
3756 bit = idx2bit(idx);
3757
3758 for (;;) {
3759
3760 /* Skip rest of block if there are no more set bits in this block. */
3761 if (bit > map || bit == 0) {
3762 do {
3763 if (++block >= BINMAPSIZE) /* out of bins */
3764 goto use_top;
3765 } while ( (map = av->binmap[block]) == 0);
3766
3767 bin = bin_at(av, (block << BINMAPSHIFT));
3768 bit = 1;
3769 }
3770
3771 /* Advance to bin with set bit. There must be one. */
3772 while ((bit & map) == 0) {
3773 bin = next_bin(bin);
3774 bit <<= 1;
3775 assert(bit != 0);
3776 }
3777
3778 /* Inspect the bin. It is likely to be non-empty */
3779 victim = last(bin);
3780
3781 /* If a false alarm (empty bin), clear the bit. */
3782 if (victim == bin) {
3783 av->binmap[block] = map &= ~bit; /* Write through */
3784 bin = next_bin(bin);
3785 bit <<= 1;
3786 }
3787
3788 else {
3789 size = chunksize(victim);
3790
3791 /* We know the first chunk in this bin is big enough to use. */
3792 assert((unsigned long)(size) >= (unsigned long)(nb));
3793
3794 remainder_size = size - nb;
3795
3796 /* unlink */
3797 unlink(victim, bck, fwd);
3798
3799 /* Exhaust */
3800 if (remainder_size < MINSIZE) {
3801 set_inuse_bit_at_offset(victim, size);
3802 if (av != &main_arena)
3803 victim->size |= NON_MAIN_ARENA;
3804 }
3805
3806 /* Split */
3807 else {
3808 remainder = chunk_at_offset(victim, nb);
3809
3810 /* We cannot assume the unsorted list is empty and therefore
3811 have to perform a complete insert here. */
3812 bck = unsorted_chunks(av);
3813 fwd = bck->fd;
3814 if (__builtin_expect (fwd->bk != bck, 0))
3815 {
3816 errstr = "malloc(): corrupted unsorted chunks 2";
3817 goto errout;
3818 }
3819 remainder->bk = bck;
3820 remainder->fd = fwd;
3821 bck->fd = remainder;
3822 fwd->bk = remainder;
3823
3824 /* advertise as last remainder */
3825 if (in_smallbin_range(nb))
3826 av->last_remainder = remainder;
3827 if (!in_smallbin_range(remainder_size))
3828 {
3829 remainder->fd_nextsize = NULL;
3830 remainder->bk_nextsize = NULL;
3831 }
3832 set_head(victim, nb | PREV_INUSE |
3833 (av != &main_arena ? NON_MAIN_ARENA : 0));
3834 set_head(remainder, remainder_size | PREV_INUSE);
3835 set_foot(remainder, remainder_size);
3836 }
3837 check_malloced_chunk(av, victim, nb);
3838 void *p = chunk2mem(victim);
3839 if (__builtin_expect (perturb_byte, 0))
3840 alloc_perturb (p, bytes);
3841 return p;
3842 }
3843 }
3844
3845 use_top:
3846 /*
3847 If large enough, split off the chunk bordering the end of memory
3848 (held in av->top). Note that this is in accord with the best-fit
3849 search rule. In effect, av->top is treated as larger (and thus
3850 less well fitting) than any other available chunk since it can
3851 be extended to be as large as necessary (up to system
3852 limitations).
3853
3854 We require that av->top always exists (i.e., has size >=
3855 MINSIZE) after initialization, so if it would otherwise be
3856 exhausted by current request, it is replenished. (The main
3857 reason for ensuring it exists is that we may need MINSIZE space
3858 to put in fenceposts in sysmalloc.)
3859 */
3860
3861 victim = av->top;
3862 size = chunksize(victim);
3863
3864 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3865 remainder_size = size - nb;
3866 remainder = chunk_at_offset(victim, nb);
3867 av->top = remainder;
3868 set_head(victim, nb | PREV_INUSE |
3869 (av != &main_arena ? NON_MAIN_ARENA : 0));
3870 set_head(remainder, remainder_size | PREV_INUSE);
3871
3872 check_malloced_chunk(av, victim, nb);
3873 void *p = chunk2mem(victim);
3874 if (__builtin_expect (perturb_byte, 0))
3875 alloc_perturb (p, bytes);
3876 return p;
3877 }
3878
3879 /* When we are using atomic ops to free fast chunks we can get
3880 here for all block sizes. */
3881 else if (have_fastchunks(av)) {
3882 malloc_consolidate(av);
3883 /* restore original bin index */
3884 if (in_smallbin_range(nb))
3885 idx = smallbin_index(nb);
3886 else
3887 idx = largebin_index(nb);
3888 }
3889
3890 /*
3891 Otherwise, relay to handle system-dependent cases
3892 */
3893 else {
3894 void *p = sYSMALLOc(nb, av);
3895 if (p != NULL && __builtin_expect (perturb_byte, 0))
3896 alloc_perturb (p, bytes);
3897 return p;
3898 }
3899 }
3900 }
3901
3902 /*
3903 ------------------------------ free ------------------------------
3904 */
3905
3906 static void
3907 _int_free(mstate av, mchunkptr p, int have_lock)
3908 {
3909 INTERNAL_SIZE_T size; /* its size */
3910 mfastbinptr* fb; /* associated fastbin */
3911 mchunkptr nextchunk; /* next contiguous chunk */
3912 INTERNAL_SIZE_T nextsize; /* its size */
3913 int nextinuse; /* true if nextchunk is used */
3914 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3915 mchunkptr bck; /* misc temp for linking */
3916 mchunkptr fwd; /* misc temp for linking */
3917
3918 const char *errstr = NULL;
3919 int locked = 0;
3920
3921 size = chunksize(p);
3922
3923 /* Little security check which won't hurt performance: the
3924      allocator never wraps around at the end of the address space.
3925 Therefore we can exclude some size values which might appear
3926 here by accident or by "design" from some intruder. */
3927 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
3928 || __builtin_expect (misaligned_chunk (p), 0))
3929 {
3930 errstr = "free(): invalid pointer";
3931 errout:
3932 if (! have_lock && locked)
3933 (void)mutex_unlock(&av->mutex);
3934 malloc_printerr (check_action, errstr, chunk2mem(p));
3935 return;
3936 }
3937 /* We know that each chunk is at least MINSIZE bytes in size. */
3938 if (__builtin_expect (size < MINSIZE, 0))
3939 {
3940 errstr = "free(): invalid size";
3941 goto errout;
3942 }
3943
3944 check_inuse_chunk(av, p);
3945
3946 /*
3947 If eligible, place chunk on a fastbin so it can be found
3948 and used quickly in malloc.
3949 */
3950
3951 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
3952
3953 #if TRIM_FASTBINS
3954 /*
3955 If TRIM_FASTBINS set, don't place chunks
3956 bordering top into fastbins
3957 */
3958 && (chunk_at_offset(p, size) != av->top)
3959 #endif
3960 ) {
3961
3962 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
3963 || __builtin_expect (chunksize (chunk_at_offset (p, size))
3964 >= av->system_mem, 0))
3965 {
3966 /* We might not have a lock at this point and concurrent modifications
3967 	 of system_mem might have led to a false positive.  Redo the test
3968 after getting the lock. */
3969 if (have_lock
3970 || ({ assert (locked == 0);
3971 mutex_lock(&av->mutex);
3972 locked = 1;
3973 chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
3974 || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
3975 }))
3976 {
3977 errstr = "free(): invalid next size (fast)";
3978 goto errout;
3979 }
3980 if (! have_lock)
3981 {
3982 (void)mutex_unlock(&av->mutex);
3983 locked = 0;
3984 }
3985 }
3986
3987 if (__builtin_expect (perturb_byte, 0))
3988 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3989
3990 set_fastchunks(av);
3991 unsigned int idx = fastbin_index(size);
3992 fb = &fastbin (av, idx);
3993
3994 mchunkptr fd;
3995 mchunkptr old = *fb;
3996 unsigned int old_idx = ~0u;
3997 do
3998 {
3999 /* Another simple check: make sure the top of the bin is not the
4000 record we are going to add (i.e., double free). */
4001 if (__builtin_expect (old == p, 0))
4002 {
4003 errstr = "double free or corruption (fasttop)";
4004 goto errout;
4005 }
4006 if (old != NULL)
4007 old_idx = fastbin_index(chunksize(old));
4008 p->fd = fd = old;
4009 }
4010 while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
4011
4012 if (fd != NULL && __builtin_expect (old_idx != idx, 0))
4013 {
4014 errstr = "invalid fastbin entry (free)";
4015 goto errout;
4016 }
4017 }
4018
4019 /*
4020 Consolidate other non-mmapped chunks as they arrive.
4021 */
4022
4023 else if (!chunk_is_mmapped(p)) {
4024 if (! have_lock) {
4025 #if THREAD_STATS
4026 if(!mutex_trylock(&av->mutex))
4027 ++(av->stat_lock_direct);
4028 else {
4029 (void)mutex_lock(&av->mutex);
4030 ++(av->stat_lock_wait);
4031 }
4032 #else
4033 (void)mutex_lock(&av->mutex);
4034 #endif
4035 locked = 1;
4036 }
4037
4038 nextchunk = chunk_at_offset(p, size);
4039
4040 /* Lightweight tests: check whether the block is already the
4041 top block. */
4042 if (__builtin_expect (p == av->top, 0))
4043 {
4044 errstr = "double free or corruption (top)";
4045 goto errout;
4046 }
4047 /* Or whether the next chunk is beyond the boundaries of the arena. */
4048 if (__builtin_expect (contiguous (av)
4049 && (char *) nextchunk
4050 >= ((char *) av->top + chunksize(av->top)), 0))
4051 {
4052 errstr = "double free or corruption (out)";
4053 goto errout;
4054 }
4055 /* Or whether the block is actually not marked used. */
4056 if (__builtin_expect (!prev_inuse(nextchunk), 0))
4057 {
4058 errstr = "double free or corruption (!prev)";
4059 goto errout;
4060 }
4061
4062 nextsize = chunksize(nextchunk);
4063 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
4064 || __builtin_expect (nextsize >= av->system_mem, 0))
4065 {
4066 errstr = "free(): invalid next size (normal)";
4067 goto errout;
4068 }
4069
4070 if (__builtin_expect (perturb_byte, 0))
4071 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
4072
4073 /* consolidate backward */
4074 if (!prev_inuse(p)) {
4075 prevsize = p->prev_size;
4076 size += prevsize;
4077 p = chunk_at_offset(p, -((long) prevsize));
4078 unlink(p, bck, fwd);
4079 }
4080
4081 if (nextchunk != av->top) {
4082 /* get and clear inuse bit */
4083 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4084
4085 /* consolidate forward */
4086 if (!nextinuse) {
4087 unlink(nextchunk, bck, fwd);
4088 size += nextsize;
4089 } else
4090 clear_inuse_bit_at_offset(nextchunk, 0);
4091
4092 /*
4093 Place the chunk in unsorted chunk list. Chunks are
4094 not placed into regular bins until after they have
4095 been given one chance to be used in malloc.
4096 */
4097
4098 bck = unsorted_chunks(av);
4099 fwd = bck->fd;
4100 if (__builtin_expect (fwd->bk != bck, 0))
4101 {
4102 errstr = "free(): corrupted unsorted chunks";
4103 goto errout;
4104 }
4105 p->fd = fwd;
4106 p->bk = bck;
4107 if (!in_smallbin_range(size))
4108 {
4109 p->fd_nextsize = NULL;
4110 p->bk_nextsize = NULL;
4111 }
4112 bck->fd = p;
4113 fwd->bk = p;
4114
4115 set_head(p, size | PREV_INUSE);
4116 set_foot(p, size);
4117
4118 check_free_chunk(av, p);
4119 }
4120
4121 /*
4122 If the chunk borders the current high end of memory,
4123 consolidate into top
4124 */
4125
4126 else {
4127 size += nextsize;
4128 set_head(p, size | PREV_INUSE);
4129 av->top = p;
4130 check_chunk(av, p);
4131 }
4132
4133 /*
4134 If freeing a large space, consolidate possibly-surrounding
4135 chunks. Then, if the total unused topmost memory exceeds trim
4136 threshold, ask malloc_trim to reduce top.
4137
4138 Unless max_fast is 0, we don't know if there are fastbins
4139 bordering top, so we cannot tell for sure whether threshold
4140 has been reached unless fastbins are consolidated. But we
4141 don't want to consolidate on each free. As a compromise,
4142 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4143 is reached.
4144 */
4145
4146 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4147 if (have_fastchunks(av))
4148 malloc_consolidate(av);
4149
4150 if (av == &main_arena) {
4151 #ifndef MORECORE_CANNOT_TRIM
4152 if ((unsigned long)(chunksize(av->top)) >=
4153 (unsigned long)(mp_.trim_threshold))
4154 sYSTRIm(mp_.top_pad, av);
4155 #endif
4156 } else {
4157 /* Always try heap_trim(), even if the top chunk is not
4158 large, because the corresponding heap might go away. */
4159 heap_info *heap = heap_for_ptr(top(av));
4160
4161 assert(heap->ar_ptr == av);
4162 heap_trim(heap, mp_.top_pad);
4163 }
4164 }
4165
4166 if (! have_lock) {
4167 assert (locked);
4168 (void)mutex_unlock(&av->mutex);
4169 }
4170 }
4171 /*
4172 If the chunk was allocated via mmap, release via munmap().
4173 */
4174
4175 else {
4176 munmap_chunk (p);
4177 }
4178 }
4179
4180 /*
4181 ------------------------- malloc_consolidate -------------------------
4182
4183 malloc_consolidate is a specialized version of free() that tears
4184 down chunks held in fastbins. Free itself cannot be used for this
4185 purpose since, among other things, it might place chunks back onto
4186 fastbins. So, instead, we need to use a minor variant of the same
4187 code.
4188
4189 Also, because this routine needs to be called the first time through
4190 malloc anyway, it turns out to be the perfect place to trigger
4191 initialization code.
4192 */
4193
4194 static void malloc_consolidate(mstate av)
4195 {
4196 mfastbinptr* fb; /* current fastbin being consolidated */
4197 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4198 mchunkptr p; /* current chunk being consolidated */
4199 mchunkptr nextp; /* next chunk to consolidate */
4200 mchunkptr unsorted_bin; /* bin header */
4201 mchunkptr first_unsorted; /* chunk to link to */
4202
4203 /* These have same use as in free() */
4204 mchunkptr nextchunk;
4205 INTERNAL_SIZE_T size;
4206 INTERNAL_SIZE_T nextsize;
4207 INTERNAL_SIZE_T prevsize;
4208 int nextinuse;
4209 mchunkptr bck;
4210 mchunkptr fwd;
4211
4212 /*
4213 If max_fast is 0, we know that av hasn't
4214 yet been initialized, in which case do so below
4215 */
4216
4217 if (get_max_fast () != 0) {
4218 clear_fastchunks(av);
4219
4220 unsorted_bin = unsorted_chunks(av);
4221
4222 /*
4223 Remove each chunk from fast bin and consolidate it, placing it
4224 then in unsorted bin. Among other reasons for doing this,
4225 placing in unsorted bin avoids needing to calculate actual bins
4226 until malloc is sure that chunks aren't immediately going to be
4227 reused anyway.
4228 */
4229
4230 maxfb = &fastbin (av, NFASTBINS - 1);
4231 fb = &fastbin (av, 0);
4232 do {
4233 p = atomic_exchange_acq (fb, 0);
4234 if (p != 0) {
4235 do {
4236 check_inuse_chunk(av, p);
4237 nextp = p->fd;
4238
4239 /* Slightly streamlined version of consolidation code in free() */
4240 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4241 nextchunk = chunk_at_offset(p, size);
4242 nextsize = chunksize(nextchunk);
4243
4244 if (!prev_inuse(p)) {
4245 prevsize = p->prev_size;
4246 size += prevsize;
4247 p = chunk_at_offset(p, -((long) prevsize));
4248 unlink(p, bck, fwd);
4249 }
4250
4251 if (nextchunk != av->top) {
4252 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4253
4254 if (!nextinuse) {
4255 size += nextsize;
4256 unlink(nextchunk, bck, fwd);
4257 } else
4258 clear_inuse_bit_at_offset(nextchunk, 0);
4259
4260 first_unsorted = unsorted_bin->fd;
4261 unsorted_bin->fd = p;
4262 first_unsorted->bk = p;
4263
4264 if (!in_smallbin_range (size)) {
4265 p->fd_nextsize = NULL;
4266 p->bk_nextsize = NULL;
4267 }
4268
4269 set_head(p, size | PREV_INUSE);
4270 p->bk = unsorted_bin;
4271 p->fd = first_unsorted;
4272 set_foot(p, size);
4273 }
4274
4275 else {
4276 size += nextsize;
4277 set_head(p, size | PREV_INUSE);
4278 av->top = p;
4279 }
4280
4281 } while ( (p = nextp) != 0);
4282
4283 }
4284 } while (fb++ != maxfb);
4285 }
4286 else {
4287 malloc_init_state(av);
4288 check_malloc_state(av);
4289 }
4290 }
4291
4292 /*
4293 ------------------------------ realloc ------------------------------
4294 */
4295
4296 void*
4297 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4298 INTERNAL_SIZE_T nb)
4299 {
4300 mchunkptr newp; /* chunk to return */
4301 INTERNAL_SIZE_T newsize; /* its size */
4302 void* newmem; /* corresponding user mem */
4303
4304 mchunkptr next; /* next contiguous chunk after oldp */
4305
4306 mchunkptr remainder; /* extra space at end of newp */
4307 unsigned long remainder_size; /* its size */
4308
4309 mchunkptr bck; /* misc temp for linking */
4310 mchunkptr fwd; /* misc temp for linking */
4311
4312 unsigned long copysize; /* bytes to copy */
4313 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4314 INTERNAL_SIZE_T* s; /* copy source */
4315 INTERNAL_SIZE_T* d; /* copy destination */
4316
4317 const char *errstr = NULL;
4318
4319 /* oldmem size */
4320 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4321 || __builtin_expect (oldsize >= av->system_mem, 0))
4322 {
4323 errstr = "realloc(): invalid old size";
4324 errout:
4325 malloc_printerr (check_action, errstr, chunk2mem(oldp));
4326 return NULL;
4327 }
4328
4329 check_inuse_chunk(av, oldp);
4330
4331 /* All callers already filter out mmap'ed chunks. */
4332 assert (!chunk_is_mmapped(oldp));
4333
4334 next = chunk_at_offset(oldp, oldsize);
4335 INTERNAL_SIZE_T nextsize = chunksize(next);
4336 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4337 || __builtin_expect (nextsize >= av->system_mem, 0))
4338 {
4339 errstr = "realloc(): invalid next size";
4340 goto errout;
4341 }
4342
4343 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4344 /* already big enough; split below */
4345 newp = oldp;
4346 newsize = oldsize;
4347 }
4348
4349 else {
4350 /* Try to expand forward into top */
4351 if (next == av->top &&
4352 (unsigned long)(newsize = oldsize + nextsize) >=
4353 (unsigned long)(nb + MINSIZE)) {
4354 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4355 av->top = chunk_at_offset(oldp, nb);
4356 set_head(av->top, (newsize - nb) | PREV_INUSE);
4357 check_inuse_chunk(av, oldp);
4358 return chunk2mem(oldp);
4359 }
4360
4361 /* Try to expand forward into next chunk; split off remainder below */
4362 else if (next != av->top &&
4363 !inuse(next) &&
4364 (unsigned long)(newsize = oldsize + nextsize) >=
4365 (unsigned long)(nb)) {
4366 newp = oldp;
4367 unlink(next, bck, fwd);
4368 }
4369
4370 /* allocate, copy, free */
4371 else {
4372 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4373 if (newmem == 0)
4374 return 0; /* propagate failure */
4375
4376 newp = mem2chunk(newmem);
4377 newsize = chunksize(newp);
4378
4379 /*
4380 Avoid copy if newp is next chunk after oldp.
4381 */
4382 if (newp == next) {
4383 newsize += oldsize;
4384 newp = oldp;
4385 }
4386 else {
4387 /*
4388 	  Unroll copy of <= 36 bytes (72 if 8-byte sizes)
4389 We know that contents have an odd number of
4390 INTERNAL_SIZE_T-sized words; minimally 3.
4391 */
4392
4393 copysize = oldsize - SIZE_SZ;
4394 s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
4395 d = (INTERNAL_SIZE_T*)(newmem);
4396 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4397 assert(ncopies >= 3);
4398
4399 if (ncopies > 9)
4400 MALLOC_COPY(d, s, copysize);
4401
4402 else {
4403 *(d+0) = *(s+0);
4404 *(d+1) = *(s+1);
4405 *(d+2) = *(s+2);
4406 if (ncopies > 4) {
4407 *(d+3) = *(s+3);
4408 *(d+4) = *(s+4);
4409 if (ncopies > 6) {
4410 *(d+5) = *(s+5);
4411 *(d+6) = *(s+6);
4412 if (ncopies > 8) {
4413 *(d+7) = *(s+7);
4414 *(d+8) = *(s+8);
4415 }
4416 }
4417 }
4418 }
4419
4420 _int_free(av, oldp, 1);
4421 check_inuse_chunk(av, newp);
4422 return chunk2mem(newp);
4423 }
4424 }
4425 }
4426
4427 /* If possible, free extra space in old or extended chunk */
4428
4429 assert((unsigned long)(newsize) >= (unsigned long)(nb));
4430
4431 remainder_size = newsize - nb;
4432
4433 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4434 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4435 set_inuse_bit_at_offset(newp, newsize);
4436 }
4437 else { /* split remainder */
4438 remainder = chunk_at_offset(newp, nb);
4439 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4440 set_head(remainder, remainder_size | PREV_INUSE |
4441 (av != &main_arena ? NON_MAIN_ARENA : 0));
4442 /* Mark remainder as inuse so free() won't complain */
4443 set_inuse_bit_at_offset(remainder, remainder_size);
4444 _int_free(av, remainder, 1);
4445 }
4446
4447 check_inuse_chunk(av, newp);
4448 return chunk2mem(newp);
4449 }
4450
4451 /*
4452 ------------------------------ memalign ------------------------------
4453 */
4454
4455 static void*
4456 _int_memalign(mstate av, size_t alignment, size_t bytes)
4457 {
4458 INTERNAL_SIZE_T nb; /* padded request size */
4459 char* m; /* memory returned by malloc call */
4460 mchunkptr p; /* corresponding chunk */
4461 char* brk; /* alignment point within p */
4462 mchunkptr newp; /* chunk to return */
4463 INTERNAL_SIZE_T newsize; /* its size */
4464 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4465 mchunkptr remainder; /* spare room at end to split off */
4466 unsigned long remainder_size; /* its size */
4467 INTERNAL_SIZE_T size;
4468
4469   /* If we need less alignment than we give anyway, just relay to malloc */
4470
4471 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
4472
4473 /* Otherwise, ensure that it is at least a minimum chunk size */
4474
4475 if (alignment < MINSIZE) alignment = MINSIZE;
4476
4477   /* Make sure alignment is a power of 2 (in case MINSIZE is not).  */
4478 if ((alignment & (alignment - 1)) != 0) {
4479 size_t a = MALLOC_ALIGNMENT * 2;
4480 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4481 alignment = a;
4482 }
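
/*
   Illustration only: the loop above rounds a non-power-of-two alignment up
   by repeated doubling, starting from MALLOC_ALIGNMENT * 2.  An equivalent
   bit-twiddling round-up, as a hypothetical helper that ignores the
   MALLOC_ALIGNMENT floor and assumes a nonzero argument:

   #include <stddef.h>
   #include <stdint.h>

   static size_t
   ex_next_pow2 (size_t a)
   {
     a -= 1;
     a |= a >> 1;  a |= a >> 2;  a |= a >> 4;
     a |= a >> 8;  a |= a >> 16;
   #if SIZE_MAX > 0xffffffff
     a |= a >> 32;
   #endif
     return a + 1;
   }
   // ex_next_pow2 (48) == 64, ex_next_pow2 (64) == 64.
*/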
4483
4484 checked_request2size(bytes, nb);
4485
4486 /*
4487 Strategy: find a spot within that chunk that meets the alignment
4488 request, and then possibly free the leading and trailing space.
4489 */
4490
4491
4492 /* Call malloc with worst case padding to hit alignment. */
4493
4494 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4495
4496 if (m == 0) return 0; /* propagate failure */
4497
4498 p = mem2chunk(m);
4499
4500 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4501
4502 /*
4503 Find an aligned spot inside chunk. Since we need to give back
4504 leading space in a chunk of at least MINSIZE, if the first
4505 calculation places us at a spot with less than MINSIZE leader,
4506 we can move to the next aligned spot -- we've allocated enough
4507 total room so that this is always possible.
4508 */
4509
4510 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4511 -((signed long) alignment));
4512 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4513 brk += alignment;
4514
4515 newp = (mchunkptr)brk;
4516 leadsize = brk - (char*)(p);
4517 newsize = chunksize(p) - leadsize;
4518
4519 /* For mmapped chunks, just adjust offset */
4520 if (chunk_is_mmapped(p)) {
4521 newp->prev_size = p->prev_size + leadsize;
4522 set_head(newp, newsize|IS_MMAPPED);
4523 return chunk2mem(newp);
4524 }
4525
4526 /* Otherwise, give back leader, use the rest */
4527 set_head(newp, newsize | PREV_INUSE |
4528 (av != &main_arena ? NON_MAIN_ARENA : 0));
4529 set_inuse_bit_at_offset(newp, newsize);
4530 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4531 _int_free(av, p, 1);
4532 p = newp;
4533
4534 assert (newsize >= nb &&
4535 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
4536 }
4537
4538 /* Also give back spare room at the end */
4539 if (!chunk_is_mmapped(p)) {
4540 size = chunksize(p);
4541 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4542 remainder_size = size - nb;
4543 remainder = chunk_at_offset(p, nb);
4544 set_head(remainder, remainder_size | PREV_INUSE |
4545 (av != &main_arena ? NON_MAIN_ARENA : 0));
4546 set_head_size(p, nb);
4547 _int_free(av, remainder, 1);
4548 }
4549 }
4550
4551 check_inuse_chunk(av, p);
4552 return chunk2mem(p);
4553 }
4554
4555
4556 /*
4557 ------------------------------ valloc ------------------------------
4558 */
4559
4560 static void*
4561 _int_valloc(mstate av, size_t bytes)
4562 {
4563 /* Ensure initialization/consolidation */
4564 if (have_fastchunks(av)) malloc_consolidate(av);
4565 return _int_memalign(av, GLRO(dl_pagesize), bytes);
4566 }
4567
4568 /*
4569 ------------------------------ pvalloc ------------------------------
4570 */
4571
4572
4573 static void*
4574 _int_pvalloc(mstate av, size_t bytes)
4575 {
4576 size_t pagesz;
4577
4578 /* Ensure initialization/consolidation */
4579 if (have_fastchunks(av)) malloc_consolidate(av);
4580 pagesz = GLRO(dl_pagesize);
4581 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
4582 }
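
/*
   Illustration only: _int_pvalloc rounds the request up to a whole number
   of pages before delegating to _int_memalign.  A standalone sketch of that
   rounding, assuming (hypothetically) a power-of-two page size such as 4096:

   #include <stddef.h>

   static size_t
   ex_round_to_page (size_t bytes, size_t pagesz)   // pagesz must be a power of two
   {
     return (bytes + pagesz - 1) & ~(pagesz - 1);
   }
   // ex_round_to_page (1, 4096) == 4096, ex_round_to_page (8192, 4096) == 8192.
*/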
4583
4584
4585 /*
4586 ------------------------------ malloc_trim ------------------------------
4587 */
4588
4589 static int mTRIm(mstate av, size_t pad)
4590 {
4591 /* Ensure initialization/consolidation */
4592 malloc_consolidate (av);
4593
4594 const size_t ps = GLRO(dl_pagesize);
4595 int psindex = bin_index (ps);
4596 const size_t psm1 = ps - 1;
4597
4598 int result = 0;
4599 for (int i = 1; i < NBINS; ++i)
4600 if (i == 1 || i >= psindex)
4601 {
4602 mbinptr bin = bin_at (av, i);
4603
4604 for (mchunkptr p = last (bin); p != bin; p = p->bk)
4605 {
4606 INTERNAL_SIZE_T size = chunksize (p);
4607
4608 if (size > psm1 + sizeof (struct malloc_chunk))
4609 {
4610 /* See whether the chunk contains at least one unused page. */
4611 char *paligned_mem = (char *) (((uintptr_t) p
4612 + sizeof (struct malloc_chunk)
4613 + psm1) & ~psm1);
4614
4615 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4616 assert ((char *) p + size > paligned_mem);
4617
4618 /* This is the size we could potentially free. */
4619 size -= paligned_mem - (char *) p;
4620
4621 if (size > psm1)
4622 {
4623 #ifdef MALLOC_DEBUG
4624 /* When debugging we simulate destroying the memory
4625 content. */
4626 memset (paligned_mem, 0x89, size & ~psm1);
4627 #endif
4628 madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
4629
4630 result = 1;
4631 }
4632 }
4633 }
4634 }
4635
4636 #ifndef MORECORE_CANNOT_TRIM
4637 return result | (av == &main_arena ? sYSTRIm (pad, av) : 0);
4638 #else
4639 return result;
4640 #endif
4641 }
4642
4643
4644 /*
4645 ------------------------- malloc_usable_size -------------------------
4646 */
4647
4648 size_t mUSABLe(void* mem)
4649 {
4650 mchunkptr p;
4651 if (mem != 0) {
4652 p = mem2chunk(mem);
4653 if (chunk_is_mmapped(p))
4654 return chunksize(p) - 2*SIZE_SZ;
4655 else if (inuse(p))
4656 return chunksize(p) - SIZE_SZ;
4657 }
4658 return 0;
4659 }
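
/*
   Usage sketch (illustration only, assuming the usual exported name
   malloc_usable_size declared in <malloc.h>): the value returned may be
   larger than the size originally requested, because of alignment and
   minimum-chunk rounding.

   #include <malloc.h>
   #include <stdio.h>
   #include <stdlib.h>

   int main (void)
   {
     void *p = malloc (5);
     if (p == NULL)
       return 1;
     printf ("requested 5, usable %zu\n", malloc_usable_size (p));
     free (p);
     return 0;
   }
*/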
4660
4661 /*
4662 ------------------------------ mallinfo ------------------------------
4663 */
4664
4665 struct mallinfo mALLINFo(mstate av)
4666 {
4667 struct mallinfo mi;
4668 size_t i;
4669 mbinptr b;
4670 mchunkptr p;
4671 INTERNAL_SIZE_T avail;
4672 INTERNAL_SIZE_T fastavail;
4673 int nblocks;
4674 int nfastblocks;
4675
4676 /* Ensure initialization */
4677 if (av->top == 0) malloc_consolidate(av);
4678
4679 check_malloc_state(av);
4680
4681 /* Account for top */
4682 avail = chunksize(av->top);
4683 nblocks = 1; /* top always exists */
4684
4685 /* traverse fastbins */
4686 nfastblocks = 0;
4687 fastavail = 0;
4688
4689 for (i = 0; i < NFASTBINS; ++i) {
4690 for (p = fastbin (av, i); p != 0; p = p->fd) {
4691 ++nfastblocks;
4692 fastavail += chunksize(p);
4693 }
4694 }
4695
4696 avail += fastavail;
4697
4698 /* traverse regular bins */
4699 for (i = 1; i < NBINS; ++i) {
4700 b = bin_at(av, i);
4701 for (p = last(b); p != b; p = p->bk) {
4702 ++nblocks;
4703 avail += chunksize(p);
4704 }
4705 }
4706
4707 mi.smblks = nfastblocks;
4708 mi.ordblks = nblocks;
4709 mi.fordblks = avail;
4710 mi.uordblks = av->system_mem - avail;
4711 mi.arena = av->system_mem;
4712 mi.hblks = mp_.n_mmaps;
4713 mi.hblkhd = mp_.mmapped_mem;
4714 mi.fsmblks = fastavail;
4715 mi.keepcost = chunksize(av->top);
4716 mi.usmblks = mp_.max_total_mem;
4717 return mi;
4718 }
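
/*
   Usage sketch (illustration only): mallinfo is exported as mallinfo() (see
   the aliases at the end of this file) and declared in <malloc.h>; the
   public wrapper above reports the main arena only, and the counters are
   plain ints, so they can wrap on large heaps.

   #include <malloc.h>
   #include <stdio.h>

   int main (void)
   {
     struct mallinfo mi = mallinfo ();
     printf ("arena (sbrk) bytes: %d\n", mi.arena);
     printf ("in-use bytes:       %d\n", mi.uordblks);
     printf ("free bytes:         %d\n", mi.fordblks);
     printf ("mmapped bytes:      %d\n", mi.hblkhd);
     return 0;
   }
*/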
4719
4720 /*
4721 ------------------------------ malloc_stats ------------------------------
4722 */
4723
4724 void mSTATs()
4725 {
4726 int i;
4727 mstate ar_ptr;
4728 struct mallinfo mi;
4729 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
4730 #if THREAD_STATS
4731 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
4732 #endif
4733
4734 if(__malloc_initialized < 0)
4735 ptmalloc_init ();
4736 _IO_flockfile (stderr);
4737 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
4738 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
4739 for (i=0, ar_ptr = &main_arena;; i++) {
4740 (void)mutex_lock(&ar_ptr->mutex);
4741 mi = mALLINFo(ar_ptr);
4742 fprintf(stderr, "Arena %d:\n", i);
4743 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
4744 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
4745 #if MALLOC_DEBUG > 1
4746 if (i > 0)
4747 dump_heap(heap_for_ptr(top(ar_ptr)));
4748 #endif
4749 system_b += mi.arena;
4750 in_use_b += mi.uordblks;
4751 #if THREAD_STATS
4752 stat_lock_direct += ar_ptr->stat_lock_direct;
4753 stat_lock_loop += ar_ptr->stat_lock_loop;
4754 stat_lock_wait += ar_ptr->stat_lock_wait;
4755 #endif
4756 (void)mutex_unlock(&ar_ptr->mutex);
4757 ar_ptr = ar_ptr->next;
4758 if(ar_ptr == &main_arena) break;
4759 }
4760 fprintf(stderr, "Total (incl. mmap):\n");
4761 fprintf(stderr, "system bytes = %10u\n", system_b);
4762 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
4763 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
4764 fprintf(stderr, "max mmap bytes = %10lu\n",
4765 (unsigned long)mp_.max_mmapped_mem);
4766 #if THREAD_STATS
4767 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
4768 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
4769 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
4770 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
4771 fprintf(stderr, "locked total = %10ld\n",
4772 stat_lock_direct + stat_lock_loop + stat_lock_wait);
4773 #endif
4774 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
4775 _IO_funlockfile (stderr);
4776 }
4777
4778
4779 /*
4780 ------------------------------ mallopt ------------------------------
4781 */
4782
4783 int mALLOPt(int param_number, int value)
4784 {
4785 mstate av = &main_arena;
4786 int res = 1;
4787
4788 if(__malloc_initialized < 0)
4789 ptmalloc_init ();
4790 (void)mutex_lock(&av->mutex);
4791 /* Ensure initialization/consolidation */
4792 malloc_consolidate(av);
4793
4794 switch(param_number) {
4795 case M_MXFAST:
4796 if (value >= 0 && value <= MAX_FAST_SIZE) {
4797 set_max_fast(value);
4798 }
4799 else
4800 res = 0;
4801 break;
4802
4803 case M_TRIM_THRESHOLD:
4804 mp_.trim_threshold = value;
4805 mp_.no_dyn_threshold = 1;
4806 break;
4807
4808 case M_TOP_PAD:
4809 mp_.top_pad = value;
4810 mp_.no_dyn_threshold = 1;
4811 break;
4812
4813 case M_MMAP_THRESHOLD:
4814 /* Forbid setting the threshold too high. */
4815 if((unsigned long)value > HEAP_MAX_SIZE/2)
4816 res = 0;
4817 else
4818 mp_.mmap_threshold = value;
4819 mp_.no_dyn_threshold = 1;
4820 break;
4821
4822 case M_MMAP_MAX:
4823 mp_.n_mmaps_max = value;
4824 mp_.no_dyn_threshold = 1;
4825 break;
4826
4827 case M_CHECK_ACTION:
4828 check_action = value;
4829 break;
4830
4831 case M_PERTURB:
4832 perturb_byte = value;
4833 break;
4834
4835 #ifdef PER_THREAD
4836 case M_ARENA_TEST:
4837 if (value > 0)
4838 mp_.arena_test = value;
4839 break;
4840
4841 case M_ARENA_MAX:
4842 if (value > 0)
4843 mp_.arena_max = value;
4844 break;
4845 #endif
4846 }
4847 (void)mutex_unlock(&av->mutex);
4848 return res;
4849 }
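
/*
   Usage sketch (illustration only): mallopt is exported as mallopt() (see
   the aliases at the end of this file) and declared in <malloc.h>.  The
   parameters below are ones handled by the switch above; mallopt returns
   nonzero on success and 0 if a value was rejected.

   #include <malloc.h>

   static void
   ex_tune_malloc (void)
   {
     (void) mallopt (M_MXFAST, 64);                  // cap the fastbin chunk size
     (void) mallopt (M_TRIM_THRESHOLD, 256 * 1024);  // trim top when this much is free
     (void) mallopt (M_MMAP_THRESHOLD, 1024 * 1024); // mmap() requests above 1 MiB
   }
*/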
4850
4851
4852 /*
4853 -------------------- Alternative MORECORE functions --------------------
4854 */
4855
4856
4857 /*
4858 General Requirements for MORECORE.
4859
4860 The MORECORE function must have the following properties:
4861
4862 If MORECORE_CONTIGUOUS is false:
4863
4864 * MORECORE must allocate in multiples of pagesize. It will
4865 only be called with arguments that are multiples of pagesize.
4866
4867 * MORECORE(0) must return an address that is at least
4868 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4869
4870   else (i.e. if MORECORE_CONTIGUOUS is true):
4871
4872 * Consecutive calls to MORECORE with positive arguments
4873 return increasing addresses, indicating that space has been
4874 contiguously extended.
4875
4876 * MORECORE need not allocate in multiples of pagesize.
4877 Calls to MORECORE need not have args of multiples of pagesize.
4878
4879 * MORECORE need not page-align.
4880
4881 In either case:
4882
4883 * MORECORE may allocate more memory than requested. (Or even less,
4884 but this will generally result in a malloc failure.)
4885
4886 * MORECORE must not allocate memory when given argument zero, but
4887 instead return one past the end address of memory from previous
4888 nonzero call. This malloc does NOT call MORECORE(0)
4889 until at least one call with positive arguments is made, so
4890 the initial value returned is not important.
4891
4892 * Even though consecutive calls to MORECORE need not return contiguous
4893 addresses, it must be OK for malloc'ed chunks to span multiple
4894 regions in those cases where they do happen to be contiguous.
4895
4896 * MORECORE need not handle negative arguments -- it may instead
4897 just return MORECORE_FAILURE when given negative arguments.
4898 Negative arguments are always multiples of pagesize. MORECORE
4899 must not misinterpret negative args as large positive unsigned
4900 args. You can suppress all such calls from even occurring by defining
4901 MORECORE_CANNOT_TRIM,
4902
4903 There is some variation across systems about the type of the
4904 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4905 actually be size_t, because sbrk supports negative args, so it is
4906 normally the signed type of the same width as size_t (sometimes
4907 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4908 matter though. Internally, we use "long" as arguments, which should
4909 work across all reasonable possibilities.
4910
4911 Additionally, if MORECORE ever returns failure for a positive
4912 request, then mmap is used as a noncontiguous system allocator. This
4913 is a useful backup strategy for systems with holes in address spaces
4914 -- in this case sbrk cannot contiguously expand the heap, but mmap
4915 may be able to map noncontiguous space.
4916
4917 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4918 a function that always returns MORECORE_FAILURE.
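
  For instance (illustration only; the argument type must match the
  MORECORE declaration used in your build, so ptrdiff_t here is an
  assumption):

  #define MORECORE fail_morecore

  static void *fail_morecore (ptrdiff_t increment)
  {
    return (void *) MORECORE_FAILURE;
  }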
4919
4920 If you are using this malloc with something other than sbrk (or its
4921 emulation) to supply memory regions, you probably want to set
4922 MORECORE_CONTIGUOUS as false. As an example, here is a custom
4923 allocator kindly contributed for pre-OSX macOS. It uses virtually
4924 but not necessarily physically contiguous non-paged memory (locked
4925 in, present and won't get swapped out). You can use it by
4926 uncommenting this section, adding some #includes, and setting up the
4927 appropriate defines above:
4928
4929 #define MORECORE osMoreCore
4930 #define MORECORE_CONTIGUOUS 0
4931
4932 There is also a shutdown routine that should somehow be called for
4933 cleanup upon program exit.
4934
4935 #define MAX_POOL_ENTRIES 100
4936 #define MINIMUM_MORECORE_SIZE (64 * 1024)
4937 static int next_os_pool;
4938 void *our_os_pools[MAX_POOL_ENTRIES];
4939
4940 void *osMoreCore(int size)
4941 {
4942 void *ptr = 0;
4943 static void *sbrk_top = 0;
4944
4945 if (size > 0)
4946 {
4947 if (size < MINIMUM_MORECORE_SIZE)
4948 size = MINIMUM_MORECORE_SIZE;
4949 if (CurrentExecutionLevel() == kTaskLevel)
4950 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4951 if (ptr == 0)
4952 {
4953 return (void *) MORECORE_FAILURE;
4954 }
4955 // save ptrs so they can be freed during cleanup
4956 our_os_pools[next_os_pool] = ptr;
4957 next_os_pool++;
4958 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4959 sbrk_top = (char *) ptr + size;
4960 return ptr;
4961 }
4962 else if (size < 0)
4963 {
4964 // we don't currently support shrink behavior
4965 return (void *) MORECORE_FAILURE;
4966 }
4967 else
4968 {
4969 return sbrk_top;
4970 }
4971 }
4972
4973 // cleanup any allocated memory pools
4974 // called as last thing before shutting down driver
4975
4976 void osCleanupMem(void)
4977 {
4978 void **ptr;
4979
4980 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4981 if (*ptr)
4982 {
4983 PoolDeallocate(*ptr);
4984 *ptr = 0;
4985 }
4986 }
4987
4988 */
4989
4990
4991 /* Helper code. */
4992
4993 extern char **__libc_argv attribute_hidden;
4994
4995 static void
4996 malloc_printerr(int action, const char *str, void *ptr)
4997 {
4998 if ((action & 5) == 5)
4999 __libc_message (action & 2, "%s\n", str);
5000 else if (action & 1)
5001 {
5002 char buf[2 * sizeof (uintptr_t) + 1];
5003
5004 buf[sizeof (buf) - 1] = '\0';
5005 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
5006 while (cp > buf)
5007 *--cp = '0';
5008
5009 __libc_message (action & 2,
5010 "*** glibc detected *** %s: %s: 0x%s ***\n",
5011 __libc_argv[0] ?: "<unknown>", str, cp);
5012 }
5013 else if (action & 2)
5014 abort ();
5015 }
5016
5017 #include <sys/param.h>
5018
5019 /* We need a wrapper function for one of the additions of POSIX. */
5020 int
5021 __posix_memalign (void **memptr, size_t alignment, size_t size)
5022 {
5023 void *mem;
5024
5025   /* Test whether the ALIGNMENT argument is valid.  It must be a power of
5026 two multiple of sizeof (void *). */
5027 if (alignment % sizeof (void *) != 0
5028 || !powerof2 (alignment / sizeof (void *)) != 0
5029 || alignment == 0)
5030 return EINVAL;
5031
5032 /* Call the hook here, so that caller is posix_memalign's caller
5033 and not posix_memalign itself. */
5034 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
5035 __const __malloc_ptr_t)) =
5036 force_reg (__memalign_hook);
5037 if (__builtin_expect (hook != NULL, 0))
5038 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
5039 else
5040 mem = public_mEMALIGn (alignment, size);
5041
5042 if (mem != NULL) {
5043 *memptr = mem;
5044 return 0;
5045 }
5046
5047 return ENOMEM;
5048 }
5049 weak_alias (__posix_memalign, posix_memalign)
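
/*
   Usage sketch (illustration only): posix_memalign returns 0 on success and
   an error number (EINVAL or ENOMEM, as above) on failure, rather than
   setting errno itself.

   #include <stdlib.h>
   #include <stdio.h>

   int main (void)
   {
     void *p = NULL;
     int err = posix_memalign (&p, 64, 1000);  // 64 is a power-of-two multiple of sizeof (void *)
     if (err != 0)
       {
         fprintf (stderr, "posix_memalign failed: %d\n", err);
         return 1;
       }
     free (p);
     return 0;
   }
*/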
5050
5051
5052 int
5053 malloc_info (int options, FILE *fp)
5054 {
5055 /* For now, at least. */
5056 if (options != 0)
5057 return EINVAL;
5058
5059 int n = 0;
5060 size_t total_nblocks = 0;
5061 size_t total_nfastblocks = 0;
5062 size_t total_avail = 0;
5063 size_t total_fastavail = 0;
5064 size_t total_system = 0;
5065 size_t total_max_system = 0;
5066 size_t total_aspace = 0;
5067 size_t total_aspace_mprotect = 0;
5068
5069 void mi_arena (mstate ar_ptr)
5070 {
5071 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5072
5073 size_t nblocks = 0;
5074 size_t nfastblocks = 0;
5075 size_t avail = 0;
5076 size_t fastavail = 0;
5077 struct
5078 {
5079 size_t from;
5080 size_t to;
5081 size_t total;
5082 size_t count;
5083 } sizes[NFASTBINS + NBINS - 1];
5084 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5085
5086 mutex_lock (&ar_ptr->mutex);
5087
5088 for (size_t i = 0; i < NFASTBINS; ++i)
5089 {
5090 mchunkptr p = fastbin (ar_ptr, i);
5091 if (p != NULL)
5092 {
5093 size_t nthissize = 0;
5094 size_t thissize = chunksize (p);
5095
5096 while (p != NULL)
5097 {
5098 ++nthissize;
5099 p = p->fd;
5100 }
5101
5102 fastavail += nthissize * thissize;
5103 nfastblocks += nthissize;
5104 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5105 sizes[i].to = thissize;
5106 sizes[i].count = nthissize;
5107 }
5108 else
5109 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5110
5111 sizes[i].total = sizes[i].count * sizes[i].to;
5112 }
5113
5114 mbinptr bin = bin_at (ar_ptr, 1);
5115 struct malloc_chunk *r = bin->fd;
5116 if (r != NULL)
5117 {
5118 while (r != bin)
5119 {
5120 ++sizes[NFASTBINS].count;
5121 sizes[NFASTBINS].total += r->size;
5122 sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size);
5123 sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size);
5124 r = r->fd;
5125 }
5126 nblocks += sizes[NFASTBINS].count;
5127 avail += sizes[NFASTBINS].total;
5128 }
5129
5130 for (size_t i = 2; i < NBINS; ++i)
5131 {
5132 bin = bin_at (ar_ptr, i);
5133 r = bin->fd;
5134 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5135 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5136 = sizes[NFASTBINS - 1 + i].count = 0;
5137
5138 if (r != NULL)
5139 while (r != bin)
5140 {
5141 ++sizes[NFASTBINS - 1 + i].count;
5142 sizes[NFASTBINS - 1 + i].total += r->size;
5143 sizes[NFASTBINS - 1 + i].from
5144 = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
5145 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5146 r->size);
5147
5148 r = r->fd;
5149 }
5150
5151 if (sizes[NFASTBINS - 1 + i].count == 0)
5152 sizes[NFASTBINS - 1 + i].from = 0;
5153 nblocks += sizes[NFASTBINS - 1 + i].count;
5154 avail += sizes[NFASTBINS - 1 + i].total;
5155 }
5156
5157 mutex_unlock (&ar_ptr->mutex);
5158
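/* Fold this arena's block counts and byte totals into the process-wide
   accumulators before emitting the per-arena elements. */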
5159 total_nfastblocks += nfastblocks;
5160 total_fastavail += fastavail;
5161
5162 total_nblocks += nblocks;
5163 total_avail += avail;
5164
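/* One <size> element per bin that held anything; the unsorted bin gets
   its own <unsorted> element because its chunks span many sizes. */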
5165 for (size_t i = 0; i < nsizes; ++i)
5166 if (sizes[i].count != 0 && i != NFASTBINS)
5167 fprintf (fp, "\
5168 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5169 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5170
5171 if (sizes[NFASTBINS].count != 0)
5172 fprintf (fp, "\
5173 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5174 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5175 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5176
5177 total_system += ar_ptr->system_mem;
5178 total_max_system += ar_ptr->max_system_mem;
5179
5180 fprintf (fp,
5181 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5182 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5183 "<system type=\"current\" size=\"%zu\"/>\n"
5184 "<system type=\"max\" size=\"%zu\"/>\n",
5185 nfastblocks, fastavail, nblocks, avail,
5186 ar_ptr->system_mem, ar_ptr->max_system_mem);
5187
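/* Address-space figures: a non-main arena lives on its own mmap'ed heap,
   so its heap size and the mprotect'ed (committed) portion are reported;
   the main arena has no separate heap_info structure, so system_mem is
   reported for both values. */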
5188 if (ar_ptr != &main_arena)
5189 {
5190 heap_info *heap = heap_for_ptr (top (ar_ptr));
5191 fprintf (fp,
5192 "<aspace type=\"total\" size=\"%zu\"/>\n"
5193 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5194 heap->size, heap->mprotect_size);
5195 total_aspace += heap->size;
5196 total_aspace_mprotect += heap->mprotect_size;
5197 }
5198 else
5199 {
5200 fprintf (fp,
5201 "<aspace type=\"total\" size=\"%zu\"/>\n"
5202 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5203 ar_ptr->system_mem, ar_ptr->system_mem);
5204 total_aspace += ar_ptr->system_mem;
5205 total_aspace_mprotect += ar_ptr->system_mem;
5206 }
5207
5208 fputs ("</heap>\n", fp);
5209 }
5210
5211 if (__malloc_initialized < 0)
5212 ptmalloc_init ();
5213
5214 fputs ("<malloc version=\"1\">\n", fp);
5215
5216 /* Iterate over all arenas currently in use. */
5217 mstate ar_ptr = &main_arena;
5218 do
5219 {
5220 mi_arena (ar_ptr);
5221 ar_ptr = ar_ptr->next;
5222 }
5223 while (ar_ptr != &main_arena);
5224
5225 fprintf (fp,
5226 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5227 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5228 "<system type=\"current\" size=\"%zu\"/>\n"
5229 "<system type=\"max\" size=\"%zu\"/>\n"
5230 "<aspace type=\"total\" size=\"%zu\"/>\n"
5231 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5232 "</malloc>\n",
5233 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
5234 total_system, total_max_system,
5235 total_aspace, total_aspace_mprotect);
5236
5237 return 0;
5238 }
5239
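/* Illustrative sketch, not part of the library: capturing the XML
   emitted by malloc_info in memory with open_memstream instead of
   writing it straight to a file.  The document has the shape produced
   above: a <malloc version="1"> root, one <heap> element per arena with
   <sizes>, <total>, <system> and <aspace> children, followed by the
   process-wide <total>, <system> and <aspace> elements.  A nonzero
   return value means the OPTIONS argument was invalid.

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int
     dump_malloc_stats (void)
     {
       char *buf = NULL;
       size_t len = 0;
       FILE *stream = open_memstream (&buf, &len);
       if (stream == NULL)
         return -1;
       int err = malloc_info (0, stream);
       fclose (stream);
       if (err == 0)
         fwrite (buf, 1, len, stderr);
       free (buf);
       return err;
     }
*/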
5240
5241 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5242 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5243 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5244 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5245 strong_alias (__libc_memalign, __memalign)
5246 weak_alias (__libc_memalign, memalign)
5247 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5248 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5249 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5250 strong_alias (__libc_mallinfo, __mallinfo)
5251 weak_alias (__libc_mallinfo, mallinfo)
5252 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5253
5254 weak_alias (__malloc_stats, malloc_stats)
5255 weak_alias (__malloc_usable_size, malloc_usable_size)
5256 weak_alias (__malloc_trim, malloc_trim)
5257 weak_alias (__malloc_get_state, malloc_get_state)
5258 weak_alias (__malloc_set_state, malloc_set_state)
5259
5260
5261 /* ------------------------------------------------------------
5262 History:
5263
5264 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5265
5266 */
5267 /*
5268 * Local variables:
5269 * c-basic-offset: 2
5270 * End:
5271 */