1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <http://www.gnu.org/licenses/>. */
20
21 /*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
29 * Version ptmalloc2-20011215
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32
33 * Quickstart
34
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
43
44 * Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71 * Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
76 free(void* p);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
86 pvalloc(size_t n);
87 cfree(void* p);
88 malloc_trim(size_t pad);
89 malloc_usable_size(void* p);
90 malloc_stats();
91
92 * Vital statistics:
93
94 Supported pointer representation: 4 or 8 bytes
95 Supported size_t representation: 4 or 8 bytes
96 Note that size_t is allowed to be 4 bytes even if pointers are 8.
97 You can adjust this by defining INTERNAL_SIZE_T
98
99 Alignment: 2 * sizeof(size_t) (default)
100 (i.e., 8-byte alignment with 4-byte size_t). This suffices for
101 nearly all current machines and C compilers. However, you can
102 define MALLOC_ALIGNMENT to be wider than this if necessary.
103
104 Minimum overhead per allocated chunk: 4 or 8 bytes
105 Each malloced chunk has a hidden word of overhead holding size
106 and status information.
107
108 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
109 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
110
111 When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8 byte
112 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
113 needed; 4 (8) for a trailing size field and 8 (16) bytes for
114 free list pointers. Thus, the minimum allocatable size is
115 16/24/32 bytes.
116
117 Even a request for zero bytes (i.e., malloc(0)) returns a
118 pointer to something of the minimum allocatable size.
119
120 The maximum overhead wastage (i.e., number of extra bytes
121 allocated beyond those requested in malloc) is less than or equal
122 to the minimum size, except for requests >= mmap_threshold that
123 are serviced via mmap(), where the worst case wastage is 2 *
124 sizeof(size_t) bytes plus the remainder from a system page (the
125 minimal mmap unit); typically 4096 or 8192 bytes.
126
127 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
128 8-byte size_t: 2^64 minus about two pages
129
130 It is assumed that (possibly signed) size_t values suffice to
131 represent chunk sizes. `Possibly signed' is due to the fact
132 that `size_t' may be defined on a system as either a signed or
133 an unsigned type. The ISO C standard says that it must be
134 unsigned, but a few systems are known not to adhere to this.
135 Additionally, even when size_t is unsigned, sbrk (which is by
136 default used to obtain memory from system) accepts signed
137 arguments, and may not be able to handle size_t-wide arguments
138 with negative sign bit. Generally, values that would
139 appear as negative after accounting for overhead and alignment
140 are supported only via mmap(), which does not have this
141 limitation.
142
143 Requests for sizes outside the allowed range will perform an optional
144 failure action and then return null. (Requests may also
145 fail because a system is out of memory.)
146
147 Thread-safety: thread-safe
148
149 Compliance: I believe it is compliant with the 1997 Single Unix Specification,
150 also SVID/XPG, ANSI C, and probably others as well.
151
152 * Synopsis of compile-time options:
153
154 People have reported using previous versions of this malloc on all
155 versions of Unix, sometimes by tweaking some of the defines
156 below. It has been tested most extensively on Solaris and Linux.
157 People also report using it in stand-alone embedded systems.
158
159 The implementation is in straight, hand-tuned ANSI C. It is not
160 at all modular. (Sorry!) It uses a lot of macros. To be at all
161 usable, this code should be compiled using an optimizing compiler
162 (for example gcc -O3) that can simplify expressions and control
163 paths. (FAQ: some macros import variables as arguments rather than
164 declare locals because people reported that some debuggers
165 otherwise get confused.)
166
167 OPTION DEFAULT VALUE
168
169 Compilation Environment options:
170
171 HAVE_MREMAP 0 unless linux defined
172
173 Changing default word sizes:
174
175 INTERNAL_SIZE_T size_t
176 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
177 __alignof__ (long double))
178
179 Configuration and functionality options:
180
181 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
182 USE_MALLOC_LOCK NOT defined
183 MALLOC_DEBUG NOT defined
184 REALLOC_ZERO_BYTES_FREES 1
185 TRIM_FASTBINS 0
186
187 Options for customizing MORECORE:
188
189 MORECORE sbrk
190 MORECORE_FAILURE -1
191 MORECORE_CONTIGUOUS 1
192 MORECORE_CANNOT_TRIM NOT defined
193 MORECORE_CLEARS 1
194 MMAP_AS_MORECORE_SIZE (1024 * 1024)
195
196 Tuning options that are also dynamically changeable via mallopt:
197
198 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
199 DEFAULT_TRIM_THRESHOLD 128 * 1024
200 DEFAULT_TOP_PAD 0
201 DEFAULT_MMAP_THRESHOLD 128 * 1024
202 DEFAULT_MMAP_MAX 65536
203
204 There are several other #defined constants and macros that you
205 probably don't want to touch unless you are extending or adapting malloc. */
206
207 /*
208 void* is the pointer type that malloc should say it returns
209 */
210
211 #ifndef void
212 #define void void
213 #endif /*void*/
214
215 #include <stddef.h> /* for size_t */
216 #include <stdlib.h> /* for getenv(), abort() */
217
218 #include <malloc-machine.h>
219
220 #include <atomic.h>
221 #include <_itoa.h>
222 #include <bits/wordsize.h>
223 #include <sys/sysinfo.h>
224
225 #include <ldsodefs.h>
226
227 #include <unistd.h>
228 #include <stdio.h> /* needed for malloc_stats */
229 #include <errno.h>
230
231 #include <shlib-compat.h>
232
233 /* For uintptr_t. */
234 #include <stdint.h>
235
236 /* For va_arg, va_start, va_end. */
237 #include <stdarg.h>
238
239
240 /*
241 Debugging:
242
243 Because freed chunks may be overwritten with bookkeeping fields, this
244 malloc will often die when freed memory is overwritten by user
245 programs. This can be very effective (albeit in an annoying way)
246 in helping track down dangling pointers.
247
248 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
249 enabled that will catch more memory errors. You probably won't be
250 able to make much sense of the actual assertion errors, but they
251 should help you locate incorrectly overwritten memory. The checking
252 is fairly extensive, and will slow down execution
253 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
254 will attempt to check every non-mmapped allocated and free chunk in
255 the course of computing the summaries. (By nature, mmapped regions
256 cannot be checked very much automatically.)
257
258 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
259 this code. The assertions in the check routines spell out in more
260 detail the assumptions and invariants underlying the algorithms.
261
262 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
263 checking that all accesses to malloced memory stay within their
264 bounds. However, there are several add-ons and adaptations of this
265 or other mallocs available that do this.
266 */
267
268 #ifdef NDEBUG
269 # define assert(expr) ((void) 0)
270 #else
271 # define assert(expr) \
272 ((expr) \
273 ? ((void) 0) \
274 : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
275
276 extern const char *__progname;
277
278 static void
279 __malloc_assert (const char *assertion, const char *file, unsigned int line,
280 const char *function)
281 {
282 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
283 __progname, __progname[0] ? ": " : "",
284 file, line,
285 function ? function : "", function ? ": " : "",
286 assertion);
287 fflush (stderr);
288 abort ();
289 }
290 #endif
291
292
293 /*
294 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
295 of chunk sizes.
296
297 The default version is the same as size_t.
298
299 While not strictly necessary, it is best to define this as an
300 unsigned type, even if size_t is a signed type. This may avoid some
301 artificial size limitations on some systems.
302
303 On a 64-bit machine, you may be able to reduce malloc overhead by
304 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
305 expense of not being able to handle more than 2^32 of malloced
306 space. If this limitation is acceptable, you are encouraged to set
307 this unless you are on a platform requiring 16-byte alignments. In
308 this case the alignment requirements turn out to negate any
309 potential advantages of decreasing size_t word size.
310
311 Implementors: Beware of the possible combinations of:
312 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
313 and might be the same width as int or as long
314 - size_t might have different width and signedness than INTERNAL_SIZE_T
315 - int and long might be 32 or 64 bits, and might be the same width
316 To deal with this, most comparisons and difference computations
317 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
318 aware of the fact that casting an unsigned int to a wider long does
319 not sign-extend. (This also makes checking for negative numbers
320 awkward.) Some of these casts result in harmless compiler warnings
321 on some systems.
322 */
323
324 #ifndef INTERNAL_SIZE_T
325 #define INTERNAL_SIZE_T size_t
326 #endif
327
328 /* The corresponding word size */
329 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
330
331
332 /*
333 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
334 It must be a power of two at least 2 * SIZE_SZ, even on machines
335 for which smaller alignments would suffice. It may be defined as
336 larger than this though. Note however that code and data structures
337 are optimized for the case of 8-byte alignment.
338 */
339
340
341 #ifndef MALLOC_ALIGNMENT
342 # if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
343 /* This is the correct definition when there is no past ABI to constrain it.
344
345 Among configurations with a past ABI constraint, it differs from
346 2*SIZE_SZ only on powerpc32. For the time being, changing this is
347 causing more compatibility problems due to malloc_get_state and
348 malloc_set_state than will returning blocks not adequately aligned for
349 long double objects under -mlong-double-128. */
350
351 # define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
352 ? __alignof__ (long double) : 2 * SIZE_SZ)
353 # else
354 # define MALLOC_ALIGNMENT (2 * SIZE_SZ)
355 # endif
356 #endif
357
358 /* The corresponding bit mask value */
359 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
360
361
362
363 /*
364 REALLOC_ZERO_BYTES_FREES should be set if a call to
365 realloc with zero bytes should be the same as a call to free.
366 This is required by the C standard. Otherwise, because this malloc
367 returns a unique pointer for malloc(0), realloc(p, 0) does too.
368 */
369
370 #ifndef REALLOC_ZERO_BYTES_FREES
371 #define REALLOC_ZERO_BYTES_FREES 1
372 #endif
373
374 /*
375 TRIM_FASTBINS controls whether free() of a very small chunk can
376 immediately lead to trimming. Setting to true (1) can reduce memory
377 footprint, but will almost always slow down programs that use a lot
378 of small chunks.
379
380 Define this only if you are willing to give up some speed to more
381 aggressively reduce system-level memory footprint when releasing
382 memory in programs that use many small chunks. You can get
383 essentially the same effect by setting MXFAST to 0, but this can
384 lead to even greater slowdowns in programs using many small chunks.
385 TRIM_FASTBINS is an in-between compile-time option that disables
386 only those chunks bordering topmost memory from being placed in
387 fastbins.
388 */
389
390 #ifndef TRIM_FASTBINS
391 #define TRIM_FASTBINS 0
392 #endif
393
394
395 /* Definition for getting more memory from the OS. */
396 #define MORECORE (*__morecore)
397 #define MORECORE_FAILURE 0
398 void * __default_morecore (ptrdiff_t);
399 void *(*__morecore)(ptrdiff_t) = __default_morecore;
400
401
402 #include <string.h>
403
404
405 /* Force a value to be in a register and stop the compiler referring
406 to the source (mostly memory location) again. */
407 #define force_reg(val) \
408 ({ __typeof (val) _v; asm ("" : "=r" (_v) : "0" (val)); _v; })
409
410
411 /*
412 MORECORE-related declarations. By default, rely on sbrk
413 */
414
415
416 /*
417 MORECORE is the name of the routine to call to obtain more memory
418 from the system. See below for general guidance on writing
419 alternative MORECORE functions, as well as a version for WIN32 and a
420 sample version for pre-OSX macos.
421 */
422
423 #ifndef MORECORE
424 #define MORECORE sbrk
425 #endif
426
427 /*
428 MORECORE_FAILURE is the value returned upon failure of MORECORE
429 as well as mmap. Since it cannot be an otherwise valid memory address,
430 and must reflect values of standard sys calls, you probably ought not
431 try to redefine it.
432 */
433
434 #ifndef MORECORE_FAILURE
435 #define MORECORE_FAILURE (-1)
436 #endif
437
438 /*
439 If MORECORE_CONTIGUOUS is true, take advantage of fact that
440 consecutive calls to MORECORE with positive arguments always return
441 contiguous increasing addresses. This is true of unix sbrk. Even
442 if not defined, when regions happen to be contiguous, malloc will
443 permit allocations spanning regions obtained from different
444 calls. But defining this when applicable enables some stronger
445 consistency checks and space efficiencies.
446 */
447
448 #ifndef MORECORE_CONTIGUOUS
449 #define MORECORE_CONTIGUOUS 1
450 #endif
451
452 /*
453 Define MORECORE_CANNOT_TRIM if your version of MORECORE
454 cannot release space back to the system when given negative
455 arguments. This is generally necessary only if you are using
456 a hand-crafted MORECORE function that cannot handle negative arguments.
457 */
458
459 /* #define MORECORE_CANNOT_TRIM */
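/*
  A minimal sketch of an alternative MORECORE, assuming a hypothetical
  fixed-size static arena (the names below are illustrative only).  It
  refuses negative increments, so MORECORE_CANNOT_TRIM would be defined
  alongside it; an increment of 0 simply reports the current end of the
  arena:

    static char arena[4 * 1024 * 1024];
    static size_t arena_used;

    static void *static_morecore (ptrdiff_t increment)
    {
      if (increment < 0
          || arena_used + (size_t) increment > sizeof arena)
        return (void *) MORECORE_FAILURE;
      void *brk = arena + arena_used;
      arena_used += (size_t) increment;
      return brk;
    }

  Such a routine could then be plugged in by defining MORECORE to
  static_morecore (or, in this configuration, by assigning it to the
  __morecore hook), and MORECORE_CONTIGUOUS can stay 1 because the
  arena grows contiguously.
*/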
460
461 /* MORECORE_CLEARS (default 1)
462 The degree to which the routine mapped to MORECORE zeroes out
463 memory: never (0), only for newly allocated space (1) or always
464 (2). The distinction between (1) and (2) is necessary because on
465 some systems, if the application first decrements and then
466 increments the break value, the contents of the reallocated space
467 are unspecified.
468 */
469
470 #ifndef MORECORE_CLEARS
471 #define MORECORE_CLEARS 1
472 #endif
473
474
475 /*
476 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
477 sbrk fails, and mmap is used as a backup. The value must be a
478 multiple of page size. This backup strategy generally applies only
479 when systems have "holes" in address space, so sbrk cannot perform
480 contiguous expansion, but there is still space available on system.
481 On systems for which this is known to be useful (i.e. most linux
482 kernels), this occurs only when programs allocate huge amounts of
483 memory. Between this, and the fact that mmap regions tend to be
484 limited, the size should be large, to avoid too many mmap calls and
485 thus avoid running out of kernel resources. */
486
487 #ifndef MMAP_AS_MORECORE_SIZE
488 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
489 #endif
490
491 /*
492 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
493 large blocks. This is currently only possible on Linux with
494 kernel versions newer than 1.3.77.
495 */
496
497 #ifndef HAVE_MREMAP
498 #ifdef linux
499 #define HAVE_MREMAP 1
500 #else
501 #define HAVE_MREMAP 0
502 #endif
503
504 #endif /* HAVE_MREMAP */
505
506
507 /*
508 This version of malloc supports the standard SVID/XPG mallinfo
509 routine that returns a struct containing usage properties and
510 statistics. It should work on any SVID/XPG compliant system that has
511 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
512 install such a thing yourself, cut out the preliminary declarations
513 as described above and below and save them in a malloc.h file. But
514 there's no compelling reason to bother to do this.)
515
516 The main declaration needed is the mallinfo struct that is returned
517 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
518 bunch of fields that are not even meaningful in this version of
519 malloc. These fields are instead filled by mallinfo() with
520 other numbers that might be of interest.
521 */
522
523
524 /* ---------- description of public routines ------------ */
525
526 /*
527 malloc(size_t n)
528 Returns a pointer to a newly allocated chunk of at least n bytes, or null
529 if no space is available. Additionally, on failure, errno is
530 set to ENOMEM on ANSI C systems.
531
532 If n is zero, malloc returns a minimum-sized chunk. (The minimum
533 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
534 systems.) On most systems, size_t is an unsigned type, so calls
535 with negative arguments are interpreted as requests for huge amounts
536 of space, which will often fail. The maximum supported value of n
537 differs across systems, but is in all cases less than the maximum
538 representable value of a size_t.
539 */
540 void* __libc_malloc(size_t);
541 libc_hidden_proto (__libc_malloc)
542
543 /*
544 free(void* p)
545 Releases the chunk of memory pointed to by p, that had been previously
546 allocated using malloc or a related routine such as realloc.
547 It has no effect if p is null. It can have arbitrary (i.e., bad!)
548 effects if p has already been freed.
549
550 Unless disabled (using mallopt), freeing very large spaces will,
551 when possible, automatically trigger operations that give
552 back unused memory to the system, thus reducing program footprint.
553 */
554 void __libc_free(void*);
555 libc_hidden_proto (__libc_free)
556
557 /*
558 calloc(size_t n_elements, size_t element_size);
559 Returns a pointer to n_elements * element_size bytes, with all locations
560 set to zero.
561 */
562 void* __libc_calloc(size_t, size_t);
563
564 /*
565 realloc(void* p, size_t n)
566 Returns a pointer to a chunk of size n that contains the same data
567 as does chunk p up to the minimum of (n, p's size) bytes, or null
568 if no space is available.
569
570 The returned pointer may or may not be the same as p. The algorithm
571 prefers extending p when possible, otherwise it employs the
572 equivalent of a malloc-copy-free sequence.
573
574 If p is null, realloc is equivalent to malloc.
575
576 If space is not available, realloc returns null, errno is set (if on
577 ANSI) and p is NOT freed.
578
579 If n is for fewer bytes than already held by p, the newly unused
580 space is lopped off and freed if possible. Unless the #define
581 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
582 zero (re)allocates a minimum-sized chunk.
583
584 Large chunks that were internally obtained via mmap will always
585 be reallocated using malloc-copy-free sequences unless
586 the system supports MREMAP (currently only linux).
587
588 The old unix realloc convention of allowing the last-free'd chunk
589 to be used as an argument to realloc is not supported.
590 */
591 void* __libc_realloc(void*, size_t);
592 libc_hidden_proto (__libc_realloc)
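/*
  Because a failed realloc leaves p allocated, callers should not
  overwrite their only pointer with the return value.  A minimal sketch
  of the usual pattern (handle_oom is a hypothetical error path):

    void *tmp = realloc (p, new_size);
    if (tmp == NULL)
      handle_oom ();        p is still valid and still owned here
    else
      p = tmp;
*/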
593
594 /*
595 memalign(size_t alignment, size_t n);
596 Returns a pointer to a newly allocated chunk of n bytes, aligned
597 in accord with the alignment argument.
598
599 The alignment argument should be a power of two. If the argument is
600 not a power of two, the nearest greater power is used.
601 8-byte alignment is guaranteed by normal malloc calls, so don't
602 bother calling memalign with an argument of 8 or less.
603
604 Overreliance on memalign is a sure way to fragment space.
605 */
606 void* __libc_memalign(size_t, size_t);
607 libc_hidden_proto (__libc_memalign)
608
609 /*
610 valloc(size_t n);
611 Equivalent to memalign(pagesize, n), where pagesize is the page
612 size of the system. If the pagesize is unknown, 4096 is used.
613 */
614 void* __libc_valloc(size_t);
615
616
617
618 /*
619 mallopt(int parameter_number, int parameter_value)
620 Sets tunable parameters. The format is to provide a
621 (parameter-number, parameter-value) pair. mallopt then sets the
622 corresponding parameter to the argument value if it can (i.e., so
623 long as the value is meaningful), and returns 1 if successful else
624 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
625 normally defined in malloc.h. Only one of these (M_MXFAST) is used
626 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
627 so setting them has no effect. But this malloc also supports four
628 other options in mallopt. See below for details. Briefly, supported
629 parameters are as follows (listed defaults are for "typical"
630 configurations).
631
632 Symbol param # default allowed param values
633 M_MXFAST 1 64 0-80 (0 disables fastbins)
634 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
635 M_TOP_PAD -2 0 any
636 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
637 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
638 */
639 int __libc_mallopt(int, int);
640 libc_hidden_proto (__libc_mallopt)
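/*
  A minimal usage sketch; the values are chosen only for illustration:

    #include <malloc.h>

    int ok = 1;
    ok &= mallopt (M_TRIM_THRESHOLD, 256 * 1024);
    ok &= mallopt (M_MMAP_THRESHOLD, 1024 * 1024);
    if (!ok)
      fputs ("some mallopt parameter was rejected\n", stderr);

  Each call returns 1 on success and 0 if the parameter number or value
  was not accepted.
*/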
641
642
643 /*
644 mallinfo()
645 Returns (by copy) a struct containing various summary statistics:
646
647 arena: current total non-mmapped bytes allocated from system
648 ordblks: the number of free chunks
649 smblks: the number of fastbin blocks (i.e., small chunks that
650 have been freed but not yet reused or consolidated)
651 hblks: current number of mmapped regions
652 hblkhd: total bytes held in mmapped regions
653 usmblks: the maximum total allocated space. This will be greater
654 than current total if trimming has occurred.
655 fsmblks: total bytes held in fastbin blocks
656 uordblks: current total allocated space (normal or mmapped)
657 fordblks: total free space
658 keepcost: the maximum number of bytes that could ideally be released
659 back to system via malloc_trim. ("ideally" means that
660 it ignores page restrictions etc.)
661
662 Because these fields are ints, but internal bookkeeping may
663 be kept as longs, the reported values may wrap around zero and
664 thus be inaccurate.
665 */
666 struct mallinfo __libc_mallinfo(void);
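/*
  A minimal reporting sketch using a few of the fields described above:

    struct mallinfo mi = mallinfo ();
    fprintf (stderr, "arena=%d bytes, in use=%d, free=%d, mmapped=%d\n",
             mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd);
*/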
667
668
669 /*
670 pvalloc(size_t n);
671 Equivalent to valloc(minimum-page-that-holds(n)), that is,
672 round up n to nearest pagesize.
673 */
674 void* __libc_pvalloc(size_t);
675
676 /*
677 malloc_trim(size_t pad);
678
679 If possible, gives memory back to the system (via negative
680 arguments to sbrk) if there is unused memory at the `high' end of
681 the malloc pool. You can call this after freeing large blocks of
682 memory to potentially reduce the system-level memory requirements
683 of a program. However, it cannot guarantee to reduce memory. Under
684 some allocation patterns, some large free blocks of memory will be
685 locked between two used chunks, so they cannot be given back to
686 the system.
687
688 The `pad' argument to malloc_trim represents the amount of free
689 trailing space to leave untrimmed. If this argument is zero,
690 only the minimum amount of memory to maintain internal data
691 structures will be left (one page or less). Non-zero arguments
692 can be supplied to maintain enough trailing space to service
693 future expected allocations without having to re-obtain memory
694 from the system.
695
696 Malloc_trim returns 1 if it actually released any memory, else 0.
697 On systems that do not support "negative sbrks", it will always
698 return 0.
699 */
700 int __malloc_trim(size_t);
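/*
  A minimal usage sketch (big_buffer is a hypothetical large
  allocation); the 64K pad keeps some slack for future requests:

    free (big_buffer);
    if (malloc_trim (64 * 1024) == 0)
      fputs ("nothing could be returned to the system\n", stderr);

  Passing a pad of 0 trims as much as possible.
*/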
701
702 /*
703 malloc_usable_size(void* p);
704
705 Returns the number of bytes you can actually use in
706 an allocated chunk, which may be more than you requested (although
707 often not) due to alignment and minimum size constraints.
708 You can use this many bytes without worrying about
709 overwriting other allocated objects. This is not a particularly great
710 programming practice. malloc_usable_size can be more useful in
711 debugging and assertions, for example:
712
713 p = malloc(n);
714 assert(malloc_usable_size(p) >= 256);
715
716 */
717 size_t __malloc_usable_size(void*);
718
719 /*
720 malloc_stats();
721 Prints on stderr the amount of space obtained from the system (both
722 via sbrk and mmap), the maximum amount (which may be more than
723 current if malloc_trim and/or munmap got called), and the current
724 number of bytes allocated via malloc (or realloc, etc) but not yet
725 freed. Note that this is the number of bytes allocated, not the
726 number requested. It will be larger than the number requested
727 because of alignment and bookkeeping overhead. Because it includes
728 alignment wastage as being in use, this figure may be greater than
729 zero even when no user-level chunks are allocated.
730
731 The reported current and maximum system memory can be inaccurate if
732 a program makes other calls to system memory allocation functions
733 (normally sbrk) outside of malloc.
734
735 malloc_stats prints only the most commonly interesting statistics.
736 More information can be obtained by calling mallinfo.
737
738 */
739 void __malloc_stats(void);
740
741 /*
742 malloc_get_state(void);
743
744 Returns the state of all malloc variables in an opaque data
745 structure.
746 */
747 void* __malloc_get_state(void);
748
749 /*
750 malloc_set_state(void* state);
751
752 Restore the state of all malloc variables from data obtained with
753 malloc_get_state().
754 */
755 int __malloc_set_state(void*);
756
757 /*
758 posix_memalign(void **memptr, size_t alignment, size_t size);
759
760 POSIX wrapper like memalign(), checking for validity of size.
761 */
762 int __posix_memalign(void **, size_t, size_t);
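/*
  A minimal usage sketch requesting 4096-byte alignment (the alignment
  must be a power of two and a multiple of sizeof(void *)); note that
  posix_memalign reports failure through its return value, not errno:

    void *buf;
    if (posix_memalign (&buf, 4096, 8192) == 0)
      {
        memset (buf, 0, 8192);
        free (buf);
      }
*/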
763
764 /* mallopt tuning options */
765
766 /*
767 M_MXFAST is the maximum request size used for "fastbins", special bins
768 that hold returned chunks without consolidating their spaces. This
769 enables future requests for chunks of the same size to be handled
770 very quickly, but can increase fragmentation, and thus increase the
771 overall memory footprint of a program.
772
773 This malloc manages fastbins very conservatively yet still
774 efficiently, so fragmentation is rarely a problem for values less
775 than or equal to the default. The maximum supported value of MXFAST
776 is 80. You wouldn't want it any higher than this anyway. Fastbins
777 are designed especially for use with many small structs, objects or
778 strings -- the default handles structs/objects/arrays with sizes up
779 to 8 4-byte fields, or small strings representing words, tokens,
780 etc. Using fastbins for larger objects normally worsens
781 fragmentation without improving speed.
782
783 M_MXFAST is set in REQUEST size units. It is internally used in
784 chunksize units, which adds padding and alignment. You can reduce
785 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
786 algorithm to be a closer approximation of fifo-best-fit in all cases,
787 not just for larger requests, but will generally cause it to be
788 slower.
789 */
790
791
792 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
793 #ifndef M_MXFAST
794 #define M_MXFAST 1
795 #endif
796
797 #ifndef DEFAULT_MXFAST
798 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
799 #endif
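/*
  As noted above, fastbins can be disabled or resized via mallopt; a
  minimal sketch:

    mallopt (M_MXFAST, 0);     disable all use of fastbins
    mallopt (M_MXFAST, 80);    the maximum supported value

  (In practice a program would pick one setting, not both.)
*/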
800
801
802 /*
803 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
804 to keep before releasing via malloc_trim in free().
805
806 Automatic trimming is mainly useful in long-lived programs.
807 Because trimming via sbrk can be slow on some systems, and can
808 sometimes be wasteful (in cases where programs immediately
809 afterward allocate more large chunks) the value should be high
810 enough so that your overall system performance would improve by
811 releasing this much memory.
812
813 The trim threshold and the mmap control parameters (see below)
814 can be traded off with one another. Trimming and mmapping are
815 two different ways of releasing unused memory back to the
816 system. Between these two, it is often possible to keep
817 system-level demands of a long-lived program down to a bare
818 minimum. For example, in one test suite of sessions measuring
819 the XF86 X server on Linux, using a trim threshold of 128K and a
820 mmap threshold of 192K led to near-minimal long term resource
821 consumption.
822
823 If you are using this malloc in a long-lived program, it should
824 pay to experiment with these values. As a rough guide, you
825 might set it to a value close to the average size of a process
826 (program) running on your system. Releasing this much memory
827 would allow such a process to run in memory. Generally, it's
828 worth it to tune for trimming rather than memory mapping when a
829 program undergoes phases where several large chunks are
830 allocated and released in ways that can reuse each other's
831 storage, perhaps mixed with phases where there are no such
832 chunks at all. And in well-behaved long-lived programs,
833 controlling release of large blocks via trimming versus mapping
834 is usually faster.
835
836 However, in most programs, these parameters serve mainly as
837 protection against the system-level effects of carrying around
838 massive amounts of unneeded memory. Since frequent calls to
839 sbrk, mmap, and munmap otherwise degrade performance, the default
840 parameters are set to relatively high values that serve only as
841 safeguards.
842
843 The trim value must be greater than the page size to have any useful
844 effect. To disable trimming completely, you can set it to
845 (unsigned long)(-1).
846
847 Trim settings interact with fastbin (MXFAST) settings: Unless
848 TRIM_FASTBINS is defined, automatic trimming never takes place upon
849 freeing a chunk with size less than or equal to MXFAST. Trimming is
850 instead delayed until subsequent freeing of larger chunks. However,
851 you can still force an attempted trim by calling malloc_trim.
852
853 Also, trimming is not generally possible in cases where
854 the main arena is obtained via mmap.
855
856 Note that the trick some people use of mallocing a huge space and
857 then freeing it at program startup, in an attempt to reserve system
858 memory, doesn't have the intended effect under automatic trimming,
859 since that memory will immediately be returned to the system.
860 */
861
862 #define M_TRIM_THRESHOLD -1
863
864 #ifndef DEFAULT_TRIM_THRESHOLD
865 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
866 #endif
867
868 /*
869 M_TOP_PAD is the amount of extra `padding' space to allocate or
870 retain whenever sbrk is called. It is used in two ways internally:
871
872 * When sbrk is called to extend the top of the arena to satisfy
873 a new malloc request, this much padding is added to the sbrk
874 request.
875
876 * When malloc_trim is called automatically from free(),
877 it is used as the `pad' argument.
878
879 In both cases, the actual amount of padding is rounded
880 so that the end of the arena is always a system page boundary.
881
882 The main reason for using padding is to avoid calling sbrk so
883 often. Having even a small pad greatly reduces the likelihood
884 that nearly every malloc request during program start-up (or
885 after trimming) will invoke sbrk, which needlessly wastes
886 time.
887
888 Automatic rounding-up to page-size units is normally sufficient
889 to avoid measurable overhead, so the default is 0. However, in
890 systems where sbrk is relatively slow, it can pay to increase
891 this value, at the expense of carrying around more memory than
892 the program needs.
893 */
894
895 #define M_TOP_PAD -2
896
897 #ifndef DEFAULT_TOP_PAD
898 #define DEFAULT_TOP_PAD (0)
899 #endif
900
901 /*
902 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
903 adjusted MMAP_THRESHOLD.
904 */
905
906 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
907 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
908 #endif
909
910 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
911 /* For 32-bit platforms we cannot increase the maximum mmap
912 threshold much because it is also the minimum value for the
913 maximum heap size and its alignment. Going above 512k (i.e., 1M
914 for new heaps) wastes too much address space. */
915 # if __WORDSIZE == 32
916 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
917 # else
918 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
919 # endif
920 #endif
921
922 /*
923 M_MMAP_THRESHOLD is the request size threshold for using mmap()
924 to service a request. Requests of at least this size that cannot
925 be allocated using already-existing space will be serviced via mmap.
926 (If enough normal freed space already exists it is used instead.)
927
928 Using mmap segregates relatively large chunks of memory so that
929 they can be individually obtained and released from the host
930 system. A request serviced through mmap is never reused by any
931 other request (at least not directly; the system may just so
932 happen to remap successive requests to the same locations).
933
934 Segregating space in this way has the benefits that:
935
936 1. Mmapped space can ALWAYS be individually released back
937 to the system, which helps keep the system level memory
938 demands of a long-lived program low.
939 2. Mapped memory can never become `locked' between
940 other chunks, as can happen with normally allocated chunks, which
941 means that even trimming via malloc_trim would not release them.
942 3. On some systems with "holes" in address spaces, mmap can obtain
943 memory that sbrk cannot.
944
945 However, it has the disadvantages that:
946
947 1. The space cannot be reclaimed, consolidated, and then
948 used to service later requests, as happens with normal chunks.
949 2. It can lead to more wastage because of mmap page alignment
950 requirements
951 3. It causes malloc performance to be more dependent on host
952 system memory management support routines which may vary in
953 implementation quality and may impose arbitrary
954 limitations. Generally, servicing a request via normal
955 malloc steps is faster than going through a system's mmap.
956
957 The advantages of mmap nearly always outweigh disadvantages for
958 "large" chunks, but the value of "large" varies across systems. The
959 default is an empirically derived value that works well in most
960 systems.
961
962
963 Update in 2006:
964 The above was written in 2001. Since then the world has changed a lot.
965 Memory got bigger. Applications got bigger. The virtual address space
966 layout in 32 bit linux changed.
967
968 In the new situation, brk() and mmap space is shared and there are no
969 artificial limits on brk size imposed by the kernel. What is more,
970 applications have started using transient allocations larger than the
971 128Kb that was imagined in 2001.
972
973 The price for mmap is also high now; each time glibc mmaps from the
974 kernel, the kernel is forced to zero out the memory it gives to the
975 application. Zeroing memory is expensive and eats a lot of cache and
976 memory bandwidth. This has nothing to do with the efficiency of the
977 virtual memory system; when doing mmap the kernel just has no choice but
978 to zero.
979
980 In 2001, the kernel had a maximum size for brk() which was about 800
981 megabytes on 32 bit x86, at that point brk() would hit the first
982 mmapped shared libraries and couldn't expand anymore. With current 2.6
983 kernels, the VA space layout is different and brk() and mmap
984 both can span the entire heap at will.
985
986 Rather than using a static threshold for the brk/mmap tradeoff,
987 we are now using a simple dynamic one. The goal is still to avoid
988 fragmentation. The old goals we kept are
989 1) try to get the long lived large allocations to use mmap()
990 2) really large allocations should always use mmap()
991 and we're adding now:
992 3) transient allocations should use brk() to avoid forcing the kernel
993 to zero memory over and over again
994
995 The implementation works with a sliding threshold, which is by default
996 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
997 out at 128Kb as per the 2001 default.
998
999 This allows us to satisfy requirement 1) under the assumption that long
1000 lived allocations are made early in the process' lifespan, before it has
1001 started doing dynamic allocations of the same size (which will
1002 increase the threshold).
1003
1004 The upper bound on the threshold satisfies requirement 2).
1005
1006 The threshold goes up in value when the application frees memory that was
1007 allocated with the mmap allocator. The idea is that once the application
1008 starts freeing memory of a certain size, it's highly probable that this is
1009 a size the application uses for transient allocations. This estimator
1010 is there to satisfy the new third requirement.
1011
1012 */
1013
1014 #define M_MMAP_THRESHOLD -3
1015
1016 #ifndef DEFAULT_MMAP_THRESHOLD
1017 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1018 #endif
1019
1020 /*
1021 M_MMAP_MAX is the maximum number of requests to simultaneously
1022 service using mmap. This parameter exists because
1023 some systems have a limited number of internal tables for
1024 use by mmap, and using more than a few of them may degrade
1025 performance.
1026
1027 The default is set to a value that serves only as a safeguard.
1028 Setting to 0 disables use of mmap for servicing large requests.
1029 */
1030
1031 #define M_MMAP_MAX -4
1032
1033 #ifndef DEFAULT_MMAP_MAX
1034 #define DEFAULT_MMAP_MAX (65536)
1035 #endif
1036
1037 #include <malloc.h>
1038
1039 #ifndef RETURN_ADDRESS
1040 #define RETURN_ADDRESS(X_) (NULL)
1041 #endif
1042
1043 /* On some platforms we can compile internal, not exported functions better.
1044 Let the environment provide a macro and define it to be empty if it
1045 is not available. */
1046 #ifndef internal_function
1047 # define internal_function
1048 #endif
1049
1050 /* Forward declarations. */
1051 struct malloc_chunk;
1052 typedef struct malloc_chunk* mchunkptr;
1053
1054 /* Internal routines. */
1055
1056 static void* _int_malloc(mstate, size_t);
1057 static void _int_free(mstate, mchunkptr, int);
1058 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1059 INTERNAL_SIZE_T);
1060 static void* _int_memalign(mstate, size_t, size_t);
1061 static void* _int_valloc(mstate, size_t);
1062 static void* _int_pvalloc(mstate, size_t);
1063 static void malloc_printerr(int action, const char *str, void *ptr);
1064
1065 static void* internal_function mem2mem_check(void *p, size_t sz);
1066 static int internal_function top_check(void);
1067 static void internal_function munmap_chunk(mchunkptr p);
1068 #if HAVE_MREMAP
1069 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1070 #endif
1071
1072 static void* malloc_check(size_t sz, const void *caller);
1073 static void free_check(void* mem, const void *caller);
1074 static void* realloc_check(void* oldmem, size_t bytes,
1075 const void *caller);
1076 static void* memalign_check(size_t alignment, size_t bytes,
1077 const void *caller);
1078 /* These routines are never needed in this configuration. */
1079 static void* malloc_atfork(size_t sz, const void *caller);
1080 static void free_atfork(void* mem, const void *caller);
1081
1082
1083 /* ------------- Optional versions of memcopy ---------------- */
1084
1085
1086 /*
1087 Note: memcpy is ONLY invoked with non-overlapping regions,
1088 so the (usually slower) memmove is not needed.
1089 */
1090
1091 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1092 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1093
1094
1095 /* ------------------ MMAP support ------------------ */
1096
1097
1098 #include <fcntl.h>
1099 #include <sys/mman.h>
1100
1101 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1102 # define MAP_ANONYMOUS MAP_ANON
1103 #endif
1104
1105 #ifndef MAP_NORESERVE
1106 # define MAP_NORESERVE 0
1107 #endif
1108
1109 #define MMAP(addr, size, prot, flags) \
1110 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1111
1112
1113 /*
1114 ----------------------- Chunk representations -----------------------
1115 */
1116
1117
1118 /*
1119 This struct declaration is misleading (but accurate and necessary).
1120 It declares a "view" into memory allowing access to necessary
1121 fields at known offsets from a given base. See explanation below.
1122 */
1123
1124 struct malloc_chunk {
1125
1126 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1127 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1128
1129 struct malloc_chunk* fd; /* double links -- used only if free. */
1130 struct malloc_chunk* bk;
1131
1132 /* Only used for large blocks: pointer to next larger size. */
1133 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1134 struct malloc_chunk* bk_nextsize;
1135 };
1136
1137
1138 /*
1139 malloc_chunk details:
1140
1141 (The following includes lightly edited explanations by Colin Plumb.)
1142
1143 Chunks of memory are maintained using a `boundary tag' method as
1144 described in e.g., Knuth or Standish. (See the paper by Paul
1145 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1146 survey of such techniques.) Sizes of free chunks are stored both
1147 in the front of each chunk and at the end. This makes
1148 consolidating fragmented chunks into bigger chunks very fast. The
1149 size fields also hold bits representing whether chunks are free or
1150 in use.
1151
1152 An allocated chunk looks like this:
1153
1154
1155 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1156 | Size of previous chunk, if allocated | |
1157 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1158 | Size of chunk, in bytes |M|P|
1159 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1160 | User data starts here... .
1161 . .
1162 . (malloc_usable_size() bytes) .
1163 . |
1164 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1165 | Size of chunk |
1166 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1167
1168
1169 Where "chunk" is the front of the chunk for the purpose of most of
1170 the malloc code, but "mem" is the pointer that is returned to the
1171 user. "Nextchunk" is the beginning of the next contiguous chunk.
1172
1173 Chunks always begin on even word boundaries, so the mem portion
1174 (which is returned to the user) is also on an even word boundary, and
1175 thus at least double-word aligned.
1176
1177 Free chunks are stored in circular doubly-linked lists, and look like this:
1178
1179 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1180 | Size of previous chunk |
1181 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1182 `head:' | Size of chunk, in bytes |P|
1183 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1184 | Forward pointer to next chunk in list |
1185 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1186 | Back pointer to previous chunk in list |
1187 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1188 | Unused space (may be 0 bytes long) .
1189 . .
1190 . |
1191 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1192 `foot:' | Size of chunk, in bytes |
1193 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1194
1195 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1196 chunk size (which is always a multiple of two words), is an in-use
1197 bit for the *previous* chunk. If that bit is *clear*, then the
1198 word before the current chunk size contains the previous chunk
1199 size, and can be used to find the front of the previous chunk.
1200 The very first chunk allocated always has this bit set,
1201 preventing access to non-existent (or non-owned) memory. If
1202 prev_inuse is set for any given chunk, then you CANNOT determine
1203 the size of the previous chunk, and might even get a memory
1204 addressing fault when trying to do so.
1205
1206 Note that the `foot' of the current chunk is actually represented
1207 as the prev_size of the NEXT chunk. This makes it easier to
1208 deal with alignments etc but can be very confusing when trying
1209 to extend or adapt this code.
1210
1211 The two exceptions to all this are
1212
1213 1. The special chunk `top' doesn't bother using the
1214 trailing size field since there is no next contiguous chunk
1215 that would have to index off it. After initialization, `top'
1216 is forced to always exist. If it would become less than
1217 MINSIZE bytes long, it is replenished.
1218
1219 2. Chunks allocated via mmap, which have the second-lowest-order
1220 bit M (IS_MMAPPED) set in their size fields. Because they are
1221 allocated one-by-one, each must contain its own trailing size field.
1222
1223 */
1224
1225 /*
1226 ---------- Size and alignment checks and conversions ----------
1227 */
1228
1229 /* conversion from malloc headers to user pointers, and back */
1230
1231 #define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
1232 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1233
1234 /* The smallest possible chunk */
1235 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1236
1237 /* The smallest size we can malloc is an aligned minimal chunk */
1238
1239 #define MINSIZE \
1240 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
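/*
  For example, assuming pointers are the same width as INTERNAL_SIZE_T:
  with 4-byte size_t and 8-byte alignment, MIN_CHUNK_SIZE is
  offsetof (struct malloc_chunk, fd_nextsize) == 16 and
  MINSIZE == (16 + 7) & ~7 == 16; with 8-byte size_t and 16-byte
  alignment, MIN_CHUNK_SIZE == 32 and MINSIZE == (32 + 15) & ~15 == 32.
*/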
1241
1242 /* Check if m has acceptable alignment */
1243
1244 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1245
1246 #define misaligned_chunk(p) \
1247 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1248 & MALLOC_ALIGN_MASK)
1249
1250
1251 /*
1252 Check if a request is so large that it would wrap around zero when
1253 padded and aligned. To simplify some other code, the bound is made
1254 low enough so that adding MINSIZE will also not wrap around zero.
1255 */
1256
1257 #define REQUEST_OUT_OF_RANGE(req) \
1258 ((unsigned long)(req) >= \
1259 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1260
1261 /* pad request bytes into a usable size -- internal version */
1262
1263 #define request2size(req) \
1264 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1265 MINSIZE : \
1266 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1267
1268 /* Same, except also perform argument check */
1269
1270 #define checked_request2size(req, sz) \
1271 if (REQUEST_OUT_OF_RANGE(req)) { \
1272 __set_errno (ENOMEM); \
1273 return 0; \
1274 } \
1275 (sz) = request2size(req);
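/*
  A worked example, assuming 8-byte size_t and 16-byte alignment
  (SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15, MINSIZE == 32):

    request2size (0)  == MINSIZE == 32
    request2size (24) == (24 + 8 + 15) & ~15 == 32
    request2size (25) == (25 + 8 + 15) & ~15 == 48

  Only SIZE_SZ bytes of overhead are added because an in-use chunk may
  let user data extend into the prev_size field of the next chunk.
*/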
1276
1277 /*
1278 --------------- Physical chunk operations ---------------
1279 */
1280
1281
1282 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1283 #define PREV_INUSE 0x1
1284
1285 /* extract inuse bit of previous chunk */
1286 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1287
1288
1289 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1290 #define IS_MMAPPED 0x2
1291
1292 /* check for mmap()'ed chunk */
1293 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1294
1295
1296 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1297 from a non-main arena. This is only set immediately before handing
1298 the chunk to the user, if necessary. */
1299 #define NON_MAIN_ARENA 0x4
1300
1301 /* check for chunk from non-main arena */
1302 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1303
1304
1305 /*
1306 Bits to mask off when extracting size
1307
1308 Note: IS_MMAPPED is intentionally not masked off from size field in
1309 macros for which mmapped chunks should never be seen. This should
1310 cause helpful core dumps to occur if it is tried by accident by
1311 people extending or adapting this malloc.
1312 */
1313 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1314
1315 /* Get size, ignoring use bits */
1316 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1317
1318
1319 /* Ptr to next physical malloc_chunk. */
1320 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1321
1322 /* Ptr to previous physical malloc_chunk */
1323 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1324
1325 /* Treat space at ptr + offset as a chunk */
1326 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1327
1328 /* extract p's inuse bit */
1329 #define inuse(p)\
1330 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1331
1332 /* set/clear chunk as being inuse without otherwise disturbing */
1333 #define set_inuse(p)\
1334 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1335
1336 #define clear_inuse(p)\
1337 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1338
1339
1340 /* check/set/clear inuse bits in known places */
1341 #define inuse_bit_at_offset(p, s)\
1342 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1343
1344 #define set_inuse_bit_at_offset(p, s)\
1345 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1346
1347 #define clear_inuse_bit_at_offset(p, s)\
1348 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1349
1350
1351 /* Set size at head, without disturbing its use bit */
1352 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1353
1354 /* Set size/use field */
1355 #define set_head(p, s) ((p)->size = (s))
1356
1357 /* Set size at footer (only when chunk is not in use) */
1358 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
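/*
  A minimal traversal sketch using the macros above.  For a chunk p
  that is not the top chunk:

    mchunkptr nxt = chunk_at_offset (p, chunksize (p));
    int p_in_use  = inuse_bit_at_offset (p, chunksize (p));

  nxt is the same chunk that next_chunk (p) yields, and p_in_use is
  p's own in-use bit (stored as PREV_INUSE in nxt).  The physically
  preceding chunk may be found with prev_chunk (p), but only when
  prev_inuse (p) is clear.
*/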
1359
1360
1361 /*
1362 -------------------- Internal data structures --------------------
1363
1364 All internal state is held in an instance of malloc_state defined
1365 below. There are no other static variables, except in two optional
1366 cases:
1367 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1368 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1369 for mmap.
1370
1371 Beware of lots of tricks that minimize the total bookkeeping space
1372 requirements. The result is a little over 1K bytes (for 4-byte
1373 pointers and size_t.)
1374 */
1375
1376 /*
1377 Bins
1378
1379 An array of bin headers for free chunks. Each bin is doubly
1380 linked. The bins are approximately proportionally (log) spaced.
1381 There are a lot of these bins (128). This may look excessive, but
1382 works very well in practice. Most bins hold sizes that are
1383 unusual as malloc request sizes, but are more usual for fragments
1384 and consolidated sets of chunks, which is what these bins hold, so
1385 they can be found quickly. All procedures maintain the invariant
1386 that no consolidated chunk physically borders another one, so each
1387 chunk in a list is known to be preceded and followed by either
1388 inuse chunks or the ends of memory.
1389
1390 Chunks in bins are kept in size order, with ties going to the
1391 approximately least recently used chunk. Ordering isn't needed
1392 for the small bins, which all contain the same-sized chunks, but
1393 facilitates best-fit allocation for larger chunks. These lists
1394 are just sequential. Keeping them in order almost never requires
1395 enough traversal to warrant using fancier ordered data
1396 structures.
1397
1398 Chunks of the same size are linked with the most
1399 recently freed at the front, and allocations are taken from the
1400 back. This results in LRU (FIFO) allocation order, which tends
1401 to give each chunk an equal opportunity to be consolidated with
1402 adjacent freed chunks, resulting in larger free chunks and less
1403 fragmentation.
1404
1405 To simplify use in double-linked lists, each bin header acts
1406 as a malloc_chunk. This avoids special-casing for headers.
1407 But to conserve space and improve locality, we allocate
1408 only the fd/bk pointers of bins, and then use repositioning tricks
1409 to treat these as the fields of a malloc_chunk*.
1410 */
1411
1412 typedef struct malloc_chunk* mbinptr;
1413
1414 /* addressing -- note that bin_at(0) does not exist */
1415 #define bin_at(m, i) \
1416 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1417 - offsetof (struct malloc_chunk, fd))
1418
1419 /* analog of ++bin */
1420 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1421
1422 /* Reminders about list directionality within bins */
1423 #define first(b) ((b)->fd)
1424 #define last(b) ((b)->bk)
1425
1426 /* Take a chunk off a bin list */
1427 #define unlink(P, BK, FD) { \
1428 FD = P->fd; \
1429 BK = P->bk; \
1430 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1431 malloc_printerr (check_action, "corrupted double-linked list", P); \
1432 else { \
1433 FD->bk = BK; \
1434 BK->fd = FD; \
1435 if (!in_smallbin_range (P->size) \
1436 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
1437 assert (P->fd_nextsize->bk_nextsize == P); \
1438 assert (P->bk_nextsize->fd_nextsize == P); \
1439 if (FD->fd_nextsize == NULL) { \
1440 if (P->fd_nextsize == P) \
1441 FD->fd_nextsize = FD->bk_nextsize = FD; \
1442 else { \
1443 FD->fd_nextsize = P->fd_nextsize; \
1444 FD->bk_nextsize = P->bk_nextsize; \
1445 P->fd_nextsize->bk_nextsize = FD; \
1446 P->bk_nextsize->fd_nextsize = FD; \
1447 } \
1448 } else { \
1449 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1450 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1451 } \
1452 } \
1453 } \
1454 }
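/* Usage sketch (illustrative only, never compiled): callers supply two
   scratch chunk pointers that receive the neighbours of P.  The names
   "victim", "bck" and "fwd" below are hypothetical locals. */
#if 0
  mchunkptr fwd, bck;
  unlink (victim, bck, fwd);   /* detach victim from its doubly linked bin */
#endif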
1455
1456 /*
1457 Indexing
1458
1459 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1460 8 bytes apart. Larger bins are approximately logarithmically spaced:
1461
1462 64 bins of size 8
1463 32 bins of size 64
1464 16 bins of size 512
1465 8 bins of size 4096
1466 4 bins of size 32768
1467 2 bins of size 262144
1468 1 bin of size what's left
1469
1470 There is actually a little bit of slop in the numbers in bin_index
1471 for the sake of speed. This makes no difference elsewhere.
1472
1473 The bins top out around 1MB because we expect to service large
1474 requests via mmap.
1475
1476 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1477 a valid chunk size the small bins are bumped up one.
1478 */
1479
1480 #define NBINS 128
1481 #define NSMALLBINS 64
1482 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1483 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1484 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1485
1486 #define in_smallbin_range(sz) \
1487 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
1488
1489 #define smallbin_index(sz) \
1490 ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
1491 + SMALLBIN_CORRECTION)
1492
1493 #define largebin_index_32(sz) \
1494 (((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
1495 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1496 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1497 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1498 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
1499 126)
1500
1501 #define largebin_index_32_big(sz) \
1502 (((((unsigned long)(sz)) >> 6) <= 45)? 49 + (((unsigned long)(sz)) >> 6): \
1503 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1504 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1505 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1506 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
1507 126)
1508
1509 // XXX It remains to be seen whether it is good to keep the widths of
1510 // XXX the buckets the same or whether it should be scaled by a factor
1511 // XXX of two as well.
1512 #define largebin_index_64(sz) \
1513 (((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \
1514 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1515 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1516 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1517 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
1518 126)
1519
1520 #define largebin_index(sz) \
1521 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1522 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1523 : largebin_index_32 (sz))
1524
1525 #define bin_index(sz) \
1526 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
1527
1528
1529 /*
1530 Unsorted chunks
1531
1532 All remainders from chunk splits, as well as all returned chunks,
1533 are first placed in the "unsorted" bin. They are then placed
1534 in regular bins after malloc gives them ONE chance to be used before
1535 binning. So, basically, the unsorted_chunks list acts as a queue,
1536 with chunks being placed on it in free (and malloc_consolidate),
1537 and taken off (to be either used or placed in bins) in malloc.
1538
1539 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1540 does not have to be taken into account in size comparisons.
1541 */
1542
1543 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1544 #define unsorted_chunks(M) (bin_at(M, 1))
1545
1546 /*
1547 Top
1548
1549 The top-most available chunk (i.e., the one bordering the end of
1550 available memory) is treated specially. It is never included in
1551 any bin, is used only if no other chunk is available, and is
1552 released back to the system if it is very large (see
1553 M_TRIM_THRESHOLD). Because top initially
1554 points to its own bin with initial zero size, thus forcing
1555 extension on the first malloc request, we avoid having any special
1556 code in malloc to check whether it even exists yet. But we still
1557 need to do so when getting memory from system, so we make
1558 initial_top treat the bin as a legal but unusable chunk during the
1559 interval between initialization and the first call to
1560 sysmalloc. (This is somewhat delicate, since it relies on
1561 the 2 preceding words to be zero during this interval as well.)
1562 */
1563
1564 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1565 #define initial_top(M) (unsorted_chunks(M))
1566
1567 /*
1568 Binmap
1569
1570 To help compensate for the large number of bins, a one-level index
1571 structure is used for bin-by-bin searching. `binmap' is a
1572 bitvector recording whether bins are definitely empty so they can
1573 be skipped over during traversals. The bits are NOT always
1574 cleared as soon as bins are empty, but instead only
1575 when they are noticed to be empty during traversal in malloc.
1576 */
1577
1578 /* Conservatively use 32 bits per map word, even if on 64bit system */
1579 #define BINMAPSHIFT 5
1580 #define BITSPERMAP (1U << BINMAPSHIFT)
1581 #define BINMAPSIZE (NBINS / BITSPERMAP)
1582
1583 #define idx2block(i) ((i) >> BINMAPSHIFT)
1584 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
1585
1586 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
1587 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
1588 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
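/* Worked example: bin index 70 lives in map word 70 >> 5 == 2 and is
   represented there by bit 1U << (70 & 31) == 1U << 6; mark_bin and
   unmark_bin set or clear that bit, and get_binmap tests it. */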
1589
1590 /*
1591 Fastbins
1592
1593 An array of lists holding recently freed small chunks. Fastbins
1594 are not doubly linked. It is faster to single-link them, and
1595 since chunks are never removed from the middles of these lists,
1596 double linking is not necessary. Also, unlike regular bins, they
1597 are not even processed in FIFO order (they use faster LIFO) since
1598 ordering doesn't much matter in the transient contexts in which
1599 fastbins are normally used.
1600
1601 Chunks in fastbins keep their inuse bit set, so they cannot
1602 be consolidated with other free chunks. malloc_consolidate
1603 releases all chunks in fastbins and consolidates them with
1604 other free chunks.
1605 */
1606
1607 typedef struct malloc_chunk* mfastbinptr;
1608 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1609
1610 /* offset 2 to use otherwise unindexable first 2 bins */
1611 #define fastbin_index(sz) \
1612 ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1613
1614
1615 /* The maximum fastbin request size we support */
1616 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1617
1618 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
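/* Worked example, assuming a typical 64-bit configuration (SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16): MAX_FAST_SIZE is 160, request2size (160) is
   176, fastbin_index (176) is (176 >> 4) - 2 == 9, so NFASTBINS is 10.
   The smallest possible chunk (32 bytes) maps to fastbin_index (32) == 0,
   which is what the "- 2" offset arranges. */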
1619
1620 /*
1621 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1622 that triggers automatic consolidation of possibly-surrounding
1623 fastbin chunks. This is a heuristic, so the exact value should not
1624 matter too much. It is defined at half the default trim threshold as a
1625 compromise heuristic to only attempt consolidation if it is likely
1626 to lead to trimming. However, it is not dynamically tunable, since
1627 consolidation reduces fragmentation surrounding large chunks even
1628 if trimming is not used.
1629 */
1630
1631 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1632
1633 /*
1634 Since the lowest 2 bits in max_fast didn't matter in size comparisons,
1635 they were historically used as flags; those flag bits now live in the flags field.
1636 */
1637
1638 /*
1639 FASTCHUNKS_BIT, held in the arena's flags field, indicates that there are probably
1640 some fastbin chunks. It is set true on entering a chunk into any
1641 fastbin, and cleared only in malloc_consolidate.
1642
1643 The truth value is inverted so that have_fastchunks will be true
1644 upon startup (since statics are zero-filled), simplifying
1645 initialization checks.
1646 */
1647
1648 #define FASTCHUNKS_BIT (1U)
1649
1650 #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
1651 #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
1652 #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
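/* Note on the inverted encoding: a zero-filled arena has flags == 0, so
   have_fastchunks() already reports true before anything is freed.
   set_fastchunks() clears FASTCHUNKS_BIT (fastbins may be populated) and
   clear_fastchunks() sets it (fastbins known empty), both atomically. */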
1653
1654 /*
1655 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1656 regions. Otherwise, contiguity is exploited in merging together,
1657 when possible, results from consecutive MORECORE calls.
1658
1659 The initial value comes from MORECORE_CONTIGUOUS, but is
1660 changed dynamically if mmap is ever used as an sbrk substitute.
1661 */
1662
1663 #define NONCONTIGUOUS_BIT (2U)
1664
1665 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1666 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1667 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1668 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1669
1670 /*
1671 Set value of max_fast.
1672 Use impossibly small value if 0.
1673 Precondition: there are no existing fastbin chunks.
1674 Setting the value does not affect the flag bits; they live in the separate flags field.
1675 */
1676
1677 #define set_max_fast(s) \
1678 global_max_fast = (((s) == 0) \
1679 ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1680 #define get_max_fast() global_max_fast
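/* Worked example (typical 64-bit build): set_max_fast (64) stores
   (64 + SIZE_SZ) & ~MALLOC_ALIGN_MASK == 72 & ~15 == 64 in
   global_max_fast, while set_max_fast (0) stores SMALLBIN_WIDTH, a value
   smaller than any real chunk, effectively disabling fastbins. */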
1681
1682
1683 /*
1684 ----------- Internal state representation and initialization -----------
1685 */
1686
1687 struct malloc_state {
1688 /* Serialize access. */
1689 mutex_t mutex;
1690
1691 /* Flags (formerly in max_fast). */
1692 int flags;
1693
1694 #if THREAD_STATS
1695 /* Statistics for locking. Only used if THREAD_STATS is defined. */
1696 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
1697 #endif
1698
1699 /* Fastbins */
1700 mfastbinptr fastbinsY[NFASTBINS];
1701
1702 /* Base of the topmost chunk -- not otherwise kept in a bin */
1703 mchunkptr top;
1704
1705 /* The remainder from the most recent split of a small request */
1706 mchunkptr last_remainder;
1707
1708 /* Normal bins packed as described above */
1709 mchunkptr bins[NBINS * 2 - 2];
1710
1711 /* Bitmap of bins */
1712 unsigned int binmap[BINMAPSIZE];
1713
1714 /* Linked list */
1715 struct malloc_state *next;
1716
1717 #ifdef PER_THREAD
1718 /* Linked list for free arenas. */
1719 struct malloc_state *next_free;
1720 #endif
1721
1722 /* Memory allocated from the system in this arena. */
1723 INTERNAL_SIZE_T system_mem;
1724 INTERNAL_SIZE_T max_system_mem;
1725 };
1726
1727 struct malloc_par {
1728 /* Tunable parameters */
1729 unsigned long trim_threshold;
1730 INTERNAL_SIZE_T top_pad;
1731 INTERNAL_SIZE_T mmap_threshold;
1732 #ifdef PER_THREAD
1733 INTERNAL_SIZE_T arena_test;
1734 INTERNAL_SIZE_T arena_max;
1735 #endif
1736
1737 /* Memory map support */
1738 int n_mmaps;
1739 int n_mmaps_max;
1740 int max_n_mmaps;
1741 /* the mmap_threshold is dynamic, until the user sets
1742 it manually, at which point we need to disable any
1743 dynamic behavior. */
1744 int no_dyn_threshold;
1745
1746 /* Statistics */
1747 INTERNAL_SIZE_T mmapped_mem;
1748 /*INTERNAL_SIZE_T sbrked_mem;*/
1749 /*INTERNAL_SIZE_T max_sbrked_mem;*/
1750 INTERNAL_SIZE_T max_mmapped_mem;
1751 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
1752
1753 /* First address handed out by MORECORE/sbrk. */
1754 char* sbrk_base;
1755 };
1756
1757 /* There are several instances of this struct ("arenas") in this
1758 malloc. If you are adapting this malloc in a way that does NOT use
1759 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1760 before using. This malloc relies on the property that malloc_state
1761 is initialized to all zeroes (as is true of C statics). */
1762
1763 static struct malloc_state main_arena =
1764 {
1765 .mutex = MUTEX_INITIALIZER,
1766 .next = &main_arena
1767 };
1768
1769 /* There is only one instance of the malloc parameters. */
1770
1771 static struct malloc_par mp_ =
1772 {
1773 .top_pad = DEFAULT_TOP_PAD,
1774 .n_mmaps_max = DEFAULT_MMAP_MAX,
1775 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1776 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1777 #ifdef PER_THREAD
1778 # define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
1779 .arena_test = NARENAS_FROM_NCORES (1)
1780 #endif
1781 };
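/* For reference, NARENAS_FROM_NCORES (1) evaluates to 2 when
   sizeof (long) == 4 (typical 32-bit) and to 8 otherwise (typical
   64-bit); this is the default arena_test value set above. */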
1782
1783
1784 #ifdef PER_THREAD
1785 /* Non public mallopt parameters. */
1786 #define M_ARENA_TEST -7
1787 #define M_ARENA_MAX -8
1788 #endif
1789
1790
1791 /* Maximum size of memory handled in fastbins. */
1792 static INTERNAL_SIZE_T global_max_fast;
1793
1794 /*
1795 Initialize a malloc_state struct.
1796
1797 This is called only from within malloc_consolidate, which needs to
1798 be called in the same contexts anyway. It is never called directly
1799 outside of malloc_consolidate because some optimizing compilers try
1800 to inline it at all call points, which turns out not to be an
1801 optimization at all. (Inlining it in malloc_consolidate is fine though.)
1802 */
1803
1804 static void malloc_init_state(mstate av)
1805 {
1806 int i;
1807 mbinptr bin;
1808
1809 /* Establish circular links for normal bins */
1810 for (i = 1; i < NBINS; ++i) {
1811 bin = bin_at(av,i);
1812 bin->fd = bin->bk = bin;
1813 }
1814
1815 #if MORECORE_CONTIGUOUS
1816 if (av != &main_arena)
1817 #endif
1818 set_noncontiguous(av);
1819 if (av == &main_arena)
1820 set_max_fast(DEFAULT_MXFAST);
1821 av->flags |= FASTCHUNKS_BIT;
1822
1823 av->top = initial_top(av);
1824 }
1825
1826 /*
1827 Other internal utilities operating on mstates
1828 */
1829
1830 static void* sysmalloc(INTERNAL_SIZE_T, mstate);
1831 static int systrim(size_t, mstate);
1832 static void malloc_consolidate(mstate);
1833
1834
1835 /* -------------- Early definitions for debugging hooks ---------------- */
1836
1837 /* Define and initialize the hook variables. These weak definitions must
1838 appear before any use of the variables in a function (arena.c uses one). */
1839 #ifndef weak_variable
1840 /* In GNU libc we want the hook variables to be weak definitions to
1841 avoid a problem with Emacs. */
1842 # define weak_variable weak_function
1843 #endif
1844
1845 /* Forward declarations. */
1846 static void* malloc_hook_ini __MALLOC_P ((size_t sz,
1847 const __malloc_ptr_t caller));
1848 static void* realloc_hook_ini __MALLOC_P ((void* ptr, size_t sz,
1849 const __malloc_ptr_t caller));
1850 static void* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
1851 const __malloc_ptr_t caller));
1852
1853 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1854 void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
1855 const __malloc_ptr_t) = NULL;
1856 __malloc_ptr_t weak_variable (*__malloc_hook)
1857 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
1858 __malloc_ptr_t weak_variable (*__realloc_hook)
1859 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
1860 = realloc_hook_ini;
1861 __malloc_ptr_t weak_variable (*__memalign_hook)
1862 (size_t __alignment, size_t __size, const __malloc_ptr_t)
1863 = memalign_hook_ini;
1864 void weak_variable (*__after_morecore_hook) (void) = NULL;
1865
1866
1867 /* ---------------- Error behavior ------------------------------------ */
1868
1869 #ifndef DEFAULT_CHECK_ACTION
1870 #define DEFAULT_CHECK_ACTION 3
1871 #endif
1872
1873 static int check_action = DEFAULT_CHECK_ACTION;
1874
1875
1876 /* ------------------ Testing support ----------------------------------*/
1877
1878 static int perturb_byte;
1879
1880 #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
1881 #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
1882
1883
1884 /* ------------------- Support for multiple arenas -------------------- */
1885 #include "arena.c"
1886
1887 /*
1888 Debugging support
1889
1890 These routines make a number of assertions about the states
1891 of data structures that should be true at all times. If any
1892 are not true, it's very likely that a user program has somehow
1893 trashed memory. (It's also possible that there is a coding error
1894 in malloc, in which case please report it!)
1895 */
1896
1897 #if ! MALLOC_DEBUG
1898
1899 #define check_chunk(A,P)
1900 #define check_free_chunk(A,P)
1901 #define check_inuse_chunk(A,P)
1902 #define check_remalloced_chunk(A,P,N)
1903 #define check_malloced_chunk(A,P,N)
1904 #define check_malloc_state(A)
1905
1906 #else
1907
1908 #define check_chunk(A,P) do_check_chunk(A,P)
1909 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
1910 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
1911 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
1912 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
1913 #define check_malloc_state(A) do_check_malloc_state(A)
1914
1915 /*
1916 Properties of all chunks
1917 */
1918
1919 static void do_check_chunk(mstate av, mchunkptr p)
1920 {
1921 unsigned long sz = chunksize(p);
1922 /* min and max possible addresses assuming contiguous allocation */
1923 char* max_address = (char*)(av->top) + chunksize(av->top);
1924 char* min_address = max_address - av->system_mem;
1925
1926 if (!chunk_is_mmapped(p)) {
1927
1928 /* Has legal address ... */
1929 if (p != av->top) {
1930 if (contiguous(av)) {
1931 assert(((char*)p) >= min_address);
1932 assert(((char*)p + sz) <= ((char*)(av->top)));
1933 }
1934 }
1935 else {
1936 /* top size is always at least MINSIZE */
1937 assert((unsigned long)(sz) >= MINSIZE);
1938 /* top predecessor always marked inuse */
1939 assert(prev_inuse(p));
1940 }
1941
1942 }
1943 else {
1944 /* address is outside main heap */
1945 if (contiguous(av) && av->top != initial_top(av)) {
1946 assert(((char*)p) < min_address || ((char*)p) >= max_address);
1947 }
1948 /* chunk is page-aligned */
1949 assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0);
1950 /* mem is aligned */
1951 assert(aligned_OK(chunk2mem(p)));
1952 }
1953 }
1954
1955 /*
1956 Properties of free chunks
1957 */
1958
1959 static void do_check_free_chunk(mstate av, mchunkptr p)
1960 {
1961 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
1962 mchunkptr next = chunk_at_offset(p, sz);
1963
1964 do_check_chunk(av, p);
1965
1966 /* Chunk must claim to be free ... */
1967 assert(!inuse(p));
1968 assert (!chunk_is_mmapped(p));
1969
1970 /* Unless a special marker, must have OK fields */
1971 if ((unsigned long)(sz) >= MINSIZE)
1972 {
1973 assert((sz & MALLOC_ALIGN_MASK) == 0);
1974 assert(aligned_OK(chunk2mem(p)));
1975 /* ... matching footer field */
1976 assert(next->prev_size == sz);
1977 /* ... and is fully consolidated */
1978 assert(prev_inuse(p));
1979 assert (next == av->top || inuse(next));
1980
1981 /* ... and has minimally sane links */
1982 assert(p->fd->bk == p);
1983 assert(p->bk->fd == p);
1984 }
1985 else /* markers are always of size SIZE_SZ */
1986 assert(sz == SIZE_SZ);
1987 }
1988
1989 /*
1990 Properties of inuse chunks
1991 */
1992
1993 static void do_check_inuse_chunk(mstate av, mchunkptr p)
1994 {
1995 mchunkptr next;
1996
1997 do_check_chunk(av, p);
1998
1999 if (chunk_is_mmapped(p))
2000 return; /* mmapped chunks have no next/prev */
2001
2002 /* Check whether it claims to be in use ... */
2003 assert(inuse(p));
2004
2005 next = next_chunk(p);
2006
2007 /* ... and is surrounded by OK chunks.
2008 Since more things can be checked with free chunks than inuse ones,
2009 if an inuse chunk borders them and debugging is on, it's worth checking them.
2010 */
2011 if (!prev_inuse(p)) {
2012 /* Note that we cannot even look at prev unless it is not inuse */
2013 mchunkptr prv = prev_chunk(p);
2014 assert(next_chunk(prv) == p);
2015 do_check_free_chunk(av, prv);
2016 }
2017
2018 if (next == av->top) {
2019 assert(prev_inuse(next));
2020 assert(chunksize(next) >= MINSIZE);
2021 }
2022 else if (!inuse(next))
2023 do_check_free_chunk(av, next);
2024 }
2025
2026 /*
2027 Properties of chunks recycled from fastbins
2028 */
2029
2030 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2031 {
2032 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2033
2034 if (!chunk_is_mmapped(p)) {
2035 assert(av == arena_for_chunk(p));
2036 if (chunk_non_main_arena(p))
2037 assert(av != &main_arena);
2038 else
2039 assert(av == &main_arena);
2040 }
2041
2042 do_check_inuse_chunk(av, p);
2043
2044 /* Legal size ... */
2045 assert((sz & MALLOC_ALIGN_MASK) == 0);
2046 assert((unsigned long)(sz) >= MINSIZE);
2047 /* ... and alignment */
2048 assert(aligned_OK(chunk2mem(p)));
2049 /* chunk is less than MINSIZE more than request */
2050 assert((long)(sz) - (long)(s) >= 0);
2051 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2052 }
2053
2054 /*
2055 Properties of nonrecycled chunks at the point they are malloced
2056 */
2057
2058 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2059 {
2060 /* same as recycled case ... */
2061 do_check_remalloced_chunk(av, p, s);
2062
2063 /*
2064 ... plus, must obey implementation invariant that prev_inuse is
2065 always true of any allocated chunk; i.e., that each allocated
2066 chunk borders either a previously allocated and still in-use
2067 chunk, or the base of its memory arena. This is ensured
2068 by making all allocations from the `lowest' part of any found
2069 chunk. This does not necessarily hold however for chunks
2070 recycled via fastbins.
2071 */
2072
2073 assert(prev_inuse(p));
2074 }
2075
2076
2077 /*
2078 Properties of malloc_state.
2079
2080 This may be useful for debugging malloc, as well as detecting user
2081 programmer errors that somehow write into malloc_state.
2082
2083 If you are extending or experimenting with this malloc, you can
2084 probably figure out how to hack this routine to print out or
2085 display chunk addresses, sizes, bins, and other instrumentation.
2086 */
2087
2088 static void do_check_malloc_state(mstate av)
2089 {
2090 int i;
2091 mchunkptr p;
2092 mchunkptr q;
2093 mbinptr b;
2094 unsigned int idx;
2095 INTERNAL_SIZE_T size;
2096 unsigned long total = 0;
2097 int max_fast_bin;
2098
2099 /* internal size_t must be no wider than pointer type */
2100 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2101
2102 /* alignment is a power of 2 */
2103 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2104
2105 /* cannot run remaining checks until fully initialized */
2106 if (av->top == 0 || av->top == initial_top(av))
2107 return;
2108
2109 /* pagesize is a power of 2 */
2110 assert((GLRO(dl_pagesize) & (GLRO(dl_pagesize)-1)) == 0);
2111
2112 /* A contiguous main_arena is consistent with sbrk_base. */
2113 if (av == &main_arena && contiguous(av))
2114 assert((char*)mp_.sbrk_base + av->system_mem ==
2115 (char*)av->top + chunksize(av->top));
2116
2117 /* properties of fastbins */
2118
2119 /* max_fast is in allowed range */
2120 assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
2121
2122 max_fast_bin = fastbin_index(get_max_fast ());
2123
2124 for (i = 0; i < NFASTBINS; ++i) {
2125 p = fastbin (av, i);
2126
2127 /* The following test can only be performed for the main arena.
2128 While mallopt calls malloc_consolidate to get rid of all fast
2129 bins (especially those larger than the new maximum), this only
2130 happens for the main arena. Trying to do this for any
2131 other arena would mean those arenas have to be locked and
2132 malloc_consolidate called for them. This is excessive. And
2133 even if this were acceptable to somebody, it still cannot solve
2134 the problem completely since if the arena is locked a
2135 concurrent malloc call might create a new arena which then
2136 could use the newly invalid fast bins. */
2137
2138 /* all bins past max_fast are empty */
2139 if (av == &main_arena && i > max_fast_bin)
2140 assert(p == 0);
2141
2142 while (p != 0) {
2143 /* each chunk claims to be inuse */
2144 do_check_inuse_chunk(av, p);
2145 total += chunksize(p);
2146 /* chunk belongs in this bin */
2147 assert(fastbin_index(chunksize(p)) == i);
2148 p = p->fd;
2149 }
2150 }
2151
2152 if (total != 0)
2153 assert(have_fastchunks(av));
2154 else if (!have_fastchunks(av))
2155 assert(total == 0);
2156
2157 /* check normal bins */
2158 for (i = 1; i < NBINS; ++i) {
2159 b = bin_at(av,i);
2160
2161 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2162 if (i >= 2) {
2163 unsigned int binbit = get_binmap(av,i);
2164 int empty = last(b) == b;
2165 if (!binbit)
2166 assert(empty);
2167 else if (!empty)
2168 assert(binbit);
2169 }
2170
2171 for (p = last(b); p != b; p = p->bk) {
2172 /* each chunk claims to be free */
2173 do_check_free_chunk(av, p);
2174 size = chunksize(p);
2175 total += size;
2176 if (i >= 2) {
2177 /* chunk belongs in bin */
2178 idx = bin_index(size);
2179 assert(idx == i);
2180 /* lists are sorted */
2181 assert(p->bk == b ||
2182 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2183
2184 if (!in_smallbin_range(size))
2185 {
2186 if (p->fd_nextsize != NULL)
2187 {
2188 if (p->fd_nextsize == p)
2189 assert (p->bk_nextsize == p);
2190 else
2191 {
2192 if (p->fd_nextsize == first (b))
2193 assert (chunksize (p) < chunksize (p->fd_nextsize));
2194 else
2195 assert (chunksize (p) > chunksize (p->fd_nextsize));
2196
2197 if (p == first (b))
2198 assert (chunksize (p) > chunksize (p->bk_nextsize));
2199 else
2200 assert (chunksize (p) < chunksize (p->bk_nextsize));
2201 }
2202 }
2203 else
2204 assert (p->bk_nextsize == NULL);
2205 }
2206 } else if (!in_smallbin_range(size))
2207 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2208 /* chunk is followed by a legal chain of inuse chunks */
2209 for (q = next_chunk(p);
2210 (q != av->top && inuse(q) &&
2211 (unsigned long)(chunksize(q)) >= MINSIZE);
2212 q = next_chunk(q))
2213 do_check_inuse_chunk(av, q);
2214 }
2215 }
2216
2217 /* top chunk is OK */
2218 check_chunk(av, av->top);
2219
2220 /* sanity checks for statistics */
2221
2222 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2223
2224 assert((unsigned long)(av->system_mem) <=
2225 (unsigned long)(av->max_system_mem));
2226
2227 assert((unsigned long)(mp_.mmapped_mem) <=
2228 (unsigned long)(mp_.max_mmapped_mem));
2229 }
2230 #endif
2231
2232
2233 /* ----------------- Support for debugging hooks -------------------- */
2234 #include "hooks.c"
2235
2236
2237 /* ----------- Routines dealing with system allocation -------------- */
2238
2239 /*
2240 sysmalloc handles malloc cases requiring more memory from the system.
2241 On entry, it is assumed that av->top does not have enough
2242 space to service request for nb bytes, thus requiring that av->top
2243 be extended or replaced.
2244 */
2245
2246 static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
2247 {
2248 mchunkptr old_top; /* incoming value of av->top */
2249 INTERNAL_SIZE_T old_size; /* its size */
2250 char* old_end; /* its end address */
2251
2252 long size; /* arg to first MORECORE or mmap call */
2253 char* brk; /* return value from MORECORE */
2254
2255 long correction; /* arg to 2nd MORECORE call */
2256 char* snd_brk; /* 2nd return val */
2257
2258 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2259 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2260 char* aligned_brk; /* aligned offset into brk */
2261
2262 mchunkptr p; /* the allocated/returned chunk */
2263 mchunkptr remainder; /* remainder from allocation */
2264 unsigned long remainder_size; /* its size */
2265
2266 unsigned long sum; /* for updating stats */
2267
2268 size_t pagemask = GLRO(dl_pagesize) - 1;
2269 bool tried_mmap = false;
2270
2271
2272 /*
2273 If have mmap, and the request size meets the mmap threshold, and
2274 the system supports mmap, and there are few enough currently
2275 allocated mmapped regions, try to directly map this request
2276 rather than expanding top.
2277 */
2278
2279 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2280 (mp_.n_mmaps < mp_.n_mmaps_max)) {
2281
2282 char* mm; /* return value from mmap call*/
2283
2284 try_mmap:
2285 /*
2286 Round up size to nearest page. For mmapped chunks, the overhead
2287 is one SIZE_SZ unit larger than for normal chunks, because there
2288 is no following chunk whose prev_size field could be used.
2289
2290 See the front_misalign handling below; for glibc there is no
2291 need for further alignments unless we have high alignment.
2292 */
2293 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2294 size = (nb + SIZE_SZ + pagemask) & ~pagemask;
2295 else
2296 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
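    /* Worked example (4096-byte pages, 64-bit, MALLOC_ALIGNMENT ==
       2 * SIZE_SZ): nb == 200000 gives size == (200008 + 4095) & ~4095
       == 200704, i.e. 49 whole pages. */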
2297 tried_mmap = true;
2298
2299 /* Don't try if size wraps around 0 */
2300 if ((unsigned long)(size) > (unsigned long)(nb)) {
2301
2302 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0));
2303
2304 if (mm != MAP_FAILED) {
2305
2306 /*
2307 The offset to the start of the mmapped region is stored
2308 in the prev_size field of the chunk. This allows us to adjust
2309 returned start address to meet alignment requirements here
2310 and in memalign(), and still be able to compute proper
2311 address argument for later munmap in free() and realloc().
2312 */
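    /* Illustration: if chunk2mem (mm) is misaligned by front_misalign
       bytes, the chunk is placed at mm + correction and p->prev_size
       records that correction, so munmap_chunk() can later recover the
       original mapping start as (char *) p - p->prev_size. */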
2313
2314 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2315 {
2316 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2317 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2318 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2319 assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
2320 front_misalign = 0;
2321 }
2322 else
2323 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2324 if (front_misalign > 0) {
2325 correction = MALLOC_ALIGNMENT - front_misalign;
2326 p = (mchunkptr)(mm + correction);
2327 p->prev_size = correction;
2328 set_head(p, (size - correction) |IS_MMAPPED);
2329 }
2330 else
2331 {
2332 p = (mchunkptr)mm;
2333 set_head(p, size|IS_MMAPPED);
2334 }
2335
2336 /* update statistics */
2337
2338 if (++mp_.n_mmaps > mp_.max_n_mmaps)
2339 mp_.max_n_mmaps = mp_.n_mmaps;
2340
2341 sum = mp_.mmapped_mem += size;
2342 if (sum > (unsigned long)(mp_.max_mmapped_mem))
2343 mp_.max_mmapped_mem = sum;
2344
2345 check_chunk(av, p);
2346
2347 return chunk2mem(p);
2348 }
2349 }
2350 }
2351
2352 /* Record incoming configuration of top */
2353
2354 old_top = av->top;
2355 old_size = chunksize(old_top);
2356 old_end = (char*)(chunk_at_offset(old_top, old_size));
2357
2358 brk = snd_brk = (char*)(MORECORE_FAILURE);
2359
2360 /*
2361 If not the first time through, we require old_size to be
2362 at least MINSIZE and to have prev_inuse set.
2363 */
2364
2365 assert((old_top == initial_top(av) && old_size == 0) ||
2366 ((unsigned long) (old_size) >= MINSIZE &&
2367 prev_inuse(old_top) &&
2368 ((unsigned long)old_end & pagemask) == 0));
2369
2370 /* Precondition: not enough current space to satisfy nb request */
2371 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2372
2373
2374 if (av != &main_arena) {
2375
2376 heap_info *old_heap, *heap;
2377 size_t old_heap_size;
2378
2379 /* First try to extend the current heap. */
2380 old_heap = heap_for_ptr(old_top);
2381 old_heap_size = old_heap->size;
2382 if ((long) (MINSIZE + nb - old_size) > 0
2383 && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
2384 av->system_mem += old_heap->size - old_heap_size;
2385 arena_mem += old_heap->size - old_heap_size;
2386 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2387 | PREV_INUSE);
2388 }
2389 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2390 /* Use a newly allocated heap. */
2391 heap->ar_ptr = av;
2392 heap->prev = old_heap;
2393 av->system_mem += heap->size;
2394 arena_mem += heap->size;
2395 /* Set up the new top. */
2396 top(av) = chunk_at_offset(heap, sizeof(*heap));
2397 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2398
2399 /* Setup fencepost and free the old top chunk. */
2400 /* The fencepost takes at least MINSIZE bytes, because it might
2401 become the top chunk again later. Note that a footer is set
2402 up, too, although the chunk is marked in use. */
2403 old_size -= MINSIZE;
2404 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2405 if (old_size >= MINSIZE) {
2406 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2407 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2408 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
2409 _int_free(av, old_top, 1);
2410 } else {
2411 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2412 set_foot(old_top, (old_size + 2*SIZE_SZ));
2413 }
2414 }
2415 else if (!tried_mmap)
2416 /* We can at least try to use mmap memory. */
2417 goto try_mmap;
2418
2419 } else { /* av == main_arena */
2420
2421
2422 /* Request enough space for nb + pad + overhead */
2423
2424 size = nb + mp_.top_pad + MINSIZE;
2425
2426 /*
2427 If contiguous, we can subtract out existing space that we hope to
2428 combine with new space. We add it back later only if
2429 we don't actually get contiguous space.
2430 */
2431
2432 if (contiguous(av))
2433 size -= old_size;
2434
2435 /*
2436 Round to a multiple of page size.
2437 If MORECORE is not contiguous, this ensures that we only call it
2438 with whole-page arguments. And if MORECORE is contiguous and
2439 this is not first time through, this preserves page-alignment of
2440 previous calls. Otherwise, we correct to page-align below.
2441 */
2442
2443 size = (size + pagemask) & ~pagemask;
2444
2445 /*
2446 Don't try to call MORECORE if argument is so big as to appear
2447 negative. Note that since mmap takes size_t arg, it may succeed
2448 below even if we cannot call MORECORE.
2449 */
2450
2451 if (size > 0)
2452 brk = (char*)(MORECORE(size));
2453
2454 if (brk != (char*)(MORECORE_FAILURE)) {
2455 /* Call the `morecore' hook if necessary. */
2456 void (*hook) (void) = force_reg (__after_morecore_hook);
2457 if (__builtin_expect (hook != NULL, 0))
2458 (*hook) ();
2459 } else {
2460 /*
2461 If have mmap, try using it as a backup when MORECORE fails or
2462 cannot be used. This is worth doing on systems that have "holes" in
2463 address space, so sbrk cannot extend to give contiguous space, but
2464 space is available elsewhere. Note that we ignore mmap max count
2465 and threshold limits, since the space will not be used as a
2466 segregated mmap region.
2467 */
2468
2469 /* Cannot merge with old top, so add its size back in */
2470 if (contiguous(av))
2471 size = (size + old_size + pagemask) & ~pagemask;
2472
2473 /* If we are relying on mmap as backup, then use larger units */
2474 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2475 size = MMAP_AS_MORECORE_SIZE;
2476
2477 /* Don't try if size wraps around 0 */
2478 if ((unsigned long)(size) > (unsigned long)(nb)) {
2479
2480 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0));
2481
2482 if (mbrk != MAP_FAILED) {
2483
2484 /* We do not need, and cannot use, another sbrk call to find end */
2485 brk = mbrk;
2486 snd_brk = brk + size;
2487
2488 /*
2489 Record that we no longer have a contiguous sbrk region.
2490 After the first time mmap is used as backup, we do not
2491 ever rely on contiguous space since this could incorrectly
2492 bridge regions.
2493 */
2494 set_noncontiguous(av);
2495 }
2496 }
2497 }
2498
2499 if (brk != (char*)(MORECORE_FAILURE)) {
2500 if (mp_.sbrk_base == 0)
2501 mp_.sbrk_base = brk;
2502 av->system_mem += size;
2503
2504 /*
2505 If MORECORE extends previous space, we can likewise extend top size.
2506 */
2507
2508 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
2509 set_head(old_top, (size + old_size) | PREV_INUSE);
2510
2511 else if (contiguous(av) && old_size && brk < old_end) {
2512 /* Oops! Someone else killed our space. Can't touch anything. */
2513 malloc_printerr (3, "break adjusted to free malloc space", brk);
2514 }
2515
2516 /*
2517 Otherwise, make adjustments:
2518
2519 * If the first time through or noncontiguous, we need to call sbrk
2520 just to find out where the end of memory lies.
2521
2522 * We need to ensure that all returned chunks from malloc will meet
2523 MALLOC_ALIGNMENT
2524
2525 * If there was an intervening foreign sbrk, we need to adjust sbrk
2526 request size to account for fact that we will not be able to
2527 combine new space with existing space in old_top.
2528
2529 * Almost all systems internally allocate whole pages at a time, in
2530 which case we might as well use the whole last page of request.
2531 So we allocate enough more memory to hit a page boundary now,
2532 which in turn causes future contiguous calls to page-align.
2533 */
2534
2535 else {
2536 front_misalign = 0;
2537 end_misalign = 0;
2538 correction = 0;
2539 aligned_brk = brk;
2540
2541 /* handle contiguous cases */
2542 if (contiguous(av)) {
2543
2544 /* Count foreign sbrk as system_mem. */
2545 if (old_size)
2546 av->system_mem += brk - old_end;
2547
2548 /* Guarantee alignment of first new chunk made from this space */
2549
2550 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2551 if (front_misalign > 0) {
2552
2553 /*
2554 Skip over some bytes to arrive at an aligned position.
2555 We don't need to specially mark these wasted front bytes.
2556 They will never be accessed anyway because
2557 prev_inuse of av->top (and any chunk created from its start)
2558 is always true after initialization.
2559 */
2560
2561 correction = MALLOC_ALIGNMENT - front_misalign;
2562 aligned_brk += correction;
2563 }
2564
2565 /*
2566 If this isn't adjacent to existing space, then we will not
2567 be able to merge with old_top space, so must add to 2nd request.
2568 */
2569
2570 correction += old_size;
2571
2572 /* Extend the end address to hit a page boundary */
2573 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
2574 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
2575
2576 assert(correction >= 0);
2577 snd_brk = (char*)(MORECORE(correction));
2578
2579 /*
2580 If can't allocate correction, try to at least find out current
2581 brk. It might be enough to proceed without failing.
2582
2583 Note that if second sbrk did NOT fail, we assume that space
2584 is contiguous with first sbrk. This is a safe assumption unless
2585 program is multithreaded but doesn't use locks and a foreign sbrk
2586 occurred between our first and second calls.
2587 */
2588
2589 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2590 correction = 0;
2591 snd_brk = (char*)(MORECORE(0));
2592 } else {
2593 /* Call the `morecore' hook if necessary. */
2594 void (*hook) (void) = force_reg (__after_morecore_hook);
2595 if (__builtin_expect (hook != NULL, 0))
2596 (*hook) ();
2597 }
2598 }
2599
2600 /* handle non-contiguous cases */
2601 else {
2602 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2603 /* MORECORE/mmap must correctly align */
2604 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
2605 else {
2606 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2607 if (front_misalign > 0) {
2608
2609 /*
2610 Skip over some bytes to arrive at an aligned position.
2611 We don't need to specially mark these wasted front bytes.
2612 They will never be accessed anyway because
2613 prev_inuse of av->top (and any chunk created from its start)
2614 is always true after initialization.
2615 */
2616
2617 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2618 }
2619 }
2620
2621 /* Find out current end of memory */
2622 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2623 snd_brk = (char*)(MORECORE(0));
2624 }
2625 }
2626
2627 /* Adjust top based on results of second sbrk */
2628 if (snd_brk != (char*)(MORECORE_FAILURE)) {
2629 av->top = (mchunkptr)aligned_brk;
2630 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2631 av->system_mem += correction;
2632
2633 /*
2634 If not the first time through, we either have a
2635 gap due to foreign sbrk or a non-contiguous region. Insert a
2636 double fencepost at old_top to prevent consolidation with space
2637 we don't own. These fenceposts are artificial chunks that are
2638 marked as inuse and are in any case too small to use. We need
2639 two to make sizes and alignments work out.
2640 */
2641
2642 if (old_size != 0) {
2643 /*
2644 Shrink old_top to insert fenceposts, keeping size a
2645 multiple of MALLOC_ALIGNMENT. We know there is at least
2646 enough space in old_top to do this.
2647 */
2648 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2649 set_head(old_top, old_size | PREV_INUSE);
2650
2651 /*
2652 Note that the following assignments completely overwrite
2653 old_top when old_size was previously MINSIZE. This is
2654 intentional. We need the fencepost, even if old_top otherwise gets
2655 lost.
2656 */
2657 chunk_at_offset(old_top, old_size )->size =
2658 (2*SIZE_SZ)|PREV_INUSE;
2659
2660 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
2661 (2*SIZE_SZ)|PREV_INUSE;
2662
2663 /* If possible, release the rest. */
2664 if (old_size >= MINSIZE) {
2665 _int_free(av, old_top, 1);
2666 }
2667
2668 }
2669 }
2670 }
2671 }
2672
2673 } /* if (av != &main_arena) */
2674
2675 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
2676 av->max_system_mem = av->system_mem;
2677 check_malloc_state(av);
2678
2679 /* finally, do the allocation */
2680 p = av->top;
2681 size = chunksize(p);
2682
2683 /* check that one of the above allocation paths succeeded */
2684 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
2685 remainder_size = size - nb;
2686 remainder = chunk_at_offset(p, nb);
2687 av->top = remainder;
2688 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2689 set_head(remainder, remainder_size | PREV_INUSE);
2690 check_malloced_chunk(av, p, nb);
2691 return chunk2mem(p);
2692 }
2693
2694 /* catch all failure paths */
2695 __set_errno (ENOMEM);
2696 return 0;
2697 }
2698
2699
2700 /*
2701 systrim is an inverse of sorts to sysmalloc. It gives memory back
2702 to the system (via negative arguments to sbrk) if there is unused
2703 memory at the `high' end of the malloc pool. It is called
2704 automatically by free() when top space exceeds the trim
2705 threshold. It is also called by the public malloc_trim routine. It
2706 returns 1 if it actually released any memory, else 0.
2707 */
2708
2709 static int systrim(size_t pad, mstate av)
2710 {
2711 long top_size; /* Amount of top-most memory */
2712 long extra; /* Amount to release */
2713 long released; /* Amount actually released */
2714 char* current_brk; /* address returned by pre-check sbrk call */
2715 char* new_brk; /* address returned by post-check sbrk call */
2716 size_t pagesz;
2717
2718 pagesz = GLRO(dl_pagesize);
2719 top_size = chunksize(av->top);
2720
2721 /* Release in pagesize units, keeping at least one page */
2722 extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
2723
2724 if (extra > 0) {
2725
2726 /*
2727 Only proceed if end of memory is where we last set it.
2728 This avoids problems if there were foreign sbrk calls.
2729 */
2730 current_brk = (char*)(MORECORE(0));
2731 if (current_brk == (char*)(av->top) + top_size) {
2732
2733 /*
2734 Attempt to release memory. We ignore MORECORE return value,
2735 and instead call again to find out where new end of memory is.
2736 This avoids problems if first call releases less than we asked,
2737 or if failure somehow altered the brk value. (We could still
2738 encounter problems if it altered brk in some very bad way,
2739 but the only thing we can do is adjust anyway, which will cause
2740 some downstream failure.)
2741 */
2742
2743 MORECORE(-extra);
2744 /* Call the `morecore' hook if necessary. */
2745 void (*hook) (void) = force_reg (__after_morecore_hook);
2746 if (__builtin_expect (hook != NULL, 0))
2747 (*hook) ();
2748 new_brk = (char*)(MORECORE(0));
2749
2750 if (new_brk != (char*)MORECORE_FAILURE) {
2751 released = (long)(current_brk - new_brk);
2752
2753 if (released != 0) {
2754 /* Success. Adjust top. */
2755 av->system_mem -= released;
2756 set_head(av->top, (top_size - released) | PREV_INUSE);
2757 check_malloc_state(av);
2758 return 1;
2759 }
2760 }
2761 }
2762 }
2763 return 0;
2764 }
2765
2766 static void
2767 internal_function
2768 munmap_chunk(mchunkptr p)
2769 {
2770 INTERNAL_SIZE_T size = chunksize(p);
2771
2772 assert (chunk_is_mmapped(p));
2773
2774 uintptr_t block = (uintptr_t) p - p->prev_size;
2775 size_t total_size = p->prev_size + size;
2776 /* Unfortunately we have to do the compiler's job by hand here. Normally
2777 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2778 page size. But gcc does not recognize the optimization possibility
2779 (at the moment, at least), so we combine the two values into one before
2780 the bit test. */
2781 if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
2782 {
2783 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
2784 chunk2mem (p));
2785 return;
2786 }
2787
2788 mp_.n_mmaps--;
2789 mp_.mmapped_mem -= total_size;
2790
2791 /* If munmap fails, the process's virtual memory address space is in
2792 bad shape. Just leave the block hanging around; the process will
2793 terminate shortly anyway, since not much can be done. */
2794 __munmap((char *)block, total_size);
2795 }
2796
2797 #if HAVE_MREMAP
2798
2799 static mchunkptr
2800 internal_function
2801 mremap_chunk(mchunkptr p, size_t new_size)
2802 {
2803 size_t page_mask = GLRO(dl_pagesize) - 1;
2804 INTERNAL_SIZE_T offset = p->prev_size;
2805 INTERNAL_SIZE_T size = chunksize(p);
2806 char *cp;
2807
2808 assert (chunk_is_mmapped(p));
2809 assert(((size + offset) & (GLRO(dl_pagesize)-1)) == 0);
2810
2811 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2812 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
2813
2814 /* No need to remap if the number of pages does not change. */
2815 if (size + offset == new_size)
2816 return p;
2817
2818 cp = (char *)__mremap((char *)p - offset, size + offset, new_size,
2819 MREMAP_MAYMOVE);
2820
2821 if (cp == MAP_FAILED) return 0;
2822
2823 p = (mchunkptr)(cp + offset);
2824
2825 assert(aligned_OK(chunk2mem(p)));
2826
2827 assert((p->prev_size == offset));
2828 set_head(p, (new_size - offset)|IS_MMAPPED);
2829
2830 mp_.mmapped_mem -= size + offset;
2831 mp_.mmapped_mem += new_size;
2832 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
2833 mp_.max_mmapped_mem = mp_.mmapped_mem;
2834 return p;
2835 }
2836
2837 #endif /* HAVE_MREMAP */
2838
2839 /*------------------------ Public wrappers. --------------------------------*/
2840
2841 void*
2842 __libc_malloc(size_t bytes)
2843 {
2844 mstate ar_ptr;
2845 void *victim;
2846
2847 __malloc_ptr_t (*hook) (size_t, const __malloc_ptr_t)
2848 = force_reg (__malloc_hook);
2849 if (__builtin_expect (hook != NULL, 0))
2850 return (*hook)(bytes, RETURN_ADDRESS (0));
2851
2852 arena_lookup(ar_ptr);
2853
2854 arena_lock(ar_ptr, bytes);
2855 if(!ar_ptr)
2856 return 0;
2857 victim = _int_malloc(ar_ptr, bytes);
2858 if(!victim) {
2859 /* Maybe the failure is due to running out of mmapped areas. */
2860 if(ar_ptr != &main_arena) {
2861 (void)mutex_unlock(&ar_ptr->mutex);
2862 ar_ptr = &main_arena;
2863 (void)mutex_lock(&ar_ptr->mutex);
2864 victim = _int_malloc(ar_ptr, bytes);
2865 (void)mutex_unlock(&ar_ptr->mutex);
2866 } else {
2867 /* ... or sbrk() has failed and there is still a chance to mmap() */
2868 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
2869 (void)mutex_unlock(&main_arena.mutex);
2870 if(ar_ptr) {
2871 victim = _int_malloc(ar_ptr, bytes);
2872 (void)mutex_unlock(&ar_ptr->mutex);
2873 }
2874 }
2875 } else
2876 (void)mutex_unlock(&ar_ptr->mutex);
2877 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
2878 ar_ptr == arena_for_chunk(mem2chunk(victim)));
2879 return victim;
2880 }
2881 libc_hidden_def(__libc_malloc)
2882
2883 void
2884 __libc_free(void* mem)
2885 {
2886 mstate ar_ptr;
2887 mchunkptr p; /* chunk corresponding to mem */
2888
2889 void (*hook) (__malloc_ptr_t, const __malloc_ptr_t)
2890 = force_reg (__free_hook);
2891 if (__builtin_expect (hook != NULL, 0)) {
2892 (*hook)(mem, RETURN_ADDRESS (0));
2893 return;
2894 }
2895
2896 if (mem == 0) /* free(0) has no effect */
2897 return;
2898
2899 p = mem2chunk(mem);
2900
2901 if (chunk_is_mmapped(p)) /* release mmapped memory. */
2902 {
2903 /* see if the dynamic brk/mmap threshold needs adjusting */
2904 if (!mp_.no_dyn_threshold
2905 && p->size > mp_.mmap_threshold
2906 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
2907 {
2908 mp_.mmap_threshold = chunksize (p);
2909 mp_.trim_threshold = 2 * mp_.mmap_threshold;
2910 }
2911 munmap_chunk(p);
2912 return;
2913 }
2914
2915 ar_ptr = arena_for_chunk(p);
2916 _int_free(ar_ptr, p, 0);
2917 }
2918 libc_hidden_def (__libc_free)
2919
2920 void*
2921 __libc_realloc(void* oldmem, size_t bytes)
2922 {
2923 mstate ar_ptr;
2924 INTERNAL_SIZE_T nb; /* padded request size */
2925
2926 void* newp; /* chunk to return */
2927
2928 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, const __malloc_ptr_t) =
2929 force_reg (__realloc_hook);
2930 if (__builtin_expect (hook != NULL, 0))
2931 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
2932
2933 #if REALLOC_ZERO_BYTES_FREES
2934 if (bytes == 0 && oldmem != NULL) { __libc_free(oldmem); return 0; }
2935 #endif
2936
2937 /* realloc of null is supposed to be same as malloc */
2938 if (oldmem == 0) return __libc_malloc(bytes);
2939
2940 /* chunk corresponding to oldmem */
2941 const mchunkptr oldp = mem2chunk(oldmem);
2942 /* its size */
2943 const INTERNAL_SIZE_T oldsize = chunksize(oldp);
2944
2945 /* Little security check which won't hurt performance: the
2946 allocator never wraps around at the end of the address space.
2947 Therefore we can exclude some size values which might appear
2948 here by accident or by "design" from some intruder. */
2949 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
2950 || __builtin_expect (misaligned_chunk (oldp), 0))
2951 {
2952 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
2953 return NULL;
2954 }
2955
2956 checked_request2size(bytes, nb);
2957
2958 if (chunk_is_mmapped(oldp))
2959 {
2960 void* newmem;
2961
2962 #if HAVE_MREMAP
2963 newp = mremap_chunk(oldp, nb);
2964 if(newp) return chunk2mem(newp);
2965 #endif
2966 /* Note the extra SIZE_SZ overhead. */
2967 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
2968 /* Must alloc, copy, free. */
2969 newmem = __libc_malloc(bytes);
2970 if (newmem == 0) return 0; /* propagate failure */
2971 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
2972 munmap_chunk(oldp);
2973 return newmem;
2974 }
2975
2976 ar_ptr = arena_for_chunk(oldp);
2977 #if THREAD_STATS
2978 if(!mutex_trylock(&ar_ptr->mutex))
2979 ++(ar_ptr->stat_lock_direct);
2980 else {
2981 (void)mutex_lock(&ar_ptr->mutex);
2982 ++(ar_ptr->stat_lock_wait);
2983 }
2984 #else
2985 (void)mutex_lock(&ar_ptr->mutex);
2986 #endif
2987
2988 #if !defined PER_THREAD
2989 /* As in malloc(), remember this arena for the next allocation. */
2990 tsd_setspecific(arena_key, (void *)ar_ptr);
2991 #endif
2992
2993 newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
2994
2995 (void)mutex_unlock(&ar_ptr->mutex);
2996 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
2997 ar_ptr == arena_for_chunk(mem2chunk(newp)));
2998
2999 if (newp == NULL)
3000 {
3001 /* Try harder to allocate memory in other arenas. */
3002 newp = __libc_malloc(bytes);
3003 if (newp != NULL)
3004 {
3005 MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ);
3006 _int_free(ar_ptr, oldp, 0);
3007 }
3008 }
3009
3010 return newp;
3011 }
3012 libc_hidden_def (__libc_realloc)
3013
3014 void*
3015 __libc_memalign(size_t alignment, size_t bytes)
3016 {
3017 mstate ar_ptr;
3018 void *p;
3019
3020 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3021 const __malloc_ptr_t)) =
3022 force_reg (__memalign_hook);
3023 if (__builtin_expect (hook != NULL, 0))
3024 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3025
3026 /* If need less alignment than we give anyway, just relay to malloc */
3027 if (alignment <= MALLOC_ALIGNMENT) return __libc_malloc(bytes);
3028
3029 /* Otherwise, ensure that it is at least a minimum chunk size */
3030 if (alignment < MINSIZE) alignment = MINSIZE;
3031
3032 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3033 if(!ar_ptr)
3034 return 0;
3035 p = _int_memalign(ar_ptr, alignment, bytes);
3036 if(!p) {
3037 /* Maybe the failure is due to running out of mmapped areas. */
3038 if(ar_ptr != &main_arena) {
3039 (void)mutex_unlock(&ar_ptr->mutex);
3040 ar_ptr = &main_arena;
3041 (void)mutex_lock(&ar_ptr->mutex);
3042 p = _int_memalign(ar_ptr, alignment, bytes);
3043 (void)mutex_unlock(&ar_ptr->mutex);
3044 } else {
3045 /* ... or sbrk() has failed and there is still a chance to mmap() */
3046 mstate prev = ar_ptr->next ? ar_ptr : 0;
3047 (void)mutex_unlock(&ar_ptr->mutex);
3048 ar_ptr = arena_get2(prev, bytes);
3049 if(ar_ptr) {
3050 p = _int_memalign(ar_ptr, alignment, bytes);
3051 (void)mutex_unlock(&ar_ptr->mutex);
3052 }
3053 }
3054 } else
3055 (void)mutex_unlock(&ar_ptr->mutex);
3056 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3057 ar_ptr == arena_for_chunk(mem2chunk(p)));
3058 return p;
3059 }
3060 /* For ISO C11. */
3061 weak_alias (__libc_memalign, aligned_alloc)
3062 libc_hidden_def (__libc_memalign)
3063
3064 void*
3065 __libc_valloc(size_t bytes)
3066 {
3067 mstate ar_ptr;
3068 void *p;
3069
3070 if(__malloc_initialized < 0)
3071 ptmalloc_init ();
3072
3073 size_t pagesz = GLRO(dl_pagesize);
3074
3075 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3076 const __malloc_ptr_t)) =
3077 force_reg (__memalign_hook);
3078 if (__builtin_expect (hook != NULL, 0))
3079 return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
3080
3081 arena_get(ar_ptr, bytes + pagesz + MINSIZE);
3082 if(!ar_ptr)
3083 return 0;
3084 p = _int_valloc(ar_ptr, bytes);
3085 (void)mutex_unlock(&ar_ptr->mutex);
3086 if(!p) {
3087 /* Maybe the failure is due to running out of mmapped areas. */
3088 if(ar_ptr != &main_arena) {
3089 ar_ptr = &main_arena;
3090 (void)mutex_lock(&ar_ptr->mutex);
3091 p = _int_memalign(ar_ptr, pagesz, bytes);
3092 (void)mutex_unlock(&ar_ptr->mutex);
3093 } else {
3094 /* ... or sbrk() has failed and there is still a chance to mmap() */
3095 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3096 if(ar_ptr) {
3097 p = _int_memalign(ar_ptr, pagesz, bytes);
3098 (void)mutex_unlock(&ar_ptr->mutex);
3099 }
3100 }
3101 }
3102 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3103 ar_ptr == arena_for_chunk(mem2chunk(p)));
3104
3105 return p;
3106 }
3107
3108 void*
3109 __libc_pvalloc(size_t bytes)
3110 {
3111 mstate ar_ptr;
3112 void *p;
3113
3114 if(__malloc_initialized < 0)
3115 ptmalloc_init ();
3116
3117 size_t pagesz = GLRO(dl_pagesize);
3118 size_t page_mask = GLRO(dl_pagesize) - 1;
3119 size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
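  /* e.g. with 4096-byte pages, bytes == 1000 gives rounded_bytes == 4096
     and bytes == 4096 stays 4096, so the request always covers a whole
     number of pages. */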
3120
3121 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3122 const __malloc_ptr_t)) =
3123 force_reg (__memalign_hook);
3124 if (__builtin_expect (hook != NULL, 0))
3125 return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
3126
3127 arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
3128 p = _int_pvalloc(ar_ptr, bytes);
3129 (void)mutex_unlock(&ar_ptr->mutex);
3130 if(!p) {
3131 /* Maybe the failure is due to running out of mmapped areas. */
3132 if(ar_ptr != &main_arena) {
3133 ar_ptr = &main_arena;
3134 (void)mutex_lock(&ar_ptr->mutex);
3135 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
3136 (void)mutex_unlock(&ar_ptr->mutex);
3137 } else {
3138 /* ... or sbrk() has failed and there is still a chance to mmap() */
3139 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
3140 bytes + 2*pagesz + MINSIZE);
3141 if(ar_ptr) {
3142 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
3143 (void)mutex_unlock(&ar_ptr->mutex);
3144 }
3145 }
3146 }
3147 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3148 ar_ptr == arena_for_chunk(mem2chunk(p)));
3149
3150 return p;
3151 }
3152
3153 void*
3154 __libc_calloc(size_t n, size_t elem_size)
3155 {
3156 mstate av;
3157 mchunkptr oldtop, p;
3158 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3159 void* mem;
3160 unsigned long clearsize;
3161 unsigned long nclears;
3162 INTERNAL_SIZE_T* d;
3163
3164 /* size_t is unsigned so the behavior on overflow is defined. */
3165 bytes = n * elem_size;
3166 #define HALF_INTERNAL_SIZE_T \
3167 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3168 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
3169 if (elem_size != 0 && bytes / elem_size != n) {
3170 __set_errno (ENOMEM);
3171 return 0;
3172 }
3173 }
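  /* Rationale sketch: if both n and elem_size are below
     HALF_INTERNAL_SIZE_T (e.g. below 2^32 on a typical 64-bit build),
     the product cannot overflow INTERNAL_SIZE_T, so the division test
     only runs in the rare case where one operand is large. */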
3174
3175 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, const __malloc_ptr_t)) =
3176 force_reg (__malloc_hook);
3177 if (__builtin_expect (hook != NULL, 0)) {
3178 sz = bytes;
3179 mem = (*hook)(sz, RETURN_ADDRESS (0));
3180 if(mem == 0)
3181 return 0;
3182 return memset(mem, 0, sz);
3183 }
3184
3185 sz = bytes;
3186
3187 arena_get(av, sz);
3188 if(!av)
3189 return 0;
3190
3191 /* Check if we hand out the top chunk, in which case there may be no
3192 need to clear. */
3193 #if MORECORE_CLEARS
3194 oldtop = top(av);
3195 oldtopsize = chunksize(top(av));
3196 #if MORECORE_CLEARS < 2
3197 /* Only newly allocated memory is guaranteed to be cleared. */
3198 if (av == &main_arena &&
3199 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3200 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3201 #endif
3202 if (av != &main_arena)
3203 {
3204 heap_info *heap = heap_for_ptr (oldtop);
3205 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3206 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3207 }
3208 #endif
3209 mem = _int_malloc(av, sz);
3210
3211 /* Only clearing follows, so we can unlock early. */
3212 (void)mutex_unlock(&av->mutex);
3213
3214 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3215 av == arena_for_chunk(mem2chunk(mem)));
3216
3217 if (mem == 0) {
3218 /* Maybe the failure is due to running out of mmapped areas. */
3219 if(av != &main_arena) {
3220 (void)mutex_lock(&main_arena.mutex);
3221 mem = _int_malloc(&main_arena, sz);
3222 (void)mutex_unlock(&main_arena.mutex);
3223 } else {
3224 /* ... or sbrk() has failed and there is still a chance to mmap() */
3225 (void)mutex_lock(&main_arena.mutex);
3226 av = arena_get2(av->next ? av : 0, sz);
3227 (void)mutex_unlock(&main_arena.mutex);
3228 if(av) {
3229 mem = _int_malloc(av, sz);
3230 (void)mutex_unlock(&av->mutex);
3231 }
3232 }
3233 if (mem == 0) return 0;
3234 }
3235 p = mem2chunk(mem);
3236
3237 /* Two optional cases in which clearing is not necessary */
3238 if (chunk_is_mmapped (p))
3239 {
3240 if (__builtin_expect (perturb_byte, 0))
3241 MALLOC_ZERO (mem, sz);
3242 return mem;
3243 }
3244
3245 csz = chunksize(p);
3246
3247 #if MORECORE_CLEARS
3248 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
3249 /* clear only the bytes from non-freshly-sbrked memory */
3250 csz = oldtopsize;
3251 }
3252 #endif
3253
3254 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3255 contents have an odd number of INTERNAL_SIZE_T-sized words;
3256 minimally 3. */
3257 d = (INTERNAL_SIZE_T*)mem;
3258 clearsize = csz - SIZE_SZ;
3259 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3260 assert(nclears >= 3);
3261
3262 if (nclears > 9)
3263 MALLOC_ZERO(d, clearsize);
3264
3265 else {
3266 *(d+0) = 0;
3267 *(d+1) = 0;
3268 *(d+2) = 0;
3269 if (nclears > 4) {
3270 *(d+3) = 0;
3271 *(d+4) = 0;
3272 if (nclears > 6) {
3273 *(d+5) = 0;
3274 *(d+6) = 0;
3275 if (nclears > 8) {
3276 *(d+7) = 0;
3277 *(d+8) = 0;
3278 }
3279 }
3280 }
3281 }
3282
3283 return mem;
3284 }
3285
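/*
  Illustration of the overflow check at the top of __libc_calloc above,
  rewritten as a self-contained helper.  checked_mul and the constant
  "half" are sketch names, not part of this malloc; the trick is the
  same: if both factors fit in half the bits of size_t the product cannot
  overflow, so the expensive division test runs only for large operands.

  #include <errno.h>
  #include <stddef.h>

  // Store n * size in *result and return 1 if the product fits in a
  // size_t; otherwise return 0 and set errno to ENOMEM.
  static int
  checked_mul (size_t n, size_t size, size_t *result)
  {
    const size_t half = (size_t) 1 << (8 * sizeof (size_t) / 2);
    size_t bytes = n * size;

    if ((n | size) >= half && size != 0 && bytes / size != n)
      {
        errno = ENOMEM;
        return 0;
      }
    *result = bytes;
    return 1;
  }
*/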
3286 /*
3287 ------------------------------ malloc ------------------------------
3288 */
3289
3290 static void*
3291 _int_malloc(mstate av, size_t bytes)
3292 {
3293 INTERNAL_SIZE_T nb; /* normalized request size */
3294 unsigned int idx; /* associated bin index */
3295 mbinptr bin; /* associated bin */
3296
3297 mchunkptr victim; /* inspected/selected chunk */
3298 INTERNAL_SIZE_T size; /* its size */
3299 int victim_index; /* its bin index */
3300
3301 mchunkptr remainder; /* remainder from a split */
3302 unsigned long remainder_size; /* its size */
3303
3304 unsigned int block; /* bit map traverser */
3305 unsigned int bit; /* bit map traverser */
3306 unsigned int map; /* current word of binmap */
3307
3308 mchunkptr fwd; /* misc temp for linking */
3309 mchunkptr bck; /* misc temp for linking */
3310
3311 const char *errstr = NULL;
3312
3313 /*
3314 Convert request size to internal form by adding SIZE_SZ bytes
3315 overhead plus possibly more to obtain necessary alignment and/or
3316 to obtain a size of at least MINSIZE, the smallest allocatable
3317 size. Also, checked_request2size traps (returning 0) request sizes
3318 that are so large that they wrap around zero when padded and
3319 aligned.
3320 */
3321
3322 checked_request2size(bytes, nb);
3323
3324 /*
3325 If the size qualifies as a fastbin, first check corresponding bin.
3326 This code is safe to execute even if av is not yet initialized, so we
3327 can try it without checking, which saves some time on this fast path.
3328 */
3329
3330 if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
3331 idx = fastbin_index(nb);
3332 mfastbinptr* fb = &fastbin (av, idx);
3333 mchunkptr pp = *fb;
3334 do
3335 {
3336 victim = pp;
3337 if (victim == NULL)
3338 break;
3339 }
3340 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
3341 != victim);
3342 if (victim != 0) {
3343 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3344 {
3345 errstr = "malloc(): memory corruption (fast)";
3346 errout:
3347 malloc_printerr (check_action, errstr, chunk2mem (victim));
3348 return NULL;
3349 }
3350 check_remalloced_chunk(av, victim, nb);
3351 void *p = chunk2mem(victim);
3352 if (__builtin_expect (perturb_byte, 0))
3353 alloc_perturb (p, bytes);
3354 return p;
3355 }
3356 }
3357
3358 /*
3359 If a small request, check regular bin. Since these "smallbins"
3360 hold one size each, no searching within bins is necessary.
3361 (For a large request, we need to wait until unsorted chunks are
3362 processed to find best fit. But for small ones, fits are exact
3363 anyway, so we can check now, which is faster.)
3364 */
3365
3366 if (in_smallbin_range(nb)) {
3367 idx = smallbin_index(nb);
3368 bin = bin_at(av,idx);
3369
3370 if ( (victim = last(bin)) != bin) {
3371 if (victim == 0) /* initialization check */
3372 malloc_consolidate(av);
3373 else {
3374 bck = victim->bk;
3375 if (__builtin_expect (bck->fd != victim, 0))
3376 {
3377 errstr = "malloc(): smallbin double linked list corrupted";
3378 goto errout;
3379 }
3380 set_inuse_bit_at_offset(victim, nb);
3381 bin->bk = bck;
3382 bck->fd = bin;
3383
3384 if (av != &main_arena)
3385 victim->size |= NON_MAIN_ARENA;
3386 check_malloced_chunk(av, victim, nb);
3387 void *p = chunk2mem(victim);
3388 if (__builtin_expect (perturb_byte, 0))
3389 alloc_perturb (p, bytes);
3390 return p;
3391 }
3392 }
3393 }
3394
3395 /*
3396 If this is a large request, consolidate fastbins before continuing.
3397 While it might look excessive to kill all fastbins before
3398 even seeing if there is space available, this avoids
3399 fragmentation problems normally associated with fastbins.
3400 Also, in practice, programs tend to have runs of either small or
3401 large requests, but less often mixtures, so consolidation is not
3402 invoked all that often in most programs. And the programs in which
3403 it is called frequently would otherwise tend to fragment.
3404 */
3405
3406 else {
3407 idx = largebin_index(nb);
3408 if (have_fastchunks(av))
3409 malloc_consolidate(av);
3410 }
3411
3412 /*
3413 Process recently freed or remaindered chunks, taking one only if
3414 it is an exact fit, or, if this is a small request, the chunk is the
3415 remainder from the most recent non-exact fit. Place other traversed chunks in
3416 bins. Note that this step is the only place in any routine where
3417 chunks are placed in bins.
3418
3419 The outer loop here is needed because we might not realize until
3420 near the end of malloc that we should have consolidated, so must
3421 do so and retry. This happens at most once, and only when we would
3422 otherwise need to expand memory to service a "small" request.
3423 */
3424
3425 for(;;) {
3426
3427 int iters = 0;
3428 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3429 bck = victim->bk;
3430 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
3431 || __builtin_expect (victim->size > av->system_mem, 0))
3432 malloc_printerr (check_action, "malloc(): memory corruption",
3433 chunk2mem (victim));
3434 size = chunksize(victim);
3435
3436 /*
3437 If a small request, try to use last remainder if it is the
3438 only chunk in unsorted bin. This helps promote locality for
3439 runs of consecutive small requests. This is the only
3440 exception to best-fit, and applies only when there is
3441 no exact fit for a small chunk.
3442 */
3443
3444 if (in_smallbin_range(nb) &&
3445 bck == unsorted_chunks(av) &&
3446 victim == av->last_remainder &&
3447 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3448
3449 /* split and reattach remainder */
3450 remainder_size = size - nb;
3451 remainder = chunk_at_offset(victim, nb);
3452 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3453 av->last_remainder = remainder;
3454 remainder->bk = remainder->fd = unsorted_chunks(av);
3455 if (!in_smallbin_range(remainder_size))
3456 {
3457 remainder->fd_nextsize = NULL;
3458 remainder->bk_nextsize = NULL;
3459 }
3460
3461 set_head(victim, nb | PREV_INUSE |
3462 (av != &main_arena ? NON_MAIN_ARENA : 0));
3463 set_head(remainder, remainder_size | PREV_INUSE);
3464 set_foot(remainder, remainder_size);
3465
3466 check_malloced_chunk(av, victim, nb);
3467 void *p = chunk2mem(victim);
3468 if (__builtin_expect (perturb_byte, 0))
3469 alloc_perturb (p, bytes);
3470 return p;
3471 }
3472
3473 /* remove from unsorted list */
3474 unsorted_chunks(av)->bk = bck;
3475 bck->fd = unsorted_chunks(av);
3476
3477 /* Take now instead of binning if exact fit */
3478
3479 if (size == nb) {
3480 set_inuse_bit_at_offset(victim, size);
3481 if (av != &main_arena)
3482 victim->size |= NON_MAIN_ARENA;
3483 check_malloced_chunk(av, victim, nb);
3484 void *p = chunk2mem(victim);
3485 if (__builtin_expect (perturb_byte, 0))
3486 alloc_perturb (p, bytes);
3487 return p;
3488 }
3489
3490 /* place chunk in bin */
3491
3492 if (in_smallbin_range(size)) {
3493 victim_index = smallbin_index(size);
3494 bck = bin_at(av, victim_index);
3495 fwd = bck->fd;
3496 }
3497 else {
3498 victim_index = largebin_index(size);
3499 bck = bin_at(av, victim_index);
3500 fwd = bck->fd;
3501
3502 /* maintain large bins in sorted order */
3503 if (fwd != bck) {
3504 /* Or with inuse bit to speed comparisons */
3505 size |= PREV_INUSE;
3506 /* if smaller than smallest, bypass loop below */
3507 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
3508 if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
3509 fwd = bck;
3510 bck = bck->bk;
3511
3512 victim->fd_nextsize = fwd->fd;
3513 victim->bk_nextsize = fwd->fd->bk_nextsize;
3514 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3515 }
3516 else {
3517 assert((fwd->size & NON_MAIN_ARENA) == 0);
3518 while ((unsigned long) size < fwd->size)
3519 {
3520 fwd = fwd->fd_nextsize;
3521 assert((fwd->size & NON_MAIN_ARENA) == 0);
3522 }
3523
3524 if ((unsigned long) size == (unsigned long) fwd->size)
3525 /* Always insert in the second position. */
3526 fwd = fwd->fd;
3527 else
3528 {
3529 victim->fd_nextsize = fwd;
3530 victim->bk_nextsize = fwd->bk_nextsize;
3531 fwd->bk_nextsize = victim;
3532 victim->bk_nextsize->fd_nextsize = victim;
3533 }
3534 bck = fwd->bk;
3535 }
3536 } else
3537 victim->fd_nextsize = victim->bk_nextsize = victim;
3538 }
3539
3540 mark_bin(av, victim_index);
3541 victim->bk = bck;
3542 victim->fd = fwd;
3543 fwd->bk = victim;
3544 bck->fd = victim;
3545
3546 #define MAX_ITERS 10000
3547 if (++iters >= MAX_ITERS)
3548 break;
3549 }
3550
3551 /*
3552 If a large request, scan through the chunks of current bin in
3553 sorted order to find smallest that fits. Use the skip list for this.
3554 */
3555
3556 if (!in_smallbin_range(nb)) {
3557 bin = bin_at(av, idx);
3558
3559 /* skip scan if empty or largest chunk is too small */
3560 if ((victim = first(bin)) != bin &&
3561 (unsigned long)(victim->size) >= (unsigned long)(nb)) {
3562
3563 victim = victim->bk_nextsize;
3564 while (((unsigned long)(size = chunksize(victim)) <
3565 (unsigned long)(nb)))
3566 victim = victim->bk_nextsize;
3567
3568 /* Avoid removing the first entry for a size so that the skip
3569 list does not have to be rerouted. */
3570 if (victim != last(bin) && victim->size == victim->fd->size)
3571 victim = victim->fd;
3572
3573 remainder_size = size - nb;
3574 unlink(victim, bck, fwd);
3575
3576 /* Exhaust */
3577 if (remainder_size < MINSIZE) {
3578 set_inuse_bit_at_offset(victim, size);
3579 if (av != &main_arena)
3580 victim->size |= NON_MAIN_ARENA;
3581 }
3582 /* Split */
3583 else {
3584 remainder = chunk_at_offset(victim, nb);
3585 /* We cannot assume the unsorted list is empty and therefore
3586 have to perform a complete insert here. */
3587 bck = unsorted_chunks(av);
3588 fwd = bck->fd;
3589 if (__builtin_expect (fwd->bk != bck, 0))
3590 {
3591 errstr = "malloc(): corrupted unsorted chunks";
3592 goto errout;
3593 }
3594 remainder->bk = bck;
3595 remainder->fd = fwd;
3596 bck->fd = remainder;
3597 fwd->bk = remainder;
3598 if (!in_smallbin_range(remainder_size))
3599 {
3600 remainder->fd_nextsize = NULL;
3601 remainder->bk_nextsize = NULL;
3602 }
3603 set_head(victim, nb | PREV_INUSE |
3604 (av != &main_arena ? NON_MAIN_ARENA : 0));
3605 set_head(remainder, remainder_size | PREV_INUSE);
3606 set_foot(remainder, remainder_size);
3607 }
3608 check_malloced_chunk(av, victim, nb);
3609 void *p = chunk2mem(victim);
3610 if (__builtin_expect (perturb_byte, 0))
3611 alloc_perturb (p, bytes);
3612 return p;
3613 }
3614 }
3615
3616 /*
3617 Search for a chunk by scanning bins, starting with next largest
3618 bin. This search is strictly by best-fit; i.e., the smallest
3619 (with ties going to approximately the least recently used) chunk
3620 that fits is selected.
3621
3622 The bitmap avoids needing to check that most blocks are nonempty.
3623 The particular case of skipping all bins during warm-up phases
3624 when no chunks have been returned yet is faster than it might look.
3625 */
3626
3627 ++idx;
3628 bin = bin_at(av,idx);
3629 block = idx2block(idx);
3630 map = av->binmap[block];
3631 bit = idx2bit(idx);
3632
3633 for (;;) {
3634
3635 /* Skip rest of block if there are no more set bits in this block. */
3636 if (bit > map || bit == 0) {
3637 do {
3638 if (++block >= BINMAPSIZE) /* out of bins */
3639 goto use_top;
3640 } while ( (map = av->binmap[block]) == 0);
3641
3642 bin = bin_at(av, (block << BINMAPSHIFT));
3643 bit = 1;
3644 }
3645
3646 /* Advance to bin with set bit. There must be one. */
3647 while ((bit & map) == 0) {
3648 bin = next_bin(bin);
3649 bit <<= 1;
3650 assert(bit != 0);
3651 }
3652
3653 /* Inspect the bin. It is likely to be non-empty */
3654 victim = last(bin);
3655
3656 /* If a false alarm (empty bin), clear the bit. */
3657 if (victim == bin) {
3658 av->binmap[block] = map &= ~bit; /* Write through */
3659 bin = next_bin(bin);
3660 bit <<= 1;
3661 }
3662
3663 else {
3664 size = chunksize(victim);
3665
3666 /* We know the first chunk in this bin is big enough to use. */
3667 assert((unsigned long)(size) >= (unsigned long)(nb));
3668
3669 remainder_size = size - nb;
3670
3671 /* unlink */
3672 unlink(victim, bck, fwd);
3673
3674 /* Exhaust */
3675 if (remainder_size < MINSIZE) {
3676 set_inuse_bit_at_offset(victim, size);
3677 if (av != &main_arena)
3678 victim->size |= NON_MAIN_ARENA;
3679 }
3680
3681 /* Split */
3682 else {
3683 remainder = chunk_at_offset(victim, nb);
3684
3685 /* We cannot assume the unsorted list is empty and therefore
3686 have to perform a complete insert here. */
3687 bck = unsorted_chunks(av);
3688 fwd = bck->fd;
3689 if (__builtin_expect (fwd->bk != bck, 0))
3690 {
3691 errstr = "malloc(): corrupted unsorted chunks 2";
3692 goto errout;
3693 }
3694 remainder->bk = bck;
3695 remainder->fd = fwd;
3696 bck->fd = remainder;
3697 fwd->bk = remainder;
3698
3699 /* advertise as last remainder */
3700 if (in_smallbin_range(nb))
3701 av->last_remainder = remainder;
3702 if (!in_smallbin_range(remainder_size))
3703 {
3704 remainder->fd_nextsize = NULL;
3705 remainder->bk_nextsize = NULL;
3706 }
3707 set_head(victim, nb | PREV_INUSE |
3708 (av != &main_arena ? NON_MAIN_ARENA : 0));
3709 set_head(remainder, remainder_size | PREV_INUSE);
3710 set_foot(remainder, remainder_size);
3711 }
3712 check_malloced_chunk(av, victim, nb);
3713 void *p = chunk2mem(victim);
3714 if (__builtin_expect (perturb_byte, 0))
3715 alloc_perturb (p, bytes);
3716 return p;
3717 }
3718 }
3719
3720 use_top:
3721 /*
3722 If large enough, split off the chunk bordering the end of memory
3723 (held in av->top). Note that this is in accord with the best-fit
3724 search rule. In effect, av->top is treated as larger (and thus
3725 less well fitting) than any other available chunk since it can
3726 be extended to be as large as necessary (up to system
3727 limitations).
3728
3729 We require that av->top always exists (i.e., has size >=
3730 MINSIZE) after initialization, so if it would otherwise be
3731 exhausted by current request, it is replenished. (The main
3732 reason for ensuring it exists is that we may need MINSIZE space
3733 to put in fenceposts in sysmalloc.)
3734 */
3735
3736 victim = av->top;
3737 size = chunksize(victim);
3738
3739 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3740 remainder_size = size - nb;
3741 remainder = chunk_at_offset(victim, nb);
3742 av->top = remainder;
3743 set_head(victim, nb | PREV_INUSE |
3744 (av != &main_arena ? NON_MAIN_ARENA : 0));
3745 set_head(remainder, remainder_size | PREV_INUSE);
3746
3747 check_malloced_chunk(av, victim, nb);
3748 void *p = chunk2mem(victim);
3749 if (__builtin_expect (perturb_byte, 0))
3750 alloc_perturb (p, bytes);
3751 return p;
3752 }
3753
3754 /* When we are using atomic ops to free fast chunks we can get
3755 here for all block sizes. */
3756 else if (have_fastchunks(av)) {
3757 malloc_consolidate(av);
3758 /* restore original bin index */
3759 if (in_smallbin_range(nb))
3760 idx = smallbin_index(nb);
3761 else
3762 idx = largebin_index(nb);
3763 }
3764
3765 /*
3766 Otherwise, relay to handle system-dependent cases
3767 */
3768 else {
3769 void *p = sysmalloc(nb, av);
3770 if (p != NULL && __builtin_expect (perturb_byte, 0))
3771 alloc_perturb (p, bytes);
3772 return p;
3773 }
3774 }
3775 }
3776
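/*
  Illustration of the binmap technique used in the scan loop above: one
  bit per bin, grouped into 32-bit words, so whole runs of empty bins can
  be skipped with a single word comparison.  NBINS_EX, BITS_PER_MAP and
  next_nonempty_bin are sketch names only; the real code uses BINMAPSIZE,
  BINMAPSHIFT, idx2block and idx2bit.

  #define NBINS_EX      128
  #define BITS_PER_MAP  32
  #define NMAPWORDS     (NBINS_EX / BITS_PER_MAP)

  // Return the index of the first non-empty bin >= idx, or -1 if none.
  // binmap[w] has bit b set when bin (w * BITS_PER_MAP + b) is non-empty.
  static int
  next_nonempty_bin (const unsigned int binmap[NMAPWORDS], int idx)
  {
    int block = idx / BITS_PER_MAP;
    unsigned int bit = 1u << (idx % BITS_PER_MAP);
    unsigned int map = binmap[block];

    // No set bit at or above idx in this word: skip whole empty words.
    if (bit > map)
      {
        do
          if (++block >= NMAPWORDS)
            return -1;                  // out of bins
        while ((map = binmap[block]) == 0);
        idx = block * BITS_PER_MAP;
        bit = 1;
      }

    // Advance to the first set bit; it must exist in this word.
    while ((bit & map) == 0)
      {
        bit <<= 1;
        ++idx;
      }
    return idx;
  }
*/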
3777 /*
3778 ------------------------------ free ------------------------------
3779 */
3780
3781 static void
3782 _int_free(mstate av, mchunkptr p, int have_lock)
3783 {
3784 INTERNAL_SIZE_T size; /* its size */
3785 mfastbinptr* fb; /* associated fastbin */
3786 mchunkptr nextchunk; /* next contiguous chunk */
3787 INTERNAL_SIZE_T nextsize; /* its size */
3788 int nextinuse; /* true if nextchunk is used */
3789 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3790 mchunkptr bck; /* misc temp for linking */
3791 mchunkptr fwd; /* misc temp for linking */
3792
3793 const char *errstr = NULL;
3794 int locked = 0;
3795
3796 size = chunksize(p);
3797
3798 /* Little security check which won't hurt performance: the
3799 allocator never wraps around at the end of the address space.
3800 Therefore we can exclude some size values which might appear
3801 here by accident or by "design" from some intruder. */
3802 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
3803 || __builtin_expect (misaligned_chunk (p), 0))
3804 {
3805 errstr = "free(): invalid pointer";
3806 errout:
3807 if (! have_lock && locked)
3808 (void)mutex_unlock(&av->mutex);
3809 malloc_printerr (check_action, errstr, chunk2mem(p));
3810 return;
3811 }
3812 /* We know that each chunk is at least MINSIZE bytes in size. */
3813 if (__builtin_expect (size < MINSIZE, 0))
3814 {
3815 errstr = "free(): invalid size";
3816 goto errout;
3817 }
3818
3819 check_inuse_chunk(av, p);
3820
3821 /*
3822 If eligible, place chunk on a fastbin so it can be found
3823 and used quickly in malloc.
3824 */
3825
3826 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
3827
3828 #if TRIM_FASTBINS
3829 /*
3830 If TRIM_FASTBINS set, don't place chunks
3831 bordering top into fastbins
3832 */
3833 && (chunk_at_offset(p, size) != av->top)
3834 #endif
3835 ) {
3836
3837 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
3838 || __builtin_expect (chunksize (chunk_at_offset (p, size))
3839 >= av->system_mem, 0))
3840 {
3841 /* We might not have a lock at this point and concurrent modifications
3842 of system_mem might have led to a false positive. Redo the test
3843 after getting the lock. */
3844 if (have_lock
3845 || ({ assert (locked == 0);
3846 mutex_lock(&av->mutex);
3847 locked = 1;
3848 chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
3849 || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
3850 }))
3851 {
3852 errstr = "free(): invalid next size (fast)";
3853 goto errout;
3854 }
3855 if (! have_lock)
3856 {
3857 (void)mutex_unlock(&av->mutex);
3858 locked = 0;
3859 }
3860 }
3861
3862 if (__builtin_expect (perturb_byte, 0))
3863 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3864
3865 set_fastchunks(av);
3866 unsigned int idx = fastbin_index(size);
3867 fb = &fastbin (av, idx);
3868
3869 mchunkptr fd;
3870 mchunkptr old = *fb;
3871 unsigned int old_idx = ~0u;
3872 do
3873 {
3874 /* Another simple check: make sure the top of the bin is not the
3875 record we are going to add (i.e., double free). */
3876 if (__builtin_expect (old == p, 0))
3877 {
3878 errstr = "double free or corruption (fasttop)";
3879 goto errout;
3880 }
3881 if (old != NULL)
3882 old_idx = fastbin_index(chunksize(old));
3883 p->fd = fd = old;
3884 }
3885 while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
3886
3887 if (fd != NULL && __builtin_expect (old_idx != idx, 0))
3888 {
3889 errstr = "invalid fastbin entry (free)";
3890 goto errout;
3891 }
3892 }
3893
3894 /*
3895 Consolidate other non-mmapped chunks as they arrive.
3896 */
3897
3898 else if (!chunk_is_mmapped(p)) {
3899 if (! have_lock) {
3900 #if THREAD_STATS
3901 if(!mutex_trylock(&av->mutex))
3902 ++(av->stat_lock_direct);
3903 else {
3904 (void)mutex_lock(&av->mutex);
3905 ++(av->stat_lock_wait);
3906 }
3907 #else
3908 (void)mutex_lock(&av->mutex);
3909 #endif
3910 locked = 1;
3911 }
3912
3913 nextchunk = chunk_at_offset(p, size);
3914
3915 /* Lightweight tests: check whether the block is already the
3916 top block. */
3917 if (__builtin_expect (p == av->top, 0))
3918 {
3919 errstr = "double free or corruption (top)";
3920 goto errout;
3921 }
3922 /* Or whether the next chunk is beyond the boundaries of the arena. */
3923 if (__builtin_expect (contiguous (av)
3924 && (char *) nextchunk
3925 >= ((char *) av->top + chunksize(av->top)), 0))
3926 {
3927 errstr = "double free or corruption (out)";
3928 goto errout;
3929 }
3930 /* Or whether the block is actually not marked used. */
3931 if (__builtin_expect (!prev_inuse(nextchunk), 0))
3932 {
3933 errstr = "double free or corruption (!prev)";
3934 goto errout;
3935 }
3936
3937 nextsize = chunksize(nextchunk);
3938 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
3939 || __builtin_expect (nextsize >= av->system_mem, 0))
3940 {
3941 errstr = "free(): invalid next size (normal)";
3942 goto errout;
3943 }
3944
3945 if (__builtin_expect (perturb_byte, 0))
3946 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3947
3948 /* consolidate backward */
3949 if (!prev_inuse(p)) {
3950 prevsize = p->prev_size;
3951 size += prevsize;
3952 p = chunk_at_offset(p, -((long) prevsize));
3953 unlink(p, bck, fwd);
3954 }
3955
3956 if (nextchunk != av->top) {
3957 /* get and clear inuse bit */
3958 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
3959
3960 /* consolidate forward */
3961 if (!nextinuse) {
3962 unlink(nextchunk, bck, fwd);
3963 size += nextsize;
3964 } else
3965 clear_inuse_bit_at_offset(nextchunk, 0);
3966
3967 /*
3968 Place the chunk in unsorted chunk list. Chunks are
3969 not placed into regular bins until after they have
3970 been given one chance to be used in malloc.
3971 */
3972
3973 bck = unsorted_chunks(av);
3974 fwd = bck->fd;
3975 if (__builtin_expect (fwd->bk != bck, 0))
3976 {
3977 errstr = "free(): corrupted unsorted chunks";
3978 goto errout;
3979 }
3980 p->fd = fwd;
3981 p->bk = bck;
3982 if (!in_smallbin_range(size))
3983 {
3984 p->fd_nextsize = NULL;
3985 p->bk_nextsize = NULL;
3986 }
3987 bck->fd = p;
3988 fwd->bk = p;
3989
3990 set_head(p, size | PREV_INUSE);
3991 set_foot(p, size);
3992
3993 check_free_chunk(av, p);
3994 }
3995
3996 /*
3997 If the chunk borders the current high end of memory,
3998 consolidate into top
3999 */
4000
4001 else {
4002 size += nextsize;
4003 set_head(p, size | PREV_INUSE);
4004 av->top = p;
4005 check_chunk(av, p);
4006 }
4007
4008 /*
4009 If freeing a large space, consolidate possibly-surrounding
4010 chunks. Then, if the total unused topmost memory exceeds trim
4011 threshold, ask malloc_trim to reduce top.
4012
4013 Unless max_fast is 0, we don't know if there are fastbins
4014 bordering top, so we cannot tell for sure whether threshold
4015 has been reached unless fastbins are consolidated. But we
4016 don't want to consolidate on each free. As a compromise,
4017 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4018 is reached.
4019 */
4020
4021 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4022 if (have_fastchunks(av))
4023 malloc_consolidate(av);
4024
4025 if (av == &main_arena) {
4026 #ifndef MORECORE_CANNOT_TRIM
4027 if ((unsigned long)(chunksize(av->top)) >=
4028 (unsigned long)(mp_.trim_threshold))
4029 systrim(mp_.top_pad, av);
4030 #endif
4031 } else {
4032 /* Always try heap_trim(), even if the top chunk is not
4033 large, because the corresponding heap might go away. */
4034 heap_info *heap = heap_for_ptr(top(av));
4035
4036 assert(heap->ar_ptr == av);
4037 heap_trim(heap, mp_.top_pad);
4038 }
4039 }
4040
4041 if (! have_lock) {
4042 assert (locked);
4043 (void)mutex_unlock(&av->mutex);
4044 }
4045 }
4046 /*
4047 If the chunk was allocated via mmap, release via munmap().
4048 */
4049
4050 else {
4051 munmap_chunk (p);
4052 }
4053 }
4054
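/*
  Illustration of the "fasttop" double-free test performed above, reduced
  to a single-threaded model.  fake_chunk and fastbin_push are sketch
  names; the real code additionally checks the size class of the old top
  and uses an atomic compare-and-exchange to install the new head.

  #include <stdio.h>

  struct fake_chunk
  {
    struct fake_chunk *fd;      // next chunk in the free list
  };

  // Push p onto the LIFO free list *head.  Refuse, as the code above
  // does, when p is already the top entry -- the cheapest way to catch a
  // free of the same block twice in a row.
  static int
  fastbin_push (struct fake_chunk **head, struct fake_chunk *p)
  {
    if (*head == p)
      {
        fprintf (stderr, "double free or corruption (fasttop)\n");
        return 0;
      }
    p->fd = *head;
    *head = p;
    return 1;
  }
*/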
4055 /*
4056 ------------------------- malloc_consolidate -------------------------
4057
4058 malloc_consolidate is a specialized version of free() that tears
4059 down chunks held in fastbins. Free itself cannot be used for this
4060 purpose since, among other things, it might place chunks back onto
4061 fastbins. So, instead, we need to use a minor variant of the same
4062 code.
4063
4064 Also, because this routine needs to be called the first time through
4065 malloc anyway, it turns out to be the perfect place to trigger
4066 initialization code.
4067 */
4068
4069 static void malloc_consolidate(mstate av)
4070 {
4071 mfastbinptr* fb; /* current fastbin being consolidated */
4072 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4073 mchunkptr p; /* current chunk being consolidated */
4074 mchunkptr nextp; /* next chunk to consolidate */
4075 mchunkptr unsorted_bin; /* bin header */
4076 mchunkptr first_unsorted; /* chunk to link to */
4077
4078 /* These have same use as in free() */
4079 mchunkptr nextchunk;
4080 INTERNAL_SIZE_T size;
4081 INTERNAL_SIZE_T nextsize;
4082 INTERNAL_SIZE_T prevsize;
4083 int nextinuse;
4084 mchunkptr bck;
4085 mchunkptr fwd;
4086
4087 /*
4088 If max_fast is 0, we know that av hasn't
4089 yet been initialized, in which case do so below
4090 */
4091
4092 if (get_max_fast () != 0) {
4093 clear_fastchunks(av);
4094
4095 unsorted_bin = unsorted_chunks(av);
4096
4097 /*
4098 Remove each chunk from fast bin and consolidate it, placing it
4099 then in unsorted bin. Among other reasons for doing this,
4100 placing in unsorted bin avoids needing to calculate actual bins
4101 until malloc is sure that chunks aren't immediately going to be
4102 reused anyway.
4103 */
4104
4105 maxfb = &fastbin (av, NFASTBINS - 1);
4106 fb = &fastbin (av, 0);
4107 do {
4108 p = atomic_exchange_acq (fb, 0);
4109 if (p != 0) {
4110 do {
4111 check_inuse_chunk(av, p);
4112 nextp = p->fd;
4113
4114 /* Slightly streamlined version of consolidation code in free() */
4115 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4116 nextchunk = chunk_at_offset(p, size);
4117 nextsize = chunksize(nextchunk);
4118
4119 if (!prev_inuse(p)) {
4120 prevsize = p->prev_size;
4121 size += prevsize;
4122 p = chunk_at_offset(p, -((long) prevsize));
4123 unlink(p, bck, fwd);
4124 }
4125
4126 if (nextchunk != av->top) {
4127 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4128
4129 if (!nextinuse) {
4130 size += nextsize;
4131 unlink(nextchunk, bck, fwd);
4132 } else
4133 clear_inuse_bit_at_offset(nextchunk, 0);
4134
4135 first_unsorted = unsorted_bin->fd;
4136 unsorted_bin->fd = p;
4137 first_unsorted->bk = p;
4138
4139 if (!in_smallbin_range (size)) {
4140 p->fd_nextsize = NULL;
4141 p->bk_nextsize = NULL;
4142 }
4143
4144 set_head(p, size | PREV_INUSE);
4145 p->bk = unsorted_bin;
4146 p->fd = first_unsorted;
4147 set_foot(p, size);
4148 }
4149
4150 else {
4151 size += nextsize;
4152 set_head(p, size | PREV_INUSE);
4153 av->top = p;
4154 }
4155
4156 } while ( (p = nextp) != 0);
4157
4158 }
4159 } while (fb++ != maxfb);
4160 }
4161 else {
4162 malloc_init_state(av);
4163 check_malloc_state(av);
4164 }
4165 }
4166
4167 /*
4168 ------------------------------ realloc ------------------------------
4169 */
4170
4171 void*
4172 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4173 INTERNAL_SIZE_T nb)
4174 {
4175 mchunkptr newp; /* chunk to return */
4176 INTERNAL_SIZE_T newsize; /* its size */
4177 void* newmem; /* corresponding user mem */
4178
4179 mchunkptr next; /* next contiguous chunk after oldp */
4180
4181 mchunkptr remainder; /* extra space at end of newp */
4182 unsigned long remainder_size; /* its size */
4183
4184 mchunkptr bck; /* misc temp for linking */
4185 mchunkptr fwd; /* misc temp for linking */
4186
4187 unsigned long copysize; /* bytes to copy */
4188 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4189 INTERNAL_SIZE_T* s; /* copy source */
4190 INTERNAL_SIZE_T* d; /* copy destination */
4191
4192 const char *errstr = NULL;
4193
4194 /* oldmem size */
4195 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4196 || __builtin_expect (oldsize >= av->system_mem, 0))
4197 {
4198 errstr = "realloc(): invalid old size";
4199 errout:
4200 malloc_printerr (check_action, errstr, chunk2mem(oldp));
4201 return NULL;
4202 }
4203
4204 check_inuse_chunk(av, oldp);
4205
4206 /* All callers already filter out mmap'ed chunks. */
4207 assert (!chunk_is_mmapped(oldp));
4208
4209 next = chunk_at_offset(oldp, oldsize);
4210 INTERNAL_SIZE_T nextsize = chunksize(next);
4211 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4212 || __builtin_expect (nextsize >= av->system_mem, 0))
4213 {
4214 errstr = "realloc(): invalid next size";
4215 goto errout;
4216 }
4217
4218 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4219 /* already big enough; split below */
4220 newp = oldp;
4221 newsize = oldsize;
4222 }
4223
4224 else {
4225 /* Try to expand forward into top */
4226 if (next == av->top &&
4227 (unsigned long)(newsize = oldsize + nextsize) >=
4228 (unsigned long)(nb + MINSIZE)) {
4229 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4230 av->top = chunk_at_offset(oldp, nb);
4231 set_head(av->top, (newsize - nb) | PREV_INUSE);
4232 check_inuse_chunk(av, oldp);
4233 return chunk2mem(oldp);
4234 }
4235
4236 /* Try to expand forward into next chunk; split off remainder below */
4237 else if (next != av->top &&
4238 !inuse(next) &&
4239 (unsigned long)(newsize = oldsize + nextsize) >=
4240 (unsigned long)(nb)) {
4241 newp = oldp;
4242 unlink(next, bck, fwd);
4243 }
4244
4245 /* allocate, copy, free */
4246 else {
4247 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4248 if (newmem == 0)
4249 return 0; /* propagate failure */
4250
4251 newp = mem2chunk(newmem);
4252 newsize = chunksize(newp);
4253
4254 /*
4255 Avoid copy if newp is next chunk after oldp.
4256 */
4257 if (newp == next) {
4258 newsize += oldsize;
4259 newp = oldp;
4260 }
4261 else {
4262 /*
4263 Unroll copy of <= 36 bytes (72 if 8-byte sizes)
4264 We know that contents have an odd number of
4265 INTERNAL_SIZE_T-sized words; minimally 3.
4266 */
4267
4268 copysize = oldsize - SIZE_SZ;
4269 s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
4270 d = (INTERNAL_SIZE_T*)(newmem);
4271 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4272 assert(ncopies >= 3);
4273
4274 if (ncopies > 9)
4275 MALLOC_COPY(d, s, copysize);
4276
4277 else {
4278 *(d+0) = *(s+0);
4279 *(d+1) = *(s+1);
4280 *(d+2) = *(s+2);
4281 if (ncopies > 4) {
4282 *(d+3) = *(s+3);
4283 *(d+4) = *(s+4);
4284 if (ncopies > 6) {
4285 *(d+5) = *(s+5);
4286 *(d+6) = *(s+6);
4287 if (ncopies > 8) {
4288 *(d+7) = *(s+7);
4289 *(d+8) = *(s+8);
4290 }
4291 }
4292 }
4293 }
4294
4295 _int_free(av, oldp, 1);
4296 check_inuse_chunk(av, newp);
4297 return chunk2mem(newp);
4298 }
4299 }
4300 }
4301
4302 /* If possible, free extra space in old or extended chunk */
4303
4304 assert((unsigned long)(newsize) >= (unsigned long)(nb));
4305
4306 remainder_size = newsize - nb;
4307
4308 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4309 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4310 set_inuse_bit_at_offset(newp, newsize);
4311 }
4312 else { /* split remainder */
4313 remainder = chunk_at_offset(newp, nb);
4314 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4315 set_head(remainder, remainder_size | PREV_INUSE |
4316 (av != &main_arena ? NON_MAIN_ARENA : 0));
4317 /* Mark remainder as inuse so free() won't complain */
4318 set_inuse_bit_at_offset(remainder, remainder_size);
4319 _int_free(av, remainder, 1);
4320 }
4321
4322 check_inuse_chunk(av, newp);
4323 return chunk2mem(newp);
4324 }
4325
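/*
  Caller-side note for the realloc path implemented above: because a
  failed realloc leaves the old block untouched, callers should keep the
  old pointer until the new one is known to be valid.  grow_buffer is a
  sketch name, not part of this malloc.

  #include <stdlib.h>

  // Resize buf to new_size bytes.  On failure returns NULL and the
  // original buffer is still valid and owned by the caller.
  static void *
  grow_buffer (void *buf, size_t new_size)
  {
    void *tmp = realloc (buf, new_size);
    if (tmp == NULL)
      return NULL;              // buf has not been freed
    return tmp;
  }

  // Typical use:
  //   void *q = grow_buffer (p, 4096);
  //   if (q != NULL) p = q;    // adopt the new pointer only on success
*/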
4326 /*
4327 ------------------------------ memalign ------------------------------
4328 */
4329
4330 static void*
4331 _int_memalign(mstate av, size_t alignment, size_t bytes)
4332 {
4333 INTERNAL_SIZE_T nb; /* padded request size */
4334 char* m; /* memory returned by malloc call */
4335 mchunkptr p; /* corresponding chunk */
4336 char* brk; /* alignment point within p */
4337 mchunkptr newp; /* chunk to return */
4338 INTERNAL_SIZE_T newsize; /* its size */
4339 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4340 mchunkptr remainder; /* spare room at end to split off */
4341 unsigned long remainder_size; /* its size */
4342 INTERNAL_SIZE_T size;
4343
4344 /* If we need less alignment than we give anyway, just relay to malloc */
4345
4346 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
4347
4348 /* Otherwise, ensure that it is at least a minimum chunk size */
4349
4350 if (alignment < MINSIZE) alignment = MINSIZE;
4351
4352 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4353 if ((alignment & (alignment - 1)) != 0) {
4354 size_t a = MALLOC_ALIGNMENT * 2;
4355 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4356 alignment = a;
4357 }
4358
4359 checked_request2size(bytes, nb);
4360
4361 /*
4362 Strategy: find a spot within that chunk that meets the alignment
4363 request, and then possibly free the leading and trailing space.
4364 */
4365
4366
4367 /* Call malloc with worst case padding to hit alignment. */
4368
4369 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4370
4371 if (m == 0) return 0; /* propagate failure */
4372
4373 p = mem2chunk(m);
4374
4375 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4376
4377 /*
4378 Find an aligned spot inside chunk. Since we need to give back
4379 leading space in a chunk of at least MINSIZE, if the first
4380 calculation places us at a spot with less than MINSIZE leader,
4381 we can move to the next aligned spot -- we've allocated enough
4382 total room so that this is always possible.
4383 */
4384
4385 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4386 -((signed long) alignment));
4387 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4388 brk += alignment;
4389
4390 newp = (mchunkptr)brk;
4391 leadsize = brk - (char*)(p);
4392 newsize = chunksize(p) - leadsize;
4393
4394 /* For mmapped chunks, just adjust offset */
4395 if (chunk_is_mmapped(p)) {
4396 newp->prev_size = p->prev_size + leadsize;
4397 set_head(newp, newsize|IS_MMAPPED);
4398 return chunk2mem(newp);
4399 }
4400
4401 /* Otherwise, give back leader, use the rest */
4402 set_head(newp, newsize | PREV_INUSE |
4403 (av != &main_arena ? NON_MAIN_ARENA : 0));
4404 set_inuse_bit_at_offset(newp, newsize);
4405 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4406 _int_free(av, p, 1);
4407 p = newp;
4408
4409 assert (newsize >= nb &&
4410 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
4411 }
4412
4413 /* Also give back spare room at the end */
4414 if (!chunk_is_mmapped(p)) {
4415 size = chunksize(p);
4416 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4417 remainder_size = size - nb;
4418 remainder = chunk_at_offset(p, nb);
4419 set_head(remainder, remainder_size | PREV_INUSE |
4420 (av != &main_arena ? NON_MAIN_ARENA : 0));
4421 set_head_size(p, nb);
4422 _int_free(av, remainder, 1);
4423 }
4424 }
4425
4426 check_inuse_chunk(av, p);
4427 return chunk2mem(p);
4428 }
4429
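/*
  Portable sketch of the strategy described above for _int_memalign:
  over-allocate by the alignment plus one pointer of bookkeeping, round
  the address up, and remember the original pointer so it can be freed.
  The real code instead gives the leading and trailing space back to the
  arena as free chunks.  aligned_malloc and aligned_free are sketch
  names; alignment must be a power of two of at least sizeof (void *).

  #include <stdint.h>
  #include <stdlib.h>

  static void *
  aligned_malloc (size_t alignment, size_t size)
  {
    void *raw = malloc (size + alignment + sizeof (void *));
    if (raw == NULL)
      return NULL;
    uintptr_t start = (uintptr_t) raw + sizeof (void *);
    uintptr_t aligned = (start + alignment - 1) & ~(uintptr_t) (alignment - 1);
    ((void **) aligned)[-1] = raw;      // stash the pointer for free()
    return (void *) aligned;
  }

  static void
  aligned_free (void *mem)
  {
    if (mem != NULL)
      free (((void **) mem)[-1]);
  }
*/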
4430
4431 /*
4432 ------------------------------ valloc ------------------------------
4433 */
4434
4435 static void*
4436 _int_valloc(mstate av, size_t bytes)
4437 {
4438 /* Ensure initialization/consolidation */
4439 if (have_fastchunks(av)) malloc_consolidate(av);
4440 return _int_memalign(av, GLRO(dl_pagesize), bytes);
4441 }
4442
4443 /*
4444 ------------------------------ pvalloc ------------------------------
4445 */
4446
4447
4448 static void*
4449 _int_pvalloc(mstate av, size_t bytes)
4450 {
4451 size_t pagesz;
4452
4453 /* Ensure initialization/consolidation */
4454 if (have_fastchunks(av)) malloc_consolidate(av);
4455 pagesz = GLRO(dl_pagesize);
4456 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
4457 }
4458
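/*
  The rounding above relies on the page size being a power of two; the
  same arithmetic in isolation, with round_to_page as a sketch name:

  #include <unistd.h>

  // Round n up to the next multiple of the system page size.
  static size_t
  round_to_page (size_t n)
  {
    size_t pagesz = (size_t) sysconf (_SC_PAGESIZE);
    return (n + pagesz - 1) & ~(pagesz - 1);
  }
*/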
4459
4460 /*
4461 ------------------------------ malloc_trim ------------------------------
4462 */
4463
4464 static int mtrim(mstate av, size_t pad)
4465 {
4466 /* Ensure initialization/consolidation */
4467 malloc_consolidate (av);
4468
4469 const size_t ps = GLRO(dl_pagesize);
4470 int psindex = bin_index (ps);
4471 const size_t psm1 = ps - 1;
4472
4473 int result = 0;
4474 for (int i = 1; i < NBINS; ++i)
4475 if (i == 1 || i >= psindex)
4476 {
4477 mbinptr bin = bin_at (av, i);
4478
4479 for (mchunkptr p = last (bin); p != bin; p = p->bk)
4480 {
4481 INTERNAL_SIZE_T size = chunksize (p);
4482
4483 if (size > psm1 + sizeof (struct malloc_chunk))
4484 {
4485 /* See whether the chunk contains at least one unused page. */
4486 char *paligned_mem = (char *) (((uintptr_t) p
4487 + sizeof (struct malloc_chunk)
4488 + psm1) & ~psm1);
4489
4490 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4491 assert ((char *) p + size > paligned_mem);
4492
4493 /* This is the size we could potentially free. */
4494 size -= paligned_mem - (char *) p;
4495
4496 if (size > psm1)
4497 {
4498 #ifdef MALLOC_DEBUG
4499 /* When debugging we simulate destroying the memory
4500 content. */
4501 memset (paligned_mem, 0x89, size & ~psm1);
4502 #endif
4503 madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
4504
4505 result = 1;
4506 }
4507 }
4508 }
4509 }
4510
4511 #ifndef MORECORE_CANNOT_TRIM
4512 return result | (av == &main_arena ? systrim (pad, av) : 0);
4513 #else
4514 return result;
4515 #endif
4516 }
4517
4518
4519 int
4520 __malloc_trim(size_t s)
4521 {
4522 int result = 0;
4523
4524 if(__malloc_initialized < 0)
4525 ptmalloc_init ();
4526
4527 mstate ar_ptr = &main_arena;
4528 do
4529 {
4530 (void) mutex_lock (&ar_ptr->mutex);
4531 result |= mtrim (ar_ptr, s);
4532 (void) mutex_unlock (&ar_ptr->mutex);
4533
4534 ar_ptr = ar_ptr->next;
4535 }
4536 while (ar_ptr != &main_arena);
4537
4538 return result;
4539 }
4540
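/*
  Usage sketch for the exported malloc_trim entry point above.  After
  freeing a large amount of memory an application may ask the allocator
  to return unused pages to the system; the argument is the amount of
  slack to keep at the top of the heap, and the return value is 1 when
  some memory was actually released.  The sizes below are arbitrary.

  #include <malloc.h>
  #include <stdlib.h>

  int main (void)
  {
    enum { N = 1024, SZ = 64 * 1024 };
    static void *blocks[N];

    for (int i = 0; i < N; ++i)
      blocks[i] = malloc (SZ);
    for (int i = 0; i < N; ++i)
      free (blocks[i]);

    return malloc_trim (0) ? 0 : 1;
  }
*/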
4541
4542 /*
4543 ------------------------- malloc_usable_size -------------------------
4544 */
4545
4546 static size_t
4547 musable(void* mem)
4548 {
4549 mchunkptr p;
4550 if (mem != 0) {
4551 p = mem2chunk(mem);
4552 if (chunk_is_mmapped(p))
4553 return chunksize(p) - 2*SIZE_SZ;
4554 else if (inuse(p))
4555 return chunksize(p) - SIZE_SZ;
4556 }
4557 return 0;
4558 }
4559
4560
4561 size_t
4562 __malloc_usable_size(void* m)
4563 {
4564 size_t result;
4565
4566 result = musable(m);
4567 return result;
4568 }
4569
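/*
  Usage sketch for malloc_usable_size above: the value reflects the chunk
  that was actually carved out, so it is often larger than the requested
  size; portable code should still treat only the requested bytes as its
  own.

  #include <malloc.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main (void)
  {
    void *p = malloc (100);
    if (p == NULL)
      return 1;
    printf ("requested 100, usable %zu\n", malloc_usable_size (p));
    free (p);
    return 0;
  }
*/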
4570 /*
4571 ------------------------------ mallinfo ------------------------------
4572 Accumulate malloc statistics for arena AV into M.
4573 */
4574
4575 static void
4576 int_mallinfo(mstate av, struct mallinfo *m)
4577 {
4578 size_t i;
4579 mbinptr b;
4580 mchunkptr p;
4581 INTERNAL_SIZE_T avail;
4582 INTERNAL_SIZE_T fastavail;
4583 int nblocks;
4584 int nfastblocks;
4585
4586 /* Ensure initialization */
4587 if (av->top == 0) malloc_consolidate(av);
4588
4589 check_malloc_state(av);
4590
4591 /* Account for top */
4592 avail = chunksize(av->top);
4593 nblocks = 1; /* top always exists */
4594
4595 /* traverse fastbins */
4596 nfastblocks = 0;
4597 fastavail = 0;
4598
4599 for (i = 0; i < NFASTBINS; ++i) {
4600 for (p = fastbin (av, i); p != 0; p = p->fd) {
4601 ++nfastblocks;
4602 fastavail += chunksize(p);
4603 }
4604 }
4605
4606 avail += fastavail;
4607
4608 /* traverse regular bins */
4609 for (i = 1; i < NBINS; ++i) {
4610 b = bin_at(av, i);
4611 for (p = last(b); p != b; p = p->bk) {
4612 ++nblocks;
4613 avail += chunksize(p);
4614 }
4615 }
4616
4617 m->smblks += nfastblocks;
4618 m->ordblks += nblocks;
4619 m->fordblks += avail;
4620 m->uordblks += av->system_mem - avail;
4621 m->arena += av->system_mem;
4622 m->fsmblks += fastavail;
4623 if (av == &main_arena)
4624 {
4625 m->hblks = mp_.n_mmaps;
4626 m->hblkhd = mp_.mmapped_mem;
4627 m->usmblks = mp_.max_total_mem;
4628 m->keepcost = chunksize(av->top);
4629 }
4630 }
4631
4632
4633 struct mallinfo __libc_mallinfo()
4634 {
4635 struct mallinfo m;
4636 mstate ar_ptr;
4637
4638 if(__malloc_initialized < 0)
4639 ptmalloc_init ();
4640
4641 memset(&m, 0, sizeof (m));
4642 ar_ptr = &main_arena;
4643 do {
4644 (void)mutex_lock(&ar_ptr->mutex);
4645 int_mallinfo(ar_ptr, &m);
4646 (void)mutex_unlock(&ar_ptr->mutex);
4647
4648 ar_ptr = ar_ptr->next;
4649 } while (ar_ptr != &main_arena);
4650
4651 return m;
4652 }
4653
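/*
  Usage sketch for the mallinfo interface filled in by int_mallinfo
  above.  The struct fields are plain ints in this historical interface,
  so the counters can wrap on very large heaps.

  #include <malloc.h>
  #include <stdio.h>

  int main (void)
  {
    struct mallinfo mi = mallinfo ();

    printf ("arena (sbrk) bytes    : %d\n", mi.arena);
    printf ("in-use bytes          : %d\n", mi.uordblks);
    printf ("free bytes            : %d\n", mi.fordblks);
    printf ("mmap regions / bytes  : %d / %d\n", mi.hblks, mi.hblkhd);
    printf ("trimmable (keepcost)  : %d\n", mi.keepcost);
    return 0;
  }
*/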
4654 /*
4655 ------------------------------ malloc_stats ------------------------------
4656 */
4657
4658 void
4659 __malloc_stats()
4660 {
4661 int i;
4662 mstate ar_ptr;
4663 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
4664 #if THREAD_STATS
4665 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
4666 #endif
4667
4668 if(__malloc_initialized < 0)
4669 ptmalloc_init ();
4670 _IO_flockfile (stderr);
4671 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
4672 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
4673 for (i=0, ar_ptr = &main_arena;; i++) {
4674 struct mallinfo mi;
4675
4676 memset(&mi, 0, sizeof(mi));
4677 (void)mutex_lock(&ar_ptr->mutex);
4678 int_mallinfo(ar_ptr, &mi);
4679 fprintf(stderr, "Arena %d:\n", i);
4680 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
4681 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
4682 #if MALLOC_DEBUG > 1
4683 if (i > 0)
4684 dump_heap(heap_for_ptr(top(ar_ptr)));
4685 #endif
4686 system_b += mi.arena;
4687 in_use_b += mi.uordblks;
4688 #if THREAD_STATS
4689 stat_lock_direct += ar_ptr->stat_lock_direct;
4690 stat_lock_loop += ar_ptr->stat_lock_loop;
4691 stat_lock_wait += ar_ptr->stat_lock_wait;
4692 #endif
4693 (void)mutex_unlock(&ar_ptr->mutex);
4694 ar_ptr = ar_ptr->next;
4695 if(ar_ptr == &main_arena) break;
4696 }
4697 fprintf(stderr, "Total (incl. mmap):\n");
4698 fprintf(stderr, "system bytes = %10u\n", system_b);
4699 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
4700 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
4701 fprintf(stderr, "max mmap bytes = %10lu\n",
4702 (unsigned long)mp_.max_mmapped_mem);
4703 #if THREAD_STATS
4704 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
4705 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
4706 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
4707 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
4708 fprintf(stderr, "locked total = %10ld\n",
4709 stat_lock_direct + stat_lock_loop + stat_lock_wait);
4710 #endif
4711 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
4712 _IO_funlockfile (stderr);
4713 }
4714
4715
4716 /*
4717 ------------------------------ mallopt ------------------------------
4718 */
4719
4720 int __libc_mallopt(int param_number, int value)
4721 {
4722 mstate av = &main_arena;
4723 int res = 1;
4724
4725 if(__malloc_initialized < 0)
4726 ptmalloc_init ();
4727 (void)mutex_lock(&av->mutex);
4728 /* Ensure initialization/consolidation */
4729 malloc_consolidate(av);
4730
4731 switch(param_number) {
4732 case M_MXFAST:
4733 if (value >= 0 && value <= MAX_FAST_SIZE) {
4734 set_max_fast(value);
4735 }
4736 else
4737 res = 0;
4738 break;
4739
4740 case M_TRIM_THRESHOLD:
4741 mp_.trim_threshold = value;
4742 mp_.no_dyn_threshold = 1;
4743 break;
4744
4745 case M_TOP_PAD:
4746 mp_.top_pad = value;
4747 mp_.no_dyn_threshold = 1;
4748 break;
4749
4750 case M_MMAP_THRESHOLD:
4751 /* Forbid setting the threshold too high. */
4752 if((unsigned long)value > HEAP_MAX_SIZE/2)
4753 res = 0;
4754 else
4755 mp_.mmap_threshold = value;
4756 mp_.no_dyn_threshold = 1;
4757 break;
4758
4759 case M_MMAP_MAX:
4760 mp_.n_mmaps_max = value;
4761 mp_.no_dyn_threshold = 1;
4762 break;
4763
4764 case M_CHECK_ACTION:
4765 check_action = value;
4766 break;
4767
4768 case M_PERTURB:
4769 perturb_byte = value;
4770 break;
4771
4772 #ifdef PER_THREAD
4773 case M_ARENA_TEST:
4774 if (value > 0)
4775 mp_.arena_test = value;
4776 break;
4777
4778 case M_ARENA_MAX:
4779 if (value > 0)
4780 mp_.arena_max = value;
4781 break;
4782 #endif
4783 }
4784 (void)mutex_unlock(&av->mutex);
4785 return res;
4786 }
4787 libc_hidden_def (__libc_mallopt)
4788
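/*
  Tuning sketch for the mallopt switch above.  The M_* constants come
  from <malloc.h>; the values are examples only, and should be set before
  the bulk of allocation happens because several of them disable the
  dynamic threshold adjustment.

  #include <malloc.h>

  int main (void)
  {
    // Route requests of 1 MiB and larger through mmap.
    mallopt (M_MMAP_THRESHOLD, 1024 * 1024);

    // Trim the heap once 512 KiB of the top chunk is unused.
    mallopt (M_TRIM_THRESHOLD, 512 * 1024);

    // Fill freshly allocated and freed memory with a pattern derived
    // from 0x5a to help catch uses of uninitialized or freed memory.
    mallopt (M_PERTURB, 0x5a);

    return 0;
  }
*/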
4789
4790 /*
4791 -------------------- Alternative MORECORE functions --------------------
4792 */
4793
4794
4795 /*
4796 General Requirements for MORECORE.
4797
4798 The MORECORE function must have the following properties:
4799
4800 If MORECORE_CONTIGUOUS is false:
4801
4802 * MORECORE must allocate in multiples of pagesize. It will
4803 only be called with arguments that are multiples of pagesize.
4804
4805 * MORECORE(0) must return an address that is at least
4806 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4807
4808 else (i.e. If MORECORE_CONTIGUOUS is true):
4809
4810 * Consecutive calls to MORECORE with positive arguments
4811 return increasing addresses, indicating that space has been
4812 contiguously extended.
4813
4814 * MORECORE need not allocate in multiples of pagesize.
4815 Calls to MORECORE need not have args of multiples of pagesize.
4816
4817 * MORECORE need not page-align.
4818
4819 In either case:
4820
4821 * MORECORE may allocate more memory than requested. (Or even less,
4822 but this will generally result in a malloc failure.)
4823
4824 * MORECORE must not allocate memory when given argument zero, but
4825 instead return one past the end address of memory from previous
4826 nonzero call. This malloc does NOT call MORECORE(0)
4827 until at least one call with positive arguments is made, so
4828 the initial value returned is not important.
4829
4830 * Even though consecutive calls to MORECORE need not return contiguous
4831 addresses, it must be OK for malloc'ed chunks to span multiple
4832 regions in those cases where they do happen to be contiguous.
4833
4834 * MORECORE need not handle negative arguments -- it may instead
4835 just return MORECORE_FAILURE when given negative arguments.
4836 Negative arguments are always multiples of pagesize. MORECORE
4837 must not misinterpret negative args as large positive unsigned
4838 args. You can suppress all such calls from even occurring by defining
4839 MORECORE_CANNOT_TRIM.
4840
4841 There is some variation across systems about the type of the
4842 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4843 actually be size_t, because sbrk supports negative args, so it is
4844 normally the signed type of the same width as size_t (sometimes
4845 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4846 matter though. Internally, we use "long" as arguments, which should
4847 work across all reasonable possibilities.
4848
4849 Additionally, if MORECORE ever returns failure for a positive
4850 request, then mmap is used as a noncontiguous system allocator. This
4851 is a useful backup strategy for systems with holes in address spaces
4852 -- in this case sbrk cannot contiguously expand the heap, but mmap
4853 may be able to map noncontiguous space.
4854
4855 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4856 a function that always returns MORECORE_FAILURE.
4857
4858 If you are using this malloc with something other than sbrk (or its
4859 emulation) to supply memory regions, you probably want to set
4860 MORECORE_CONTIGUOUS as false. As an example, here is a custom
4861 allocator kindly contributed for pre-OSX macOS. It uses virtually
4862 but not necessarily physically contiguous non-paged memory (locked
4863 in, present and won't get swapped out). You can use it by
4864 uncommenting this section, adding some #includes, and setting up the
4865 appropriate defines above:
4866
4867 #define MORECORE osMoreCore
4868 #define MORECORE_CONTIGUOUS 0
4869
4870 There is also a shutdown routine that should somehow be called for
4871 cleanup upon program exit.
4872
4873 #define MAX_POOL_ENTRIES 100
4874 #define MINIMUM_MORECORE_SIZE (64 * 1024)
4875 static int next_os_pool;
4876 void *our_os_pools[MAX_POOL_ENTRIES];
4877
4878 void *osMoreCore(int size)
4879 {
4880 void *ptr = 0;
4881 static void *sbrk_top = 0;
4882
4883 if (size > 0)
4884 {
4885 if (size < MINIMUM_MORECORE_SIZE)
4886 size = MINIMUM_MORECORE_SIZE;
4887 if (CurrentExecutionLevel() == kTaskLevel)
4888 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4889 if (ptr == 0)
4890 {
4891 return (void *) MORECORE_FAILURE;
4892 }
4893 // save ptrs so they can be freed during cleanup
4894 our_os_pools[next_os_pool] = ptr;
4895 next_os_pool++;
4896 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4897 sbrk_top = (char *) ptr + size;
4898 return ptr;
4899 }
4900 else if (size < 0)
4901 {
4902 // we don't currently support shrink behavior
4903 return (void *) MORECORE_FAILURE;
4904 }
4905 else
4906 {
4907 return sbrk_top;
4908 }
4909 }
4910
4911 // cleanup any allocated memory pools
4912 // called as last thing before shutting down driver
4913
4914 void osCleanupMem(void)
4915 {
4916 void **ptr;
4917
4918 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4919 if (*ptr)
4920 {
4921 PoolDeallocate(*ptr);
4922 *ptr = 0;
4923 }
4924 }
4925
4926 */
4927
4928
4929 /* Helper code. */
4930
4931 extern char **__libc_argv attribute_hidden;
4932
4933 static void
4934 malloc_printerr(int action, const char *str, void *ptr)
4935 {
4936 if ((action & 5) == 5)
4937 __libc_message (action & 2, "%s\n", str);
4938 else if (action & 1)
4939 {
4940 char buf[2 * sizeof (uintptr_t) + 1];
4941
4942 buf[sizeof (buf) - 1] = '\0';
4943 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
4944 while (cp > buf)
4945 *--cp = '0';
4946
4947 __libc_message (action & 2,
4948 "*** glibc detected *** %s: %s: 0x%s ***\n",
4949 __libc_argv[0] ?: "<unknown>", str, cp);
4950 }
4951 else if (action & 2)
4952 abort ();
4953 }
4954
4955 #include <sys/param.h>
4956
4957 /* We need a wrapper function for one of the additions of POSIX. */
4958 int
4959 __posix_memalign (void **memptr, size_t alignment, size_t size)
4960 {
4961 void *mem;
4962
4963 /* Test whether the ALIGNMENT argument is valid. It must be a power of
4964 two multiple of sizeof (void *). */
4965 if (alignment % sizeof (void *) != 0
4966 || !powerof2 (alignment / sizeof (void *)) != 0
4967 || alignment == 0)
4968 return EINVAL;
4969
4970 /* Call the hook here, so that caller is posix_memalign's caller
4971 and not posix_memalign itself. */
4972 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
4973 const __malloc_ptr_t)) =
4974 force_reg (__memalign_hook);
4975 if (__builtin_expect (hook != NULL, 0))
4976 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
4977 else
4978 mem = __libc_memalign (alignment, size);
4979
4980 if (mem != NULL) {
4981 *memptr = mem;
4982 return 0;
4983 }
4984
4985 return ENOMEM;
4986 }
4987 weak_alias (__posix_memalign, posix_memalign)
4988
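/*
  Usage sketch for the POSIX wrapper above.  Unlike the other allocation
  entry points it reports failure through its return value and does not
  set errno; the alignment must be a power-of-two multiple of
  sizeof (void *).

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main (void)
  {
    void *buf;
    int err = posix_memalign (&buf, 4096, 8192);
    if (err != 0)
      {
        // err is EINVAL or ENOMEM.
        fprintf (stderr, "posix_memalign: %s\n", strerror (err));
        return 1;
      }
    free (buf);
    return 0;
  }
*/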
4989
4990 int
4991 malloc_info (int options, FILE *fp)
4992 {
4993 /* For now, at least. */
4994 if (options != 0)
4995 return EINVAL;
4996
4997 int n = 0;
4998 size_t total_nblocks = 0;
4999 size_t total_nfastblocks = 0;
5000 size_t total_avail = 0;
5001 size_t total_fastavail = 0;
5002 size_t total_system = 0;
5003 size_t total_max_system = 0;
5004 size_t total_aspace = 0;
5005 size_t total_aspace_mprotect = 0;
5006
5007 void mi_arena (mstate ar_ptr)
5008 {
5009 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5010
5011 size_t nblocks = 0;
5012 size_t nfastblocks = 0;
5013 size_t avail = 0;
5014 size_t fastavail = 0;
5015 struct
5016 {
5017 size_t from;
5018 size_t to;
5019 size_t total;
5020 size_t count;
5021 } sizes[NFASTBINS + NBINS - 1];
5022 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5023
5024 mutex_lock (&ar_ptr->mutex);
5025
5026 for (size_t i = 0; i < NFASTBINS; ++i)
5027 {
5028 mchunkptr p = fastbin (ar_ptr, i);
5029 if (p != NULL)
5030 {
5031 size_t nthissize = 0;
5032 size_t thissize = chunksize (p);
5033
5034 while (p != NULL)
5035 {
5036 ++nthissize;
5037 p = p->fd;
5038 }
5039
5040 fastavail += nthissize * thissize;
5041 nfastblocks += nthissize;
5042 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5043 sizes[i].to = thissize;
5044 sizes[i].count = nthissize;
5045 }
5046 else
5047 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5048
5049 sizes[i].total = sizes[i].count * sizes[i].to;
5050 }
5051
5052 mbinptr bin = bin_at (ar_ptr, 1);
5053 struct malloc_chunk *r = bin->fd;
5054 if (r != NULL)
5055 {
5056 while (r != bin)
5057 {
5058 ++sizes[NFASTBINS].count;
5059 sizes[NFASTBINS].total += r->size;
5060 sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size);
5061 sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size);
5062 r = r->fd;
5063 }
5064 nblocks += sizes[NFASTBINS].count;
5065 avail += sizes[NFASTBINS].total;
5066 }
5067
5068 for (size_t i = 2; i < NBINS; ++i)
5069 {
5070 bin = bin_at (ar_ptr, i);
5071 r = bin->fd;
5072 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5073 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5074 = sizes[NFASTBINS - 1 + i].count = 0;
5075
5076 if (r != NULL)
5077 while (r != bin)
5078 {
5079 ++sizes[NFASTBINS - 1 + i].count;
5080 sizes[NFASTBINS - 1 + i].total += r->size;
5081 sizes[NFASTBINS - 1 + i].from
5082 = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
5083 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5084 r->size);
5085
5086 r = r->fd;
5087 }
5088
5089 if (sizes[NFASTBINS - 1 + i].count == 0)
5090 sizes[NFASTBINS - 1 + i].from = 0;
5091 nblocks += sizes[NFASTBINS - 1 + i].count;
5092 avail += sizes[NFASTBINS - 1 + i].total;
5093 }
5094
5095 mutex_unlock (&ar_ptr->mutex);
5096
5097 total_nfastblocks += nfastblocks;
5098 total_fastavail += fastavail;
5099
5100 total_nblocks += nblocks;
5101 total_avail += avail;
5102
5103 for (size_t i = 0; i < nsizes; ++i)
5104 if (sizes[i].count != 0 && i != NFASTBINS)
5105 fprintf (fp, "\
5106 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5107 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5108
5109 if (sizes[NFASTBINS].count != 0)
5110 fprintf (fp, "\
5111 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5112 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5113 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5114
5115 total_system += ar_ptr->system_mem;
5116 total_max_system += ar_ptr->max_system_mem;
5117
5118 fprintf (fp,
5119 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5120 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5121 "<system type=\"current\" size=\"%zu\"/>\n"
5122 "<system type=\"max\" size=\"%zu\"/>\n",
5123 nfastblocks, fastavail, nblocks, avail,
5124 ar_ptr->system_mem, ar_ptr->max_system_mem);
5125
5126 if (ar_ptr != &main_arena)
5127 {
5128 heap_info *heap = heap_for_ptr(top(ar_ptr));
5129 fprintf (fp,
5130 "<aspace type=\"total\" size=\"%zu\"/>\n"
5131 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5132 heap->size, heap->mprotect_size);
5133 total_aspace += heap->size;
5134 total_aspace_mprotect += heap->mprotect_size;
5135 }
5136 else
5137 {
5138 fprintf (fp,
5139 "<aspace type=\"total\" size=\"%zu\"/>\n"
5140 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5141 ar_ptr->system_mem, ar_ptr->system_mem);
5142 total_aspace += ar_ptr->system_mem;
5143 total_aspace_mprotect += ar_ptr->system_mem;
5144 }
5145
5146 fputs ("</heap>\n", fp);
5147 }
5148
5149 if(__malloc_initialized < 0)
5150 ptmalloc_init ();
5151
5152 fputs ("<malloc version=\"1\">\n", fp);
5153
5154 /* Iterate over all arenas currently in use. */
5155 mstate ar_ptr = &main_arena;
5156 do
5157 {
5158 mi_arena (ar_ptr);
5159 ar_ptr = ar_ptr->next;
5160 }
5161 while (ar_ptr != &main_arena);
5162
5163 fprintf (fp,
5164 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5165 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5166 "<system type=\"current\" size=\"%zu\"/>\n"
5167 "<system type=\"max\" size=\"%zu\"/>\n"
5168 "<aspace type=\"total\" size=\"%zu\"/>\n"
5169 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5170 "</malloc>\n",
5171 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
5172 total_system, total_max_system,
5173 total_aspace, total_aspace_mprotect);
5174
5175 return 0;
5176 }
5177
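/*
  Usage sketch for malloc_info above: it writes an XML document like the
  one assembled by mi_arena to the given stdio stream; options must
  currently be 0.

  #include <malloc.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main (void)
  {
    void *p = malloc (4096);    // touch the allocator first

    if (malloc_info (0, stdout) != 0)
      perror ("malloc_info");

    free (p);
    return 0;
  }
*/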
5178
5179 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5180 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5181 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5182 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5183 strong_alias (__libc_memalign, __memalign)
5184 weak_alias (__libc_memalign, memalign)
5185 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5186 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5187 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5188 strong_alias (__libc_mallinfo, __mallinfo)
5189 weak_alias (__libc_mallinfo, mallinfo)
5190 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5191
5192 weak_alias (__malloc_stats, malloc_stats)
5193 weak_alias (__malloc_usable_size, malloc_usable_size)
5194 weak_alias (__malloc_trim, malloc_trim)
5195 weak_alias (__malloc_get_state, malloc_get_state)
5196 weak_alias (__malloc_set_state, malloc_set_state)
5197
5198
5199 /* ------------------------------------------------------------
5200 History:
5201
5202 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5203
5204 */
5205 /*
5206 * Local variables:
5207 * c-basic-offset: 2
5208 * End:
5209 */