1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2006, 2007 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /*
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
25
26 * Version ptmalloc2-20011215
27 based on:
28 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
29
30 * Quickstart
31
32 In order to compile this implementation, a Makefile is provided with
33 the ptmalloc2 distribution, which has pre-defined targets for some
34 popular systems (e.g. "make posix" for Posix threads). All that is
35 typically required with regard to compiler flags is the selection of
36 the thread package via defining one out of USE_PTHREADS, USE_THR or
37 USE_SPROC. Check the thread-m.h file for what effects this has.
38 Many/most systems will additionally require USE_TSD_DATA_HACK to be
39 defined, so this is the default for "make posix".
40
41 * Why use this malloc?
42
43 This is not the fastest, most space-conserving, most portable, or
44 most tunable malloc ever written. However it is among the fastest
45 while also being among the most space-conserving, portable and tunable.
46 Consistent balance across these factors results in a good general-purpose
47 allocator for malloc-intensive programs.
48
49 The main properties of the algorithms are:
50 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
51 with ties normally decided via FIFO (i.e. least recently used).
52 * For small (<= 64 bytes by default) requests, it is a caching
53 allocator, that maintains pools of quickly recycled chunks.
54 * In between, and for combinations of large and small requests, it does
55 the best it can trying to meet both goals at once.
56 * For very large requests (>= 128KB by default), it relies on system
57 memory mapping facilities, if supported.
58
59 For a longer but slightly out of date high-level description, see
60 http://gee.cs.oswego.edu/dl/html/malloc.html
61
62 You may already by default be using a C library containing a malloc
63 that is based on some version of this malloc (for example in
64 linux). You might still want to use the one in this file in order to
65 customize settings or to avoid overheads associated with library
66 versions.
67
68 * Contents, described in more detail in "description of public routines" below.
69
70 Standard (ANSI/SVID/...) functions:
71 malloc(size_t n);
72 calloc(size_t n_elements, size_t element_size);
73 free(Void_t* p);
74 realloc(Void_t* p, size_t n);
75 memalign(size_t alignment, size_t n);
76 valloc(size_t n);
77 mallinfo()
78 mallopt(int parameter_number, int parameter_value)
79
80 Additional functions:
81 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
82 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
83 pvalloc(size_t n);
84 cfree(Void_t* p);
85 malloc_trim(size_t pad);
86 malloc_usable_size(Void_t* p);
87 malloc_stats();
88
89 * Vital statistics:
90
91 Supported pointer representation: 4 or 8 bytes
92 Supported size_t representation: 4 or 8 bytes
93 Note that size_t is allowed to be 4 bytes even if pointers are 8.
94 You can adjust this by defining INTERNAL_SIZE_T
95
96 Alignment: 2 * sizeof(size_t) (default)
97 (i.e., 8 byte alignment with 4byte size_t). This suffices for
98 nearly all current machines and C compilers. However, you can
99 define MALLOC_ALIGNMENT to be wider than this if necessary.
100
101 Minimum overhead per allocated chunk: 4 or 8 bytes
102 Each malloced chunk has a hidden word of overhead holding size
103 and status information.
104
105 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
106 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
107
108 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
109 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
110 needed; 4 (8) for a trailing size field and 8 (16) bytes for
111 free list pointers. Thus, the minimum allocatable size is
112 16/24/32 bytes.
113
114 Even a request for zero bytes (i.e., malloc(0)) returns a
115 pointer to something of the minimum allocatable size.
116
117 The maximum overhead wastage (i.e., number of extra bytes
118 allocated than were requested in malloc) is less than or equal
119 to the minimum size, except for requests >= mmap_threshold that
120 are serviced via mmap(), where the worst case wastage is 2 *
121 sizeof(size_t) bytes plus the remainder from a system page (the
122 minimal mmap unit); typically 4096 or 8192 bytes.
123
124 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
125 8-byte size_t: 2^64 minus about two pages
126
127 It is assumed that (possibly signed) size_t values suffice to
128 represent chunk sizes. `Possibly signed' is due to the fact
129 that `size_t' may be defined on a system as either a signed or
130 an unsigned type. The ISO C standard says that it must be
131 unsigned, but a few systems are known not to adhere to this.
132 Additionally, even when size_t is unsigned, sbrk (which is by
133 default used to obtain memory from system) accepts signed
134 arguments, and may not be able to handle size_t-wide arguments
135 with negative sign bit. Generally, values that would
136 appear as negative after accounting for overhead and alignment
137 are supported only via mmap(), which does not have this
138 limitation.
139
140 Requests for sizes outside the allowed range will perform an optional
141 failure action and then return null. (Requests may also
142 fail because a system is out of memory.)
143
144 Thread-safety: thread-safe unless NO_THREADS is defined
145
146 Compliance: I believe it is compliant with the 1997 Single Unix Specification
147 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
148 others as well.
149
150 * Synopsis of compile-time options:
151
152 People have reported using previous versions of this malloc on all
153 versions of Unix, sometimes by tweaking some of the defines
154 below. It has been tested most extensively on Solaris and
155 Linux. It is also reported to work on WIN32 platforms.
156 People also report using it in stand-alone embedded systems.
157
158 The implementation is in straight, hand-tuned ANSI C. It is not
159 at all modular. (Sorry!) It uses a lot of macros. To be at all
160 usable, this code should be compiled using an optimizing compiler
161 (for example gcc -O3) that can simplify expressions and control
162 paths. (FAQ: some macros import variables as arguments rather than
163 declare locals because people reported that some debuggers
164 otherwise get confused.)
165
166 OPTION DEFAULT VALUE
167
168 Compilation Environment options:
169
170 __STD_C derived from C compiler defines
171 WIN32 NOT defined
172 HAVE_MEMCPY defined
173 USE_MEMCPY 1 if HAVE_MEMCPY is defined
174 HAVE_MMAP defined as 1
175 MMAP_CLEARS 1
176 HAVE_MREMAP 0 unless linux defined
177 USE_ARENAS the same as HAVE_MMAP
178 malloc_getpagesize derived from system #includes, or 4096 if not
179 HAVE_USR_INCLUDE_MALLOC_H NOT defined
180 LACKS_UNISTD_H NOT defined unless WIN32
181 LACKS_SYS_PARAM_H NOT defined unless WIN32
182 LACKS_SYS_MMAN_H NOT defined unless WIN32
183
184 Changing default word sizes:
185
186 INTERNAL_SIZE_T size_t
187 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
188 __alignof__ (long double))
189
190 Configuration and functionality options:
191
192 USE_DL_PREFIX NOT defined
193 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
194 USE_MALLOC_LOCK NOT defined
195 MALLOC_DEBUG NOT defined
196 REALLOC_ZERO_BYTES_FREES 1
197 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
198 TRIM_FASTBINS 0
199
200 Options for customizing MORECORE:
201
202 MORECORE sbrk
203 MORECORE_FAILURE -1
204 MORECORE_CONTIGUOUS 1
205 MORECORE_CANNOT_TRIM NOT defined
206 MORECORE_CLEARS 1
207 MMAP_AS_MORECORE_SIZE (1024 * 1024)
208
209 Tuning options that are also dynamically changeable via mallopt:
210
211 DEFAULT_MXFAST 64
212 DEFAULT_TRIM_THRESHOLD 128 * 1024
213 DEFAULT_TOP_PAD 0
214 DEFAULT_MMAP_THRESHOLD 128 * 1024
215 DEFAULT_MMAP_MAX 65536
216
217 There are several other #defined constants and macros that you
218 probably don't want to touch unless you are extending or adapting malloc. */
219
220 /*
221 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
222 compiler, or a C compiler sufficiently close to ANSI to get away
223 with it.
224 */
225
226 #ifndef __STD_C
227 #if defined(__STDC__) || defined(__cplusplus)
228 #define __STD_C 1
229 #else
230 #define __STD_C 0
231 #endif
232 #endif /*__STD_C*/
233
234
235 /*
236 Void_t* is the pointer type that malloc should say it returns
237 */
238
239 #ifndef Void_t
240 #if (__STD_C || defined(WIN32))
241 #define Void_t void
242 #else
243 #define Void_t char
244 #endif
245 #endif /*Void_t*/
246
247 #if __STD_C
248 #include <stddef.h> /* for size_t */
249 #include <stdlib.h> /* for getenv(), abort() */
250 #else
251 #include <sys/types.h>
252 #endif
253
254 #include <malloc-machine.h>
255
256 #ifdef _LIBC
257 #include <stdio-common/_itoa.h>
258 #include <bits/wordsize.h>
259 #endif
260
261 #ifdef __cplusplus
262 extern "C" {
263 #endif
264
265 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
266
267 /* #define LACKS_UNISTD_H */
268
269 #ifndef LACKS_UNISTD_H
270 #include <unistd.h>
271 #endif
272
273 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
274
275 /* #define LACKS_SYS_PARAM_H */
276
277
278 #include <stdio.h> /* needed for malloc_stats */
279 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
280
281 /* For uintptr_t. */
282 #include <stdint.h>
283
284 /* For va_arg, va_start, va_end. */
285 #include <stdarg.h>
286
287 /* For writev and struct iovec. */
288 #include <sys/uio.h>
289 /* For syslog. */
290 #include <sys/syslog.h>
291
292 /* For various dynamic linking things. */
293 #include <dlfcn.h>
294
295
296 /*
297 Debugging:
298
299 Because freed chunks may be overwritten with bookkeeping fields, this
300 malloc will often die when freed memory is overwritten by user
301 programs. This can be very effective (albeit in an annoying way)
302 in helping track down dangling pointers.
303
304 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
305 enabled that will catch more memory errors. You probably won't be
306 able to make much sense of the actual assertion errors, but they
307 should help you locate incorrectly overwritten memory. The checking
308 is fairly extensive, and will slow down execution
309 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
310 will attempt to check every non-mmapped allocated and free chunk in
311 the course of computing the summaries. (By nature, mmapped regions
312 cannot be checked very much automatically.)
313
314 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
315 this code. The assertions in the check routines spell out in more
316 detail the assumptions and invariants underlying the algorithms.
317
318 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
319 checking that all accesses to malloced memory stay within their
320 bounds. However, there are several add-ons and adaptations of this
321 or other mallocs available that do this.
322 */
323
324 #if MALLOC_DEBUG
325 #include <assert.h>
326 #else
327 #undef assert
328 #define assert(x) ((void)0)
329 #endif
330
331
332 /*
333 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
334 of chunk sizes.
335
336 The default version is the same as size_t.
337
338 While not strictly necessary, it is best to define this as an
339 unsigned type, even if size_t is a signed type. This may avoid some
340 artificial size limitations on some systems.
341
342 On a 64-bit machine, you may be able to reduce malloc overhead by
343 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
344 expense of not being able to handle more than 2^32 of malloced
345 space. If this limitation is acceptable, you are encouraged to set
346 this unless you are on a platform requiring 16byte alignments. In
347 this case the alignment requirements turn out to negate any
348 potential advantages of decreasing size_t word size.
349
350 Implementors: Beware of the possible combinations of:
351 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
352 and might be the same width as int or as long
353 - size_t might have different width and signedness as INTERNAL_SIZE_T
354 - int and long might be 32 or 64 bits, and might be the same width
355 To deal with this, most comparisons and difference computations
356 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
357 aware of the fact that casting an unsigned int to a wider long does
358 not sign-extend. (This also makes checking for negative numbers
359 awkward.) Some of these casts result in harmless compiler warnings
360 on some systems.
361 */
362
363 #ifndef INTERNAL_SIZE_T
364 #define INTERNAL_SIZE_T size_t
365 #endif
366
367 /* The corresponding word size */
368 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
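/*
  Illustrative sketch only (not part of this file's implementation): the
  note above recommends casting INTERNAL_SIZE_T values to unsigned long
  before comparing them, so that differences in width or signedness
  cannot change the result.  A hypothetical helper following that advice:

    static int size_at_least(INTERNAL_SIZE_T have, INTERNAL_SIZE_T want)
    {
      // Cast both operands to the same unsigned type before comparing.
      return (unsigned long)(have) >= (unsigned long)(want);
    }
*/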
369
370
371 /*
372 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
373 It must be a power of two at least 2 * SIZE_SZ, even on machines
374 for which smaller alignments would suffice. It may be defined as
375 larger than this though. Note however that code and data structures
376 are optimized for the case of 8-byte alignment.
377 */
378
379
380 #ifndef MALLOC_ALIGNMENT
381 /* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
382 powerpc32. For the time being, changing this is causing more
383 compatibility problems due to malloc_get_state/malloc_set_state than
384 will returning blocks not adequately aligned for long double objects
385 under -mlong-double-128.
386
387 #define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
388 ? __alignof__ (long double) : 2 * SIZE_SZ)
389 */
390 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
391 #endif
392
393 /* The corresponding bit mask value */
394 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
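/*
  Illustrative sketch only: the mask is typically used to round a byte
  count up to the next multiple of MALLOC_ALIGNMENT, which works because
  MALLOC_ALIGNMENT is a power of two.  With the default 8-byte alignment,
  align_up(13) below yields 16.

    static size_t align_up(size_t n)
    {
      // Round n up to a multiple of MALLOC_ALIGNMENT.
      return (n + MALLOC_ALIGN_MASK) & ~(size_t) MALLOC_ALIGN_MASK;
    }
*/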
395
396
397
398 /*
399 REALLOC_ZERO_BYTES_FREES should be set if a call to
400 realloc with zero bytes should be the same as a call to free.
401 This is required by the C standard. Otherwise, because this malloc
402 returns a unique pointer for malloc(0), realloc(p, 0) does so as well.
403 */
404
405 #ifndef REALLOC_ZERO_BYTES_FREES
406 #define REALLOC_ZERO_BYTES_FREES 1
407 #endif
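/*
  Illustrative sketch only: with REALLOC_ZERO_BYTES_FREES set to 1 (the
  default), realloc(p, 0) acts like free(p), so a caller should treat the
  pointer as gone afterwards.  With the option set to 0, the same call
  would instead return a new minimum-sized chunk that must itself be
  freed later.

    #include <stdlib.h>

    static void drop_buffer(void **pp)
    {
      // Under REALLOC_ZERO_BYTES_FREES == 1 this releases the chunk.
      realloc(*pp, 0);
      *pp = NULL;      // avoid leaving a dangling pointer behind
    }
*/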
408
409 /*
410 TRIM_FASTBINS controls whether free() of a very small chunk can
411 immediately lead to trimming. Setting to true (1) can reduce memory
412 footprint, but will almost always slow down programs that use a lot
413 of small chunks.
414
415 Define this only if you are willing to give up some speed to more
416 aggressively reduce system-level memory footprint when releasing
417 memory in programs that use many small chunks. You can get
418 essentially the same effect by setting MXFAST to 0, but this can
419 lead to even greater slowdowns in programs using many small chunks.
420 TRIM_FASTBINS is an in-between compile-time option, that disables
421 only those chunks bordering topmost memory from being placed in
422 fastbins.
423 */
424
425 #ifndef TRIM_FASTBINS
426 #define TRIM_FASTBINS 0
427 #endif
428
429
430 /*
431 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
432 This is necessary when you only want to use this malloc in one part
433 of a program, using your regular system malloc elsewhere.
434 */
435
436 /* #define USE_DL_PREFIX */
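/*
  Illustrative sketch only: when USE_DL_PREFIX is defined, the public
  entry points become dlmalloc, dlfree, etc. (see the defines below), so
  this allocator can live alongside the regular system malloc in the same
  program.  Hypothetical caller code:

    #include <stdlib.h>

    extern void *dlmalloc(size_t);
    extern void  dlfree(void *);

    static void demo(void)
    {
      char *a = dlmalloc(64);   // served by this allocator
      char *b = malloc(64);     // served by the system allocator
      dlfree(a);
      free(b);
    }
*/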
437
438
439 /*
440 Two-phase name translation.
441 All of the actual routines are given mangled names.
442 When wrappers are used, they become the public callable versions.
443 When DL_PREFIX is used, the callable names are prefixed.
444 */
445
446 #ifdef USE_DL_PREFIX
447 #define public_cALLOc dlcalloc
448 #define public_fREe dlfree
449 #define public_cFREe dlcfree
450 #define public_mALLOc dlmalloc
451 #define public_mEMALIGn dlmemalign
452 #define public_rEALLOc dlrealloc
453 #define public_vALLOc dlvalloc
454 #define public_pVALLOc dlpvalloc
455 #define public_mALLINFo dlmallinfo
456 #define public_mALLOPt dlmallopt
457 #define public_mTRIm dlmalloc_trim
458 #define public_mSTATs dlmalloc_stats
459 #define public_mUSABLe dlmalloc_usable_size
460 #define public_iCALLOc dlindependent_calloc
461 #define public_iCOMALLOc dlindependent_comalloc
462 #define public_gET_STATe dlget_state
463 #define public_sET_STATe dlset_state
464 #else /* USE_DL_PREFIX */
465 #ifdef _LIBC
466
467 /* Special defines for the GNU C library. */
468 #define public_cALLOc __libc_calloc
469 #define public_fREe __libc_free
470 #define public_cFREe __libc_cfree
471 #define public_mALLOc __libc_malloc
472 #define public_mEMALIGn __libc_memalign
473 #define public_rEALLOc __libc_realloc
474 #define public_vALLOc __libc_valloc
475 #define public_pVALLOc __libc_pvalloc
476 #define public_mALLINFo __libc_mallinfo
477 #define public_mALLOPt __libc_mallopt
478 #define public_mTRIm __malloc_trim
479 #define public_mSTATs __malloc_stats
480 #define public_mUSABLe __malloc_usable_size
481 #define public_iCALLOc __libc_independent_calloc
482 #define public_iCOMALLOc __libc_independent_comalloc
483 #define public_gET_STATe __malloc_get_state
484 #define public_sET_STATe __malloc_set_state
485 #define malloc_getpagesize __getpagesize()
486 #define open __open
487 #define mmap __mmap
488 #define munmap __munmap
489 #define mremap __mremap
490 #define mprotect __mprotect
491 #define MORECORE (*__morecore)
492 #define MORECORE_FAILURE 0
493
494 Void_t * __default_morecore (ptrdiff_t);
495 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
496
497 #else /* !_LIBC */
498 #define public_cALLOc calloc
499 #define public_fREe free
500 #define public_cFREe cfree
501 #define public_mALLOc malloc
502 #define public_mEMALIGn memalign
503 #define public_rEALLOc realloc
504 #define public_vALLOc valloc
505 #define public_pVALLOc pvalloc
506 #define public_mALLINFo mallinfo
507 #define public_mALLOPt mallopt
508 #define public_mTRIm malloc_trim
509 #define public_mSTATs malloc_stats
510 #define public_mUSABLe malloc_usable_size
511 #define public_iCALLOc independent_calloc
512 #define public_iCOMALLOc independent_comalloc
513 #define public_gET_STATe malloc_get_state
514 #define public_sET_STATe malloc_set_state
515 #endif /* _LIBC */
516 #endif /* USE_DL_PREFIX */
517
518 #ifndef _LIBC
519 #define __builtin_expect(expr, val) (expr)
520
521 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
522 #endif
523
524 /*
525 HAVE_MEMCPY should be defined if you are not otherwise using
526 ANSI STD C, but still have memcpy and memset in your C library
527 and want to use them in calloc and realloc. Otherwise simple
528 macro versions are defined below.
529
530 USE_MEMCPY should be defined as 1 if you actually want to
531 have memset and memcpy called. People report that the macro
532 versions are faster than libc versions on some systems.
533
534 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
535 (of <= 36 bytes) are manually unrolled in realloc and calloc.
536 */
537
538 #define HAVE_MEMCPY
539
540 #ifndef USE_MEMCPY
541 #ifdef HAVE_MEMCPY
542 #define USE_MEMCPY 1
543 #else
544 #define USE_MEMCPY 0
545 #endif
546 #endif
547
548
549 #if (__STD_C || defined(HAVE_MEMCPY))
550
551 #ifdef _LIBC
552 # include <string.h>
553 #else
554 #ifdef WIN32
555 /* On Win32 memset and memcpy are already declared in windows.h */
556 #else
557 #if __STD_C
558 void* memset(void*, int, size_t);
559 void* memcpy(void*, const void*, size_t);
560 #else
561 Void_t* memset();
562 Void_t* memcpy();
563 #endif
564 #endif
565 #endif
566 #endif
567
568 /*
569 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
570 malloc is unable to return memory, either because memory is
571 exhausted or because of illegal arguments.
572
573 By default, sets errno if running on STD_C platform, else does nothing.
574 */
575
576 #ifndef MALLOC_FAILURE_ACTION
577 #if __STD_C
578 #define MALLOC_FAILURE_ACTION \
579 errno = ENOMEM;
580
581 #else
582 #define MALLOC_FAILURE_ACTION
583 #endif
584 #endif
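/*
  Illustrative sketch only: because the definition above is guarded by
  #ifndef, MALLOC_FAILURE_ACTION can be redefined before this file is
  compiled, for example to log the failure in addition to setting errno
  (the message below is purely an example):

    #define MALLOC_FAILURE_ACTION \
      do { \
        errno = ENOMEM; \
        fprintf(stderr, "malloc: request failed\n"); \
      } while (0);
*/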
585
586 /*
587 MORECORE-related declarations. By default, rely on sbrk
588 */
589
590
591 #ifdef LACKS_UNISTD_H
592 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
593 #if __STD_C
594 extern Void_t* sbrk(ptrdiff_t);
595 #else
596 extern Void_t* sbrk();
597 #endif
598 #endif
599 #endif
600
601 /*
602 MORECORE is the name of the routine to call to obtain more memory
603 from the system. See below for general guidance on writing
604 alternative MORECORE functions, as well as a version for WIN32 and a
605 sample version for pre-OSX macos.
606 */
607
608 #ifndef MORECORE
609 #define MORECORE sbrk
610 #endif
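/*
  Illustrative sketch only (all names here are hypothetical): an
  alternative MORECORE can hand out memory from a fixed static region,
  which is one way to use this malloc on a system without sbrk.  On
  failure it returns MORECORE_FAILURE (defined just below), exactly as
  sbrk would.

    #define POOL_SIZE (1024 * 1024)
    static char      static_pool[POOL_SIZE];
    static ptrdiff_t pool_brk;               // bytes already handed out

    void *static_morecore(ptrdiff_t increment)
    {
      if (increment < 0 || pool_brk + increment > POOL_SIZE)
        return (void *) MORECORE_FAILURE;    // cannot shrink or grow
      void *p = static_pool + pool_brk;
      pool_brk += increment;
      return p;
    }

    // Build with, e.g.:  -DMORECORE=static_morecore -DMORECORE_CANNOT_TRIM
*/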
611
612 /*
613 MORECORE_FAILURE is the value returned upon failure of MORECORE
614 as well as mmap. Since it cannot be an otherwise valid memory address,
615 and must reflect values of standard sys calls, you probably ought not
616 try to redefine it.
617 */
618
619 #ifndef MORECORE_FAILURE
620 #define MORECORE_FAILURE (-1)
621 #endif
622
623 /*
624 If MORECORE_CONTIGUOUS is true, take advantage of fact that
625 consecutive calls to MORECORE with positive arguments always return
626 contiguous increasing addresses. This is true of unix sbrk. Even
627 if not defined, when regions happen to be contiguous, malloc will
628 permit allocations spanning regions obtained from different
629 calls. But defining this when applicable enables some stronger
630 consistency checks and space efficiencies.
631 */
632
633 #ifndef MORECORE_CONTIGUOUS
634 #define MORECORE_CONTIGUOUS 1
635 #endif
636
637 /*
638 Define MORECORE_CANNOT_TRIM if your version of MORECORE
639 cannot release space back to the system when given negative
640 arguments. This is generally necessary only if you are using
641 a hand-crafted MORECORE function that cannot handle negative arguments.
642 */
643
644 /* #define MORECORE_CANNOT_TRIM */
645
646 /* MORECORE_CLEARS (default 1)
647 The degree to which the routine mapped to MORECORE zeroes out
648 memory: never (0), only for newly allocated space (1) or always
649 (2). The distinction between (1) and (2) is necessary because on
650 some systems, if the application first decrements and then
651 increments the break value, the contents of the reallocated space
652 are unspecified.
653 */
654
655 #ifndef MORECORE_CLEARS
656 #define MORECORE_CLEARS 1
657 #endif
658
659
660 /*
661 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
662 allocate very large blocks. These will be returned to the
663 operating system immediately after a free(). Also, if mmap
664 is available, it is used as a backup strategy in cases where
665 MORECORE fails to provide space from system.
666
667 This malloc is best tuned to work with mmap for large requests.
668 If you do not have mmap, operations involving very large chunks (1MB
669 or so) may be slower than you'd like.
670 */
671
672 #ifndef HAVE_MMAP
673 #define HAVE_MMAP 1
674
675 /*
676 Standard unix mmap using /dev/zero clears memory so calloc doesn't
677 need to.
678 */
679
680 #ifndef MMAP_CLEARS
681 #define MMAP_CLEARS 1
682 #endif
683
684 #else /* no mmap */
685 #ifndef MMAP_CLEARS
686 #define MMAP_CLEARS 0
687 #endif
688 #endif
689
690
691 /*
692 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
693 sbrk fails, and mmap is used as a backup (which is done only if
694 HAVE_MMAP). The value must be a multiple of page size. This
695 backup strategy generally applies only when systems have "holes" in
696 address space, so sbrk cannot perform contiguous expansion, but
697 there is still space available on system. On systems for which
698 this is known to be useful (i.e. most linux kernels), this occurs
699 only when programs allocate huge amounts of memory. Between this,
700 and the fact that mmap regions tend to be limited, the size should
701 be large, to avoid too many mmap calls and thus avoid running out
702 of kernel resources.
703 */
704
705 #ifndef MMAP_AS_MORECORE_SIZE
706 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
707 #endif
708
709 /*
710 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
711 large blocks. This is currently only possible on Linux with
712 kernel versions newer than 1.3.77.
713 */
714
715 #ifndef HAVE_MREMAP
716 #ifdef linux
717 #define HAVE_MREMAP 1
718 #else
719 #define HAVE_MREMAP 0
720 #endif
721
722 #endif /* HAVE_MMAP */
723
724 /* Define USE_ARENAS to enable support for multiple `arenas'. These
725 are allocated using mmap(), are necessary for threads and
726 occasionally useful to overcome address space limitations affecting
727 sbrk(). */
728
729 #ifndef USE_ARENAS
730 #define USE_ARENAS HAVE_MMAP
731 #endif
732
733
734 /*
735 The system page size. To the extent possible, this malloc manages
736 memory from the system in page-size units. Note that this value is
737 cached during initialization into a field of malloc_state. So even
738 if malloc_getpagesize is a function, it is only called once.
739
740 The following mechanics for getpagesize were adapted from bsd/gnu
741 getpagesize.h. If none of the system-probes here apply, a value of
742 4096 is used, which should be OK: If they don't apply, then using
743 the actual value probably doesn't impact performance.
744 */
745
746
747 #ifndef malloc_getpagesize
748
749 #ifndef LACKS_UNISTD_H
750 # include <unistd.h>
751 #endif
752
753 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
754 # ifndef _SC_PAGE_SIZE
755 # define _SC_PAGE_SIZE _SC_PAGESIZE
756 # endif
757 # endif
758
759 # ifdef _SC_PAGE_SIZE
760 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
761 # else
762 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
763 extern size_t getpagesize();
764 # define malloc_getpagesize getpagesize()
765 # else
766 # ifdef WIN32 /* use supplied emulation of getpagesize */
767 # define malloc_getpagesize getpagesize()
768 # else
769 # ifndef LACKS_SYS_PARAM_H
770 # include <sys/param.h>
771 # endif
772 # ifdef EXEC_PAGESIZE
773 # define malloc_getpagesize EXEC_PAGESIZE
774 # else
775 # ifdef NBPG
776 # ifndef CLSIZE
777 # define malloc_getpagesize NBPG
778 # else
779 # define malloc_getpagesize (NBPG * CLSIZE)
780 # endif
781 # else
782 # ifdef NBPC
783 # define malloc_getpagesize NBPC
784 # else
785 # ifdef PAGESIZE
786 # define malloc_getpagesize PAGESIZE
787 # else /* just guess */
788 # define malloc_getpagesize (4096)
789 # endif
790 # endif
791 # endif
792 # endif
793 # endif
794 # endif
795 # endif
796 #endif
797
798 /*
799 This version of malloc supports the standard SVID/XPG mallinfo
800 routine that returns a struct containing usage properties and
801 statistics. It should work on any SVID/XPG compliant system that has
802 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
803 install such a thing yourself, cut out the preliminary declarations
804 as described above and below and save them in a malloc.h file. But
805 there's no compelling reason to bother to do this.)
806
807 The main declaration needed is the mallinfo struct that is returned
808 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
809 bunch of fields that are not even meaningful in this version of
810 malloc. These fields are instead filled by mallinfo() with
811 other numbers that might be of interest.
812
813 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
814 /usr/include/malloc.h file that includes a declaration of struct
815 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
816 version is declared below. These must be precisely the same for
817 mallinfo() to work. The original SVID version of this struct,
818 defined on most systems with mallinfo, declares all fields as
819 ints. But some others define as unsigned long. If your system
820 defines the fields using a type of different width than listed here,
821 you must #include your system version and #define
822 HAVE_USR_INCLUDE_MALLOC_H.
823 */
824
825 /* #define HAVE_USR_INCLUDE_MALLOC_H */
826
827 #ifdef HAVE_USR_INCLUDE_MALLOC_H
828 #include "/usr/include/malloc.h"
829 #endif
830
831
832 /* ---------- description of public routines ------------ */
833
834 /*
835 malloc(size_t n)
836 Returns a pointer to a newly allocated chunk of at least n bytes, or null
837 if no space is available. Additionally, on failure, errno is
838 set to ENOMEM on ANSI C systems.
839
840 If n is zero, malloc returns a minimum-sized chunk. (The minimum
841 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
842 systems.) On most systems, size_t is an unsigned type, so calls
843 with negative arguments are interpreted as requests for huge amounts
844 of space, which will often fail. The maximum supported value of n
845 differs across systems, but is in all cases less than the maximum
846 representable value of a size_t.
847 */
848 #if __STD_C
849 Void_t* public_mALLOc(size_t);
850 #else
851 Void_t* public_mALLOc();
852 #endif
853 #ifdef libc_hidden_proto
854 libc_hidden_proto (public_mALLOc)
855 #endif
856
857 /*
858 free(Void_t* p)
859 Releases the chunk of memory pointed to by p, that had been previously
860 allocated using malloc or a related routine such as realloc.
861 It has no effect if p is null. It can have arbitrary (i.e., bad!)
862 effects if p has already been freed.
863
864 Unless disabled (using mallopt), freeing very large spaces will
865 when possible, automatically trigger operations that give
866 back unused memory to the system, thus reducing program footprint.
867 */
868 #if __STD_C
869 void public_fREe(Void_t*);
870 #else
871 void public_fREe();
872 #endif
873 #ifdef libc_hidden_proto
874 libc_hidden_proto (public_fREe)
875 #endif
876
877 /*
878 calloc(size_t n_elements, size_t element_size);
879 Returns a pointer to n_elements * element_size bytes, with all locations
880 set to zero.
881 */
882 #if __STD_C
883 Void_t* public_cALLOc(size_t, size_t);
884 #else
885 Void_t* public_cALLOc();
886 #endif
887
888 /*
889 realloc(Void_t* p, size_t n)
890 Returns a pointer to a chunk of size n that contains the same data
891 as does chunk p up to the minimum of (n, p's size) bytes, or null
892 if no space is available.
893
894 The returned pointer may or may not be the same as p. The algorithm
895 prefers extending p when possible, otherwise it employs the
896 equivalent of a malloc-copy-free sequence.
897
898 If p is null, realloc is equivalent to malloc.
899
900 If space is not available, realloc returns null, errno is set (if on
901 ANSI) and p is NOT freed.
902
903 If n is for fewer bytes than already held by p, the newly unused
904 space is lopped off and freed if possible. Unless the #define
905 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
906 zero (re)allocates a minimum-sized chunk.
907
908 Large chunks that were internally obtained via mmap will always
909 be reallocated using malloc-copy-free sequences unless
910 the system supports MREMAP (currently only linux).
911
912 The old unix realloc convention of allowing the last-free'd chunk
913 to be used as an argument to realloc is not supported.
914 */
915 #if __STD_C
916 Void_t* public_rEALLOc(Void_t*, size_t);
917 #else
918 Void_t* public_rEALLOc();
919 #endif
920 #ifdef libc_hidden_proto
921 libc_hidden_proto (public_rEALLOc)
922 #endif
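/*
  Illustrative usage sketch: as noted above, a failed realloc leaves the
  original block allocated, so callers should write the result into a
  temporary instead of overwriting their only pointer to the block.

    #include <stdlib.h>

    static int grow_buffer(char **buf, size_t new_size)
    {
      char *tmp = realloc(*buf, new_size);
      if (tmp == NULL)
        return -1;     // *buf is still valid and must still be freed
      *buf = tmp;
      return 0;
    }
*/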
923
924 /*
925 memalign(size_t alignment, size_t n);
926 Returns a pointer to a newly allocated chunk of n bytes, aligned
927 in accord with the alignment argument.
928
929 The alignment argument should be a power of two. If the argument is
930 not a power of two, the nearest greater power is used.
931 8-byte alignment is guaranteed by normal malloc calls, so don't
932 bother calling memalign with an argument of 8 or less.
933
934 Overreliance on memalign is a sure way to fragment space.
935 */
936 #if __STD_C
937 Void_t* public_mEMALIGn(size_t, size_t);
938 #else
939 Void_t* public_mEMALIGn();
940 #endif
941 #ifdef libc_hidden_proto
942 libc_hidden_proto (public_mEMALIGn)
943 #endif
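/*
  Illustrative usage sketch: memalign is useful when a buffer must start
  on a boundary stricter than malloc's default alignment, e.g. a 64-byte
  boundary for cache-line purposes (64 is just an example value here).

    #include <assert.h>
    #include <malloc.h>
    #include <stdint.h>

    static void *line_aligned(size_t n)
    {
      void *p = memalign(64, n);    // alignment should be a power of two
      assert(p == NULL || ((uintptr_t) p & 63) == 0);
      return p;
    }
*/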
944
945 /*
946 valloc(size_t n);
947 Equivalent to memalign(pagesize, n), where pagesize is the page
948 size of the system. If the pagesize is unknown, 4096 is used.
949 */
950 #if __STD_C
951 Void_t* public_vALLOc(size_t);
952 #else
953 Void_t* public_vALLOc();
954 #endif
955
956
957
958 /*
959 mallopt(int parameter_number, int parameter_value)
960 Sets tunable parameters. The format is to provide a
961 (parameter-number, parameter-value) pair. mallopt then sets the
962 corresponding parameter to the argument value if it can (i.e., so
963 long as the value is meaningful), and returns 1 if successful else
964 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
965 normally defined in malloc.h. Only one of these (M_MXFAST) is used
966 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
967 so setting them has no effect. But this malloc also supports four
968 other options in mallopt. See below for details. Briefly, supported
969 parameters are as follows (listed defaults are for "typical"
970 configurations).
971
972 Symbol param # default allowed param values
973 M_MXFAST 1 64 0-80 (0 disables fastbins)
974 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
975 M_TOP_PAD -2 0 any
976 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
977 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
978 */
979 #if __STD_C
980 int public_mALLOPt(int, int);
981 #else
982 int public_mALLOPt();
983 #endif
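/*
  Illustrative usage sketch: a long-lived process might adjust several of
  the parameters listed above at start-up.  The values below are examples
  only, not recommendations.

    #include <malloc.h>

    static void tune_allocator(void)
    {
      // Each call returns 1 on success and 0 if the value was rejected.
      mallopt(M_TRIM_THRESHOLD, 64 * 1024);
      mallopt(M_MXFAST, 0);            // 0 disables fastbins entirely
      mallopt(M_MMAP_MAX, 1024);       // cap simultaneously mmapped chunks
    }
*/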
984
985
986 /*
987 mallinfo()
988 Returns (by copy) a struct containing various summary statistics:
989
990 arena: current total non-mmapped bytes allocated from system
991 ordblks: the number of free chunks
992 smblks: the number of fastbin blocks (i.e., small chunks that
993 have been freed but not yet reused or consolidated)
994 hblks: current number of mmapped regions
995 hblkhd: total bytes held in mmapped regions
996 usmblks: the maximum total allocated space. This will be greater
997 than current total if trimming has occurred.
998 fsmblks: total bytes held in fastbin blocks
999 uordblks: current total allocated space (normal or mmapped)
1000 fordblks: total free space
1001 keepcost: the maximum number of bytes that could ideally be released
1002 back to system via malloc_trim. ("ideally" means that
1003 it ignores page restrictions etc.)
1004
1005 Because these fields are ints, but internal bookkeeping may
1006 be kept as longs, the reported values may wrap around zero and
1007 thus be inaccurate.
1008 */
1009 #if __STD_C
1010 struct mallinfo public_mALLINFo(void);
1011 #else
1012 struct mallinfo public_mALLINFo();
1013 #endif
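/*
  Illustrative usage sketch: printing a few of the fields described
  above.  As the comment warns, the fields are ints and may wrap in very
  large processes.

    #include <malloc.h>
    #include <stdio.h>

    static void report_heap(void)
    {
      struct mallinfo mi = mallinfo();
      printf("non-mmapped arena bytes: %d\n", mi.arena);
      printf("allocated bytes:         %d\n", mi.uordblks);
      printf("free bytes:              %d\n", mi.fordblks);
      printf("mmapped bytes:           %d\n", mi.hblkhd);
      printf("trimmable bytes:         %d\n", mi.keepcost);
    }
*/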
1014
1015 #ifndef _LIBC
1016 /*
1017 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1018
1019 independent_calloc is similar to calloc, but instead of returning a
1020 single cleared space, it returns an array of pointers to n_elements
1021 independent elements that can hold contents of size elem_size, each
1022 of which starts out cleared, and can be independently freed,
1023 realloc'ed etc. The elements are guaranteed to be adjacently
1024 allocated (this is not guaranteed to occur with multiple callocs or
1025 mallocs), which may also improve cache locality in some
1026 applications.
1027
1028 The "chunks" argument is optional (i.e., may be null, which is
1029 probably the most typical usage). If it is null, the returned array
1030 is itself dynamically allocated and should also be freed when it is
1031 no longer needed. Otherwise, the chunks array must be of at least
1032 n_elements in length. It is filled in with the pointers to the
1033 chunks.
1034
1035 In either case, independent_calloc returns this pointer array, or
1036 null if the allocation failed. If n_elements is zero and "chunks"
1037 is null, it returns a chunk representing an array with zero elements
1038 (which should be freed if not wanted).
1039
1040 Each element must be individually freed when it is no longer
1041 needed. If you'd like to instead be able to free all at once, you
1042 should instead use regular calloc and assign pointers into this
1043 space to represent elements. (In this case though, you cannot
1044 independently free elements.)
1045
1046 independent_calloc simplifies and speeds up implementations of many
1047 kinds of pools. It may also be useful when constructing large data
1048 structures that initially have a fixed number of fixed-sized nodes,
1049 but the number is not known at compile time, and some of the nodes
1050 may later need to be freed. For example:
1051
1052 struct Node { int item; struct Node* next; };
1053
1054 struct Node* build_list() {
1055 struct Node** pool;
1056 int n = read_number_of_nodes_needed();
1057 if (n <= 0) return 0;
1058 pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1059 if (pool == 0) die();
1060 // organize into a linked list...
1061 struct Node* first = pool[0];
1062 for (int i = 0; i < n-1; ++i)
1063 pool[i]->next = pool[i+1];
1064 free(pool); // Can now free the array (or not, if it is needed later)
1065 return first;
1066 }
1067 */
1068 #if __STD_C
1069 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1070 #else
1071 Void_t** public_iCALLOc();
1072 #endif
1073
1074 /*
1075 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1076
1077 independent_comalloc allocates, all at once, a set of n_elements
1078 chunks with sizes indicated in the "sizes" array. It returns
1079 an array of pointers to these elements, each of which can be
1080 independently freed, realloc'ed etc. The elements are guaranteed to
1081 be adjacently allocated (this is not guaranteed to occur with
1082 multiple callocs or mallocs), which may also improve cache locality
1083 in some applications.
1084
1085 The "chunks" argument is optional (i.e., may be null). If it is null
1086 the returned array is itself dynamically allocated and should also
1087 be freed when it is no longer needed. Otherwise, the chunks array
1088 must be of at least n_elements in length. It is filled in with the
1089 pointers to the chunks.
1090
1091 In either case, independent_comalloc returns this pointer array, or
1092 null if the allocation failed. If n_elements is zero and chunks is
1093 null, it returns a chunk representing an array with zero elements
1094 (which should be freed if not wanted).
1095
1096 Each element must be individually freed when it is no longer
1097 needed. If you'd like to instead be able to free all at once, you
1098 should instead use a single regular malloc, and assign pointers at
1099 particular offsets in the aggregate space. (In this case though, you
1100 cannot independently free elements.)
1101
1102 independent_comalloc differs from independent_calloc in that each
1103 element may have a different size, and also that it does not
1104 automatically clear elements.
1105
1106 independent_comalloc can be used to speed up allocation in cases
1107 where several structs or objects must always be allocated at the
1108 same time. For example:
1109
1110 struct Head { ... }
1111 struct Foot { ... }
1112
1113 void send_message(char* msg) {
1114 int msglen = strlen(msg);
1115 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1116 void* chunks[3];
1117 if (independent_comalloc(3, sizes, chunks) == 0)
1118 die();
1119 struct Head* head = (struct Head*)(chunks[0]);
1120 char* body = (char*)(chunks[1]);
1121 struct Foot* foot = (struct Foot*)(chunks[2]);
1122 // ...
1123 }
1124
1125 In general though, independent_comalloc is worth using only for
1126 larger values of n_elements. For small values, you probably won't
1127 detect enough difference from series of malloc calls to bother.
1128
1129 Overuse of independent_comalloc can increase overall memory usage,
1130 since it cannot reuse existing noncontiguous small chunks that
1131 might be available for some of the elements.
1132 */
1133 #if __STD_C
1134 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1135 #else
1136 Void_t** public_iCOMALLOc();
1137 #endif
1138
1139 #endif /* _LIBC */
1140
1141
1142 /*
1143 pvalloc(size_t n);
1144 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1145 round up n to nearest pagesize.
1146 */
1147 #if __STD_C
1148 Void_t* public_pVALLOc(size_t);
1149 #else
1150 Void_t* public_pVALLOc();
1151 #endif
1152
1153 /*
1154 cfree(Void_t* p);
1155 Equivalent to free(p).
1156
1157 cfree is needed/defined on some systems that pair it with calloc,
1158 for odd historical reasons (such as: cfree is used in example
1159 code in the first edition of K&R).
1160 */
1161 #if __STD_C
1162 void public_cFREe(Void_t*);
1163 #else
1164 void public_cFREe();
1165 #endif
1166
1167 /*
1168 malloc_trim(size_t pad);
1169
1170 If possible, gives memory back to the system (via negative
1171 arguments to sbrk) if there is unused memory at the `high' end of
1172 the malloc pool. You can call this after freeing large blocks of
1173 memory to potentially reduce the system-level memory requirements
1174 of a program. However, it cannot guarantee to reduce memory. Under
1175 some allocation patterns, some large free blocks of memory will be
1176 locked between two used chunks, so they cannot be given back to
1177 the system.
1178
1179 The `pad' argument to malloc_trim represents the amount of free
1180 trailing space to leave untrimmed. If this argument is zero,
1181 only the minimum amount of memory to maintain internal data
1182 structures will be left (one page or less). Non-zero arguments
1183 can be supplied to maintain enough trailing space to service
1184 future expected allocations without having to re-obtain memory
1185 from the system.
1186
1187 Malloc_trim returns 1 if it actually released any memory, else 0.
1188 On systems that do not support "negative sbrks", it will always
1189 return 0.
1190 */
1191 #if __STD_C
1192 int public_mTRIm(size_t);
1193 #else
1194 int public_mTRIm();
1195 #endif
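/*
  Illustrative usage sketch: after freeing a large batch of objects, a
  program can ask for unused top-of-heap memory to be returned to the
  system, keeping some slack (256Kb here, an example value) for future
  allocations.

    #include <malloc.h>
    #include <stdio.h>

    static void release_after_burst(void)
    {
      // ... free many large blocks first ...
      if (malloc_trim(256 * 1024))
        fputs("some memory was returned to the system\n", stderr);
    }
*/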
1196
1197 /*
1198 malloc_usable_size(Void_t* p);
1199
1200 Returns the number of bytes you can actually use in
1201 an allocated chunk, which may be more than you requested (although
1202 often not) due to alignment and minimum size constraints.
1203 You can use this many bytes without worrying about
1204 overwriting other allocated objects. This is not a particularly great
1205 programming practice. malloc_usable_size can be more useful in
1206 debugging and assertions, for example:
1207
1208 p = malloc(n);
1209 assert(malloc_usable_size(p) >= 256);
1210
1211 */
1212 #if __STD_C
1213 size_t public_mUSABLe(Void_t*);
1214 #else
1215 size_t public_mUSABLe();
1216 #endif
1217
1218 /*
1219 malloc_stats();
1220 Prints on stderr the amount of space obtained from the system (both
1221 via sbrk and mmap), the maximum amount (which may be more than
1222 current if malloc_trim and/or munmap got called), and the current
1223 number of bytes allocated via malloc (or realloc, etc) but not yet
1224 freed. Note that this is the number of bytes allocated, not the
1225 number requested. It will be larger than the number requested
1226 because of alignment and bookkeeping overhead. Because it includes
1227 alignment wastage as being in use, this figure may be greater than
1228 zero even when no user-level chunks are allocated.
1229
1230 The reported current and maximum system memory can be inaccurate if
1231 a program makes other calls to system memory allocation functions
1232 (normally sbrk) outside of malloc.
1233
1234 malloc_stats prints only the most commonly interesting statistics.
1235 More information can be obtained by calling mallinfo.
1236
1237 */
1238 #if __STD_C
1239 void public_mSTATs(void);
1240 #else
1241 void public_mSTATs();
1242 #endif
1243
1244 /*
1245 malloc_get_state(void);
1246
1247 Returns the state of all malloc variables in an opaque data
1248 structure.
1249 */
1250 #if __STD_C
1251 Void_t* public_gET_STATe(void);
1252 #else
1253 Void_t* public_gET_STATe();
1254 #endif
1255
1256 /*
1257 malloc_set_state(Void_t* state);
1258
1259 Restore the state of all malloc variables from data obtained with
1260 malloc_get_state().
1261 */
1262 #if __STD_C
1263 int public_sET_STATe(Void_t*);
1264 #else
1265 int public_sET_STATe();
1266 #endif
1267
1268 #ifdef _LIBC
1269 /*
1270 posix_memalign(void **memptr, size_t alignment, size_t size);
1271
1272 POSIX wrapper like memalign(), checking for validity of size.
1273 */
1274 int __posix_memalign(void **, size_t, size_t);
1275 #endif
1276
1277 /* mallopt tuning options */
1278
1279 /*
1280 M_MXFAST is the maximum request size used for "fastbins", special bins
1281 that hold returned chunks without consolidating their spaces. This
1282 enables future requests for chunks of the same size to be handled
1283 very quickly, but can increase fragmentation, and thus increase the
1284 overall memory footprint of a program.
1285
1286 This malloc manages fastbins very conservatively yet still
1287 efficiently, so fragmentation is rarely a problem for values less
1288 than or equal to the default. The maximum supported value of MXFAST
1289 is 80. You wouldn't want it any higher than this anyway. Fastbins
1290 are designed especially for use with many small structs, objects or
1291 strings -- the default handles structs/objects/arrays with sizes up
1292 to 8 4byte fields, or small strings representing words, tokens,
1293 etc. Using fastbins for larger objects normally worsens
1294 fragmentation without improving speed.
1295
1296 M_MXFAST is set in REQUEST size units. It is internally used in
1297 chunksize units, which adds padding and alignment. You can reduce
1298 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1299 algorithm to be a closer approximation of fifo-best-fit in all cases,
1300 not just for larger requests, but will generally cause it to be
1301 slower.
1302 */
1303
1304
1305 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1306 #ifndef M_MXFAST
1307 #define M_MXFAST 1
1308 #endif
1309
1310 #ifndef DEFAULT_MXFAST
1311 #define DEFAULT_MXFAST 64
1312 #endif
1313
1314
1315 /*
1316 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1317 to keep before releasing via malloc_trim in free().
1318
1319 Automatic trimming is mainly useful in long-lived programs.
1320 Because trimming via sbrk can be slow on some systems, and can
1321 sometimes be wasteful (in cases where programs immediately
1322 afterward allocate more large chunks) the value should be high
1323 enough so that your overall system performance would improve by
1324 releasing this much memory.
1325
1326 The trim threshold and the mmap control parameters (see below)
1327 can be traded off with one another. Trimming and mmapping are
1328 two different ways of releasing unused memory back to the
1329 system. Between these two, it is often possible to keep
1330 system-level demands of a long-lived program down to a bare
1331 minimum. For example, in one test suite of sessions measuring
1332 the XF86 X server on Linux, using a trim threshold of 128K and a
1333 mmap threshold of 192K led to near-minimal long term resource
1334 consumption.
1335
1336 If you are using this malloc in a long-lived program, it should
1337 pay to experiment with these values. As a rough guide, you
1338 might set it to a value close to the average size of a process
1339 (program) running on your system. Releasing this much memory
1340 would allow such a process to run in memory. Generally, it's
1341 worth it to tune for trimming rather than memory mapping when a
1342 program undergoes phases where several large chunks are
1343 allocated and released in ways that can reuse each other's
1344 storage, perhaps mixed with phases where there are no such
1345 chunks at all. And in well-behaved long-lived programs,
1346 controlling release of large blocks via trimming versus mapping
1347 is usually faster.
1348
1349 However, in most programs, these parameters serve mainly as
1350 protection against the system-level effects of carrying around
1351 massive amounts of unneeded memory. Since frequent calls to
1352 sbrk, mmap, and munmap otherwise degrade performance, the default
1353 parameters are set to relatively high values that serve only as
1354 safeguards.
1355
1356 The trim value must be greater than page size to have any useful
1357 effect. To disable trimming completely, you can set to
1358 (unsigned long)(-1)
1359
1360 Trim settings interact with fastbin (MXFAST) settings: Unless
1361 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1362 freeing a chunk with size less than or equal to MXFAST. Trimming is
1363 instead delayed until subsequent freeing of larger chunks. However,
1364 you can still force an attempted trim by calling malloc_trim.
1365
1366 Also, trimming is not generally possible in cases where
1367 the main arena is obtained via mmap.
1368
1369 Note that the trick some people use of mallocing a huge space and
1370 then freeing it at program startup, in an attempt to reserve system
1371 memory, doesn't have the intended effect under automatic trimming,
1372 since that memory will immediately be returned to the system.
1373 */
1374
1375 #define M_TRIM_THRESHOLD -1
1376
1377 #ifndef DEFAULT_TRIM_THRESHOLD
1378 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1379 #endif
1380
1381 /*
1382 M_TOP_PAD is the amount of extra `padding' space to allocate or
1383 retain whenever sbrk is called. It is used in two ways internally:
1384
1385 * When sbrk is called to extend the top of the arena to satisfy
1386 a new malloc request, this much padding is added to the sbrk
1387 request.
1388
1389 * When malloc_trim is called automatically from free(),
1390 it is used as the `pad' argument.
1391
1392 In both cases, the actual amount of padding is rounded
1393 so that the end of the arena is always a system page boundary.
1394
1395 The main reason for using padding is to avoid calling sbrk so
1396 often. Having even a small pad greatly reduces the likelihood
1397 that nearly every malloc request during program start-up (or
1398 after trimming) will invoke sbrk, which needlessly wastes
1399 time.
1400
1401 Automatic rounding-up to page-size units is normally sufficient
1402 to avoid measurable overhead, so the default is 0. However, in
1403 systems where sbrk is relatively slow, it can pay to increase
1404 this value, at the expense of carrying around more memory than
1405 the program needs.
1406 */
1407
1408 #define M_TOP_PAD -2
1409
1410 #ifndef DEFAULT_TOP_PAD
1411 #define DEFAULT_TOP_PAD (0)
1412 #endif
1413
1414 /*
1415 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
1416 adjusted MMAP_THRESHOLD.
1417 */
1418
1419 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
1420 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
1421 #endif
1422
1423 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
1424 /* For 32-bit platforms we cannot increase the maximum mmap
1425 threshold much because it is also the minimum value for the
1426 maximum heap size and its alignment. Going above 512k (i.e., 1M
1427 for new heaps) wastes too much address space. */
1428 # if __WORDSIZE == 32
1429 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
1430 # else
1431 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
1432 # endif
1433 #endif
1434
1435 /*
1436 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1437 to service a request. Requests of at least this size that cannot
1438 be allocated using already-existing space will be serviced via mmap.
1439 (If enough normal freed space already exists it is used instead.)
1440
1441 Using mmap segregates relatively large chunks of memory so that
1442 they can be individually obtained and released from the host
1443 system. A request serviced through mmap is never reused by any
1444 other request (at least not directly; the system may just so
1445 happen to remap successive requests to the same locations).
1446
1447 Segregating space in this way has the benefits that:
1448
1449 1. Mmapped space can ALWAYS be individually released back
1450 to the system, which helps keep the system level memory
1451 demands of a long-lived program low.
1452 2. Mapped memory can never become `locked' between
1453 other chunks, as can happen with normally allocated chunks, which
1454 means that even trimming via malloc_trim would not release them.
1455 3. On some systems with "holes" in address spaces, mmap can obtain
1456 memory that sbrk cannot.
1457
1458 However, it has the disadvantages that:
1459
1460 1. The space cannot be reclaimed, consolidated, and then
1461 used to service later requests, as happens with normal chunks.
1462 2. It can lead to more wastage because of mmap page alignment
1463 requirements
1464 3. It causes malloc performance to be more dependent on host
1465 system memory management support routines which may vary in
1466 implementation quality and may impose arbitrary
1467 limitations. Generally, servicing a request via normal
1468 malloc steps is faster than going through a system's mmap.
1469
1470 The advantages of mmap nearly always outweigh disadvantages for
1471 "large" chunks, but the value of "large" varies across systems. The
1472 default is an empirically derived value that works well in most
1473 systems.
1474
1475
1476 Update in 2006:
1477 The above was written in 2001. Since then the world has changed a lot.
1478 Memory got bigger. Applications got bigger. The virtual address space
1479 layout in 32 bit linux changed.
1480
1481 In the new situation, brk() and mmap space is shared and there are no
1482 artificial limits on brk size imposed by the kernel. What is more,
1483 applications have started using transient allocations larger than the
1484 128Kb that was imagined in 2001.
1485
1486 The price for mmap is also high now; each time glibc mmaps from the
1487 kernel, the kernel is forced to zero out the memory it gives to the
1488 application. Zeroing memory is expensive and eats a lot of cache and
1489 memory bandwidth. This has nothing to do with the efficiency of the
1490 virtual memory system; when handing out mmapped memory the kernel just
1491 has no choice but to zero it.
1492
1493 In 2001, the kernel had a maximum size for brk() which was about 800
1494 megabytes on 32 bit x86; at that point brk() would hit the first
1495 mmapped shared libraries and couldn't expand any more. With current 2.6
1496 kernels, the VA space layout is different and brk() and mmap
1497 both can span the entire heap at will.
1498
1499 Rather than using a static threshold for the brk/mmap tradeoff,
1500 we are now using a simple dynamic one. The goal is still to avoid
1501 fragmentation. The old goals we kept are
1502 1) try to get the long lived large allocations to use mmap()
1503 2) really large allocations should always use mmap()
1504 and we're adding now:
1505 3) transient allocations should use brk() to avoid forcing the kernel
1506 to zero memory over and over again
1507
1508 The implementation works with a sliding threshold, which is by default
1509 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1510 out at 128Kb as per the 2001 default.
1511
1512 This allows us to satisfy requirement 1) under the assumption that long
1513 lived allocations are made early in the process' lifespan, before it has
1514 started doing dynamic allocations of the same size (which will
1515 increase the threshold).
1516
1517 The upper bound on the threshold satisfies requirement 2).
1518
1519 The threshold goes up in value when the application frees memory that was
1520 allocated with the mmap allocator. The idea is that once the application
1521 starts freeing memory of a certain size, it's highly probable that this is
1522 a size the application uses for transient allocations. This estimator
1523 is there to satisfy the new third requirement.
1524
1525 */
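/*
  A minimal sketch (illustration only, never compiled -- hence the #if 0)
  of the sliding-threshold policy described in the comment above.  All
  names here are hypothetical; in this file the threshold actually lives
  in mp_.mmap_threshold and is updated when mmapped chunks are freed.
*/
#if 0
#include <stddef.h>

#define SKETCH_THRESHOLD_MIN (128UL * 1024)        /* 128Kb, 2001 default */
#define SKETCH_THRESHOLD_MAX (32UL * 1024 * 1024)  /* 32Mb, 32 bit limit  */

static size_t sketch_threshold = SKETCH_THRESHOLD_MIN;

/* free() of an mmapped chunk of `size' bytes: treat sizes up to it as
   transient from now on, so they are served via brk() instead.  */
static void sketch_on_mmapped_free(size_t size)
{
  if (size > sketch_threshold && size <= SKETCH_THRESHOLD_MAX)
    sketch_threshold = size;
}

/* Allocation-side decision: only requests at or above the current
   threshold go to mmap.  */
static int sketch_should_mmap(size_t request)
{
  return request >= sketch_threshold;
}
#endif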
1526
1527 #define M_MMAP_THRESHOLD -3
1528
1529 #ifndef DEFAULT_MMAP_THRESHOLD
1530 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1531 #endif
1532
1533 /*
1534 M_MMAP_MAX is the maximum number of requests to simultaneously
1535 service using mmap. This parameter exists because
1536 some systems have a limited number of internal tables for
1537 use by mmap, and using more than a few of them may degrade
1538 performance.
1539
1540 The default is set to a value that serves only as a safeguard.
1541 Setting to 0 disables use of mmap for servicing large requests. If
1542 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1543 to non-zero values in mallopt will fail.
1544 */
1545
1546 #define M_MMAP_MAX -4
1547
1548 #ifndef DEFAULT_MMAP_MAX
1549 #if HAVE_MMAP
1550 #define DEFAULT_MMAP_MAX (65536)
1551 #else
1552 #define DEFAULT_MMAP_MAX (0)
1553 #endif
1554 #endif
1555
1556 #ifdef __cplusplus
1557 } /* end of extern "C" */
1558 #endif
1559
1560 #include <malloc.h>
1561
1562 #ifndef BOUNDED_N
1563 #define BOUNDED_N(ptr, sz) (ptr)
1564 #endif
1565 #ifndef RETURN_ADDRESS
1566 #define RETURN_ADDRESS(X_) (NULL)
1567 #endif
1568
1569 /* On some platforms we can compile internal, not exported, functions better.
1570 Let the environment provide a macro and define it to be empty if it
1571 is not available. */
1572 #ifndef internal_function
1573 # define internal_function
1574 #endif
1575
1576 /* Forward declarations. */
1577 struct malloc_chunk;
1578 typedef struct malloc_chunk* mchunkptr;
1579
1580 /* Internal routines. */
1581
1582 #if __STD_C
1583
1584 Void_t* _int_malloc(mstate, size_t);
1585 void _int_free(mstate, Void_t*);
1586 Void_t* _int_realloc(mstate, Void_t*, size_t);
1587 Void_t* _int_memalign(mstate, size_t, size_t);
1588 Void_t* _int_valloc(mstate, size_t);
1589 static Void_t* _int_pvalloc(mstate, size_t);
1590 /*static Void_t* cALLOc(size_t, size_t);*/
1591 #ifndef _LIBC
1592 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1593 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1594 #endif
1595 static int mTRIm(size_t);
1596 static size_t mUSABLe(Void_t*);
1597 static void mSTATs(void);
1598 static int mALLOPt(int, int);
1599 static struct mallinfo mALLINFo(mstate);
1600 static void malloc_printerr(int action, const char *str, void *ptr);
1601
1602 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1603 static int internal_function top_check(void);
1604 static void internal_function munmap_chunk(mchunkptr p);
1605 #if HAVE_MREMAP
1606 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1607 #endif
1608
1609 static Void_t* malloc_check(size_t sz, const Void_t *caller);
1610 static void free_check(Void_t* mem, const Void_t *caller);
1611 static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1612 const Void_t *caller);
1613 static Void_t* memalign_check(size_t alignment, size_t bytes,
1614 const Void_t *caller);
1615 #ifndef NO_THREADS
1616 # ifdef _LIBC
1617 # if USE___THREAD || !defined SHARED
1618 /* These routines are never needed in this configuration. */
1619 # define NO_STARTER
1620 # endif
1621 # endif
1622 # ifdef NO_STARTER
1623 # undef NO_STARTER
1624 # else
1625 static Void_t* malloc_starter(size_t sz, const Void_t *caller);
1626 static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller);
1627 static void free_starter(Void_t* mem, const Void_t *caller);
1628 # endif
1629 static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1630 static void free_atfork(Void_t* mem, const Void_t *caller);
1631 #endif
1632
1633 #else
1634
1635 Void_t* _int_malloc();
1636 void _int_free();
1637 Void_t* _int_realloc();
1638 Void_t* _int_memalign();
1639 Void_t* _int_valloc();
1640 Void_t* _int_pvalloc();
1641 /*static Void_t* cALLOc();*/
1642 static Void_t** _int_icalloc();
1643 static Void_t** _int_icomalloc();
1644 static int mTRIm();
1645 static size_t mUSABLe();
1646 static void mSTATs();
1647 static int mALLOPt();
1648 static struct mallinfo mALLINFo();
1649
1650 #endif
1651
1652
1653
1654
1655 /* ------------- Optional versions of memcopy ---------------- */
1656
1657
1658 #if USE_MEMCPY
1659
1660 /*
1661 Note: memcpy is ONLY invoked with non-overlapping regions,
1662 so the (usually slower) memmove is not needed.
1663 */
1664
1665 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1666 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1667
1668 #else /* !USE_MEMCPY */
1669
1670 /* Use Duff's device for good zeroing/copying performance. */
1671
1672 #define MALLOC_ZERO(charp, nbytes) \
1673 do { \
1674 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1675 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1676 long mcn; \
1677 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1678 switch (mctmp) { \
1679 case 0: for(;;) { *mzp++ = 0; \
1680 case 7: *mzp++ = 0; \
1681 case 6: *mzp++ = 0; \
1682 case 5: *mzp++ = 0; \
1683 case 4: *mzp++ = 0; \
1684 case 3: *mzp++ = 0; \
1685 case 2: *mzp++ = 0; \
1686 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1687 } \
1688 } while(0)
1689
1690 #define MALLOC_COPY(dest,src,nbytes) \
1691 do { \
1692 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1693 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1694 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1695 long mcn; \
1696 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1697 switch (mctmp) { \
1698 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1699 case 7: *mcdst++ = *mcsrc++; \
1700 case 6: *mcdst++ = *mcsrc++; \
1701 case 5: *mcdst++ = *mcsrc++; \
1702 case 4: *mcdst++ = *mcsrc++; \
1703 case 3: *mcdst++ = *mcsrc++; \
1704 case 2: *mcdst++ = *mcsrc++; \
1705 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1706 } \
1707 } while(0)
1708
1709 #endif
1710
1711 /* ------------------ MMAP support ------------------ */
1712
1713
1714 #if HAVE_MMAP
1715
1716 #include <fcntl.h>
1717 #ifndef LACKS_SYS_MMAN_H
1718 #include <sys/mman.h>
1719 #endif
1720
1721 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1722 # define MAP_ANONYMOUS MAP_ANON
1723 #endif
1724 #if !defined(MAP_FAILED)
1725 # define MAP_FAILED ((char*)-1)
1726 #endif
1727
1728 #ifndef MAP_NORESERVE
1729 # ifdef MAP_AUTORESRV
1730 # define MAP_NORESERVE MAP_AUTORESRV
1731 # else
1732 # define MAP_NORESERVE 0
1733 # endif
1734 #endif
1735
1736 /*
1737 Nearly all versions of mmap support MAP_ANONYMOUS,
1738 so the following is unlikely to be needed, but is
1739 supplied just in case.
1740 */
1741
1742 #ifndef MAP_ANONYMOUS
1743
1744 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1745
1746 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1747 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1748 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1749 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1750
1751 #else
1752
1753 #define MMAP(addr, size, prot, flags) \
1754 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1755
1756 #endif
1757
1758
1759 #endif /* HAVE_MMAP */
1760
1761
1762 /*
1763 ----------------------- Chunk representations -----------------------
1764 */
1765
1766
1767 /*
1768 This struct declaration is misleading (but accurate and necessary).
1769 It declares a "view" into memory allowing access to necessary
1770 fields at known offsets from a given base. See explanation below.
1771 */
1772
1773 struct malloc_chunk {
1774
1775 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1776 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1777
1778 struct malloc_chunk* fd; /* double links -- used only if free. */
1779 struct malloc_chunk* bk;
1780
1781 /* Only used for large blocks: pointer to next larger size. */
1782 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1783 struct malloc_chunk* bk_nextsize;
1784 };
1785
1786
1787 /*
1788 malloc_chunk details:
1789
1790 (The following includes lightly edited explanations by Colin Plumb.)
1791
1792 Chunks of memory are maintained using a `boundary tag' method as
1793 described in e.g., Knuth or Standish. (See the paper by Paul
1794 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1795 survey of such techniques.) Sizes of free chunks are stored both
1796 in the front of each chunk and at the end. This makes
1797 consolidating fragmented chunks into bigger chunks very fast. The
1798 size fields also hold bits representing whether chunks are free or
1799 in use.
1800
1801 An allocated chunk looks like this:
1802
1803
1804 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1805 | Size of previous chunk, if allocated | |
1806 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1807 | Size of chunk, in bytes |M|P|
1808 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1809 | User data starts here... .
1810 . .
1811 . (malloc_usable_size() bytes) .
1812 . |
1813 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1814 | Size of chunk |
1815 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1816
1817
1818 Where "chunk" is the front of the chunk for the purpose of most of
1819 the malloc code, but "mem" is the pointer that is returned to the
1820 user. "Nextchunk" is the beginning of the next contiguous chunk.
1821
1822     Chunks always begin on even word boundaries, so the mem portion
1823 (which is returned to the user) is also on an even word boundary, and
1824 thus at least double-word aligned.
1825
1826 Free chunks are stored in circular doubly-linked lists, and look like this:
1827
1828 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1829 | Size of previous chunk |
1830 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1831 `head:' | Size of chunk, in bytes |P|
1832 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1833 | Forward pointer to next chunk in list |
1834 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1835 | Back pointer to previous chunk in list |
1836 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1837 | Unused space (may be 0 bytes long) .
1838 . .
1839 . |
1840 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1841 `foot:' | Size of chunk, in bytes |
1842 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1843
1844 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1845 chunk size (which is always a multiple of two words), is an in-use
1846 bit for the *previous* chunk. If that bit is *clear*, then the
1847 word before the current chunk size contains the previous chunk
1848 size, and can be used to find the front of the previous chunk.
1849 The very first chunk allocated always has this bit set,
1850 preventing access to non-existent (or non-owned) memory. If
1851 prev_inuse is set for any given chunk, then you CANNOT determine
1852 the size of the previous chunk, and might even get a memory
1853 addressing fault when trying to do so.
1854
1855 Note that the `foot' of the current chunk is actually represented
1856 as the prev_size of the NEXT chunk. This makes it easier to
1857     deal with alignments etc. but can be very confusing when trying
1858 to extend or adapt this code.
1859
1860 The two exceptions to all this are
1861
1862 1. The special chunk `top' doesn't bother using the
1863 trailing size field since there is no next contiguous chunk
1864 that would have to index off it. After initialization, `top'
1865 is forced to always exist. If it would become less than
1866 MINSIZE bytes long, it is replenished.
1867
1868 2. Chunks allocated via mmap, which have the second-lowest-order
1869 bit M (IS_MMAPPED) set in their size fields. Because they are
1870 allocated one-by-one, each must contain its own trailing size field.
1871
1872 */
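/*
  A minimal, self-contained sketch (illustration only, not compiled) of
  boundary-tag navigation over the layout pictured above: the low bits
  of `size' carry the flag bits, the rest is the chunk length, and a
  free predecessor is reached through its foot, which is stored as our
  prev_size.  The real macros follow further below.
*/
#if 0
#include <stddef.h>

struct tag_chunk {
  size_t prev_size;  /* foot of previous chunk; valid only if it is free */
  size_t size;       /* length in bytes, flags in the low-order bits */
};

#define TAG_FLAG_BITS ((size_t)0x7) /* PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA */

static size_t tag_chunksize(struct tag_chunk *p)
{
  return p->size & ~TAG_FLAG_BITS;
}

static struct tag_chunk *tag_next_chunk(struct tag_chunk *p)
{
  return (struct tag_chunk *)((char *)p + tag_chunksize(p));
}

/* Legal only when the PREV_INUSE bit of p->size is clear.  */
static struct tag_chunk *tag_prev_chunk(struct tag_chunk *p)
{
  return (struct tag_chunk *)((char *)p - p->prev_size);
}
#endif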
1873
1874 /*
1875 ---------- Size and alignment checks and conversions ----------
1876 */
1877
1878 /* conversion from malloc headers to user pointers, and back */
1879
1880 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1881 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1882
1883 /* The smallest possible chunk */
1884 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1885
1886 /* The smallest size we can malloc is an aligned minimal chunk */
1887
1888 #define MINSIZE \
1889 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1890
1891 /* Check if m has acceptable alignment */
1892
1893 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1894
1895 #define misaligned_chunk(p) \
1896 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1897 & MALLOC_ALIGN_MASK)
1898
1899
1900 /*
1901 Check if a request is so large that it would wrap around zero when
1902 padded and aligned. To simplify some other code, the bound is made
1903 low enough so that adding MINSIZE will also not wrap around zero.
1904 */
1905
1906 #define REQUEST_OUT_OF_RANGE(req) \
1907 ((unsigned long)(req) >= \
1908 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1909
1910 /* pad request bytes into a usable size -- internal version */
1911
1912 #define request2size(req) \
1913 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1914 MINSIZE : \
1915 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1916
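/*
  Worked examples of the padding arithmetic above (illustration only;
  assumes SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8, so MALLOC_ALIGN_MASK
  is 7 and MINSIZE is 16):
*/
#if 0
#include <assert.h>

static void request2size_examples(void)
{
  assert(request2size(0)  == 16);   /* 0 + 4 + 7 = 11 < MINSIZE, so 16  */
  assert(request2size(12) == 16);   /* 12 + 4 + 7 = 23, masked to 16    */
  assert(request2size(13) == 24);   /* 13 + 4 + 7 = 24, already aligned */
  assert(request2size(20) == 24);   /* 20 + 4 + 7 = 31, masked to 24    */
}
#endif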
1917 /* Same, except also perform argument check */
1918
1919 #define checked_request2size(req, sz) \
1920 if (REQUEST_OUT_OF_RANGE(req)) { \
1921 MALLOC_FAILURE_ACTION; \
1922 return 0; \
1923 } \
1924 (sz) = request2size(req);
1925
1926 /*
1927 --------------- Physical chunk operations ---------------
1928 */
1929
1930
1931 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1932 #define PREV_INUSE 0x1
1933
1934 /* extract inuse bit of previous chunk */
1935 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1936
1937
1938 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1939 #define IS_MMAPPED 0x2
1940
1941 /* check for mmap()'ed chunk */
1942 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1943
1944
1945 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1946 from a non-main arena. This is only set immediately before handing
1947 the chunk to the user, if necessary. */
1948 #define NON_MAIN_ARENA 0x4
1949
1950 /* check for chunk from non-main arena */
1951 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1952
1953
1954 /*
1955 Bits to mask off when extracting size
1956
1957 Note: IS_MMAPPED is intentionally not masked off from size field in
1958 macros for which mmapped chunks should never be seen. This should
1959 cause helpful core dumps to occur if it is tried by accident by
1960 people extending or adapting this malloc.
1961 */
1962 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1963
1964 /* Get size, ignoring use bits */
1965 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1966
1967
1968 /* Ptr to next physical malloc_chunk. */
1969 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1970
1971 /* Ptr to previous physical malloc_chunk */
1972 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1973
1974 /* Treat space at ptr + offset as a chunk */
1975 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1976
1977 /* extract p's inuse bit */
1978 #define inuse(p)\
1979 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1980
1981 /* set/clear chunk as being inuse without otherwise disturbing */
1982 #define set_inuse(p)\
1983 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1984
1985 #define clear_inuse(p)\
1986 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1987
1988
1989 /* check/set/clear inuse bits in known places */
1990 #define inuse_bit_at_offset(p, s)\
1991 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1992
1993 #define set_inuse_bit_at_offset(p, s)\
1994 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1995
1996 #define clear_inuse_bit_at_offset(p, s)\
1997 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1998
1999
2000 /* Set size at head, without disturbing its use bit */
2001 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
2002
2003 /* Set size/use field */
2004 #define set_head(p, s) ((p)->size = (s))
2005
2006 /* Set size at footer (only when chunk is not in use) */
2007 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
2008
2009
2010 /*
2011 -------------------- Internal data structures --------------------
2012
2013 All internal state is held in an instance of malloc_state defined
2014 below. There are no other static variables, except in two optional
2015 cases:
2016 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
2017 * If HAVE_MMAP is true, but mmap doesn't support
2018 MAP_ANONYMOUS, a dummy file descriptor for mmap.
2019
2020 Beware of lots of tricks that minimize the total bookkeeping space
2021   requirements. The result is a little over 1K bytes (for 4-byte
2022   pointers and size_t).
2023 */
2024
2025 /*
2026 Bins
2027
2028 An array of bin headers for free chunks. Each bin is doubly
2029 linked. The bins are approximately proportionally (log) spaced.
2030 There are a lot of these bins (128). This may look excessive, but
2031 works very well in practice. Most bins hold sizes that are
2032 unusual as malloc request sizes, but are more usual for fragments
2033 and consolidated sets of chunks, which is what these bins hold, so
2034 they can be found quickly. All procedures maintain the invariant
2035 that no consolidated chunk physically borders another one, so each
2036     chunk in a list is known to be preceded and followed by either
2037 inuse chunks or the ends of memory.
2038
2039 Chunks in bins are kept in size order, with ties going to the
2040 approximately least recently used chunk. Ordering isn't needed
2041 for the small bins, which all contain the same-sized chunks, but
2042 facilitates best-fit allocation for larger chunks. These lists
2043 are just sequential. Keeping them in order almost never requires
2044 enough traversal to warrant using fancier ordered data
2045 structures.
2046
2047 Chunks of the same size are linked with the most
2048 recently freed at the front, and allocations are taken from the
2049 back. This results in LRU (FIFO) allocation order, which tends
2050 to give each chunk an equal opportunity to be consolidated with
2051 adjacent freed chunks, resulting in larger free chunks and less
2052 fragmentation.
2053
2054 To simplify use in double-linked lists, each bin header acts
2055 as a malloc_chunk. This avoids special-casing for headers.
2056 But to conserve space and improve locality, we allocate
2057 only the fd/bk pointers of bins, and then use repositioning tricks
2058     to treat these as the fields of a malloc_chunk*.  (See the sketch after the bin_at macro below.)
2059 */
2060
2061 typedef struct malloc_chunk* mbinptr;
2062
2063 /* addressing -- note that bin_at(0) does not exist */
2064 #define bin_at(m, i) \
2065 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
2066 - offsetof (struct malloc_chunk, fd))
2067
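/*
  A minimal sketch (illustration only, hypothetical names) of the
  repositioning trick used by bin_at above: only the two list pointers
  per bin are stored, and subtracting offsetof(fd) lets that pair be
  addressed as the fd/bk fields of a struct malloc_chunk whose header
  words are never actually allocated.
*/
#if 0
#include <stddef.h>

struct fake_chunk {                 /* stand-in for struct malloc_chunk */
  size_t prev_size, size;
  struct fake_chunk *fd, *bk;
};

static struct fake_chunk *fake_bins[2 * 4];  /* fd/bk pairs for 4 bins */

/* Header of bin i, usable only through its fd/bk members; the bytes
   where prev_size/size would sit belong to the preceding bin's pair. */
static struct fake_chunk *fake_bin_at(int i)
{
  return (struct fake_chunk *)
    ((char *)&fake_bins[(i - 1) * 2] - offsetof(struct fake_chunk, fd));
}
#endif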
2068 /* analog of ++bin */
2069 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
2070
2071 /* Reminders about list directionality within bins */
2072 #define first(b) ((b)->fd)
2073 #define last(b) ((b)->bk)
2074
2075 /* Take a chunk off a bin list, checking fd/bk link integrity and, for large chunks, fixing up the nextsize links. */
2076 #define unlink(P, BK, FD) { \
2077 FD = P->fd; \
2078 BK = P->bk; \
2079 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
2080 malloc_printerr (check_action, "corrupted double-linked list", P); \
2081 else { \
2082 FD->bk = BK; \
2083 BK->fd = FD; \
2084 if (!in_smallbin_range (P->size) \
2085 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
2086 assert (P->fd_nextsize->bk_nextsize == P); \
2087 assert (P->bk_nextsize->fd_nextsize == P); \
2088 if (FD->fd_nextsize == NULL) { \
2089 if (P->fd_nextsize == P) \
2090 FD->fd_nextsize = FD->bk_nextsize = FD; \
2091 else { \
2092 FD->fd_nextsize = P->fd_nextsize; \
2093 FD->bk_nextsize = P->bk_nextsize; \
2094 P->fd_nextsize->bk_nextsize = FD; \
2095 P->bk_nextsize->fd_nextsize = FD; \
2096 } \
2097 } else { \
2098 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
2099 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
2100 } \
2101 } \
2102 } \
2103 }
2104
2105 /*
2106 Indexing
2107
2108 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2109 8 bytes apart. Larger bins are approximately logarithmically spaced:
2110
2111 64 bins of size 8
2112 32 bins of size 64
2113 16 bins of size 512
2114 8 bins of size 4096
2115 4 bins of size 32768
2116 2 bins of size 262144
2117 1 bin of size what's left
2118
2119 There is actually a little bit of slop in the numbers in bin_index
2120 for the sake of speed. This makes no difference elsewhere.
2121
2122 The bins top out around 1MB because we expect to service large
2123     requests via mmap.  (Worked examples of these index computations follow the macros below.)
2124 */
2125
2126 #define NBINS 128
2127 #define NSMALLBINS 64
2128 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
2129 #define MIN_LARGE_SIZE (NSMALLBINS * SMALLBIN_WIDTH)
2130
2131 #define in_smallbin_range(sz) \
2132 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
2133
2134 #define smallbin_index(sz) \
2135 (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
2136
2137 #define largebin_index_32(sz) \
2138 (((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
2139 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2140 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2141 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2142 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2143 126)
2144
2145 // XXX It remains to be seen whether it is good to keep the widths of
2146 // XXX the buckets the same or whether it should be scaled by a factor
2147 // XXX of two as well.
2148 #define largebin_index_64(sz) \
2149 (((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \
2150 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2151 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2152 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2153 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2154 126)
2155
2156 #define largebin_index(sz) \
2157 (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
2158
2159 #define bin_index(sz) \
2160 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
2161
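/*
  Worked examples of the index computations above (illustration only;
  assumes the 32 bit layout: SIZE_SZ == 4, SMALLBIN_WIDTH == 8 and
  MIN_LARGE_SIZE == 512):
*/
#if 0
#include <assert.h>

static void bin_index_examples(void)
{
  assert(bin_index(16)   == 2);    /* small bin: 16 >> 3                */
  assert(bin_index(504)  == 63);   /* last small bin: 504 >> 3          */
  assert(bin_index(512)  == 64);   /* first large bin: 56 + (512 >> 6)  */
  assert(bin_index(1024) == 72);   /* 56 + (1024 >> 6)                  */
  assert(bin_index(4096) == 99);   /* 91 + (4096 >> 9)                  */
}
#endif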
2162
2163 /*
2164 Unsorted chunks
2165
2166 All remainders from chunk splits, as well as all returned chunks,
2167 are first placed in the "unsorted" bin. They are then placed
2168 in regular bins after malloc gives them ONE chance to be used before
2169 binning. So, basically, the unsorted_chunks list acts as a queue,
2170 with chunks being placed on it in free (and malloc_consolidate),
2171 and taken off (to be either used or placed in bins) in malloc.
2172
2173 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2174 does not have to be taken into account in size comparisons.
2175 */
2176
2177 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2178 #define unsorted_chunks(M) (bin_at(M, 1))
2179
2180 /*
2181 Top
2182
2183 The top-most available chunk (i.e., the one bordering the end of
2184 available memory) is treated specially. It is never included in
2185 any bin, is used only if no other chunk is available, and is
2186 released back to the system if it is very large (see
2187 M_TRIM_THRESHOLD). Because top initially
2188 points to its own bin with initial zero size, thus forcing
2189 extension on the first malloc request, we avoid having any special
2190 code in malloc to check whether it even exists yet. But we still
2191 need to do so when getting memory from system, so we make
2192 initial_top treat the bin as a legal but unusable chunk during the
2193 interval between initialization and the first call to
2194 sYSMALLOc. (This is somewhat delicate, since it relies on
2195 the 2 preceding words to be zero during this interval as well.)
2196 */
2197
2198 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2199 #define initial_top(M) (unsorted_chunks(M))
2200
2201 /*
2202 Binmap
2203
2204 To help compensate for the large number of bins, a one-level index
2205 structure is used for bin-by-bin searching. `binmap' is a
2206 bitvector recording whether bins are definitely empty so they can
2207     be skipped over during traversals. The bits are NOT always
2208 cleared as soon as bins are empty, but instead only
2209 when they are noticed to be empty during traversal in malloc.
2210 */
2211
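/*
  A minimal sketch (illustration only, hypothetical names) of the
  one-level index: one bit per bin, packed 32 to a word, set whenever a
  chunk is binned and cleared only lazily, when a traversal finds the
  bin empty.
*/
#if 0
static unsigned int fake_binmap[128 / 32];       /* one bit per bin */

static void fake_mark_bin(int i)
{
  fake_binmap[i >> 5] |= 1U << (i & 31);
}

static void fake_unmark_bin(int i)               /* done lazily, in malloc */
{
  fake_binmap[i >> 5] &= ~(1U << (i & 31));
}

static int fake_bin_maybe_nonempty(int i)        /* 0: definitely empty */
{
  return fake_binmap[i >> 5] & (1U << (i & 31));
}
#endif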
2212 /* Conservatively use 32 bits per map word, even if on 64bit system */
2213 #define BINMAPSHIFT 5
2214 #define BITSPERMAP (1U << BINMAPSHIFT)
2215 #define BINMAPSIZE (NBINS / BITSPERMAP)
2216
2217 #define idx2block(i) ((i) >> BINMAPSHIFT)
2218 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2219
2220 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2221 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2222 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
2223
2224 /*
2225 Fastbins
2226
2227 An array of lists holding recently freed small chunks. Fastbins
2228 are not doubly linked. It is faster to single-link them, and
2229 since chunks are never removed from the middles of these lists,
2230 double linking is not necessary. Also, unlike regular bins, they
2231 are not even processed in FIFO order (they use faster LIFO) since
2232 ordering doesn't much matter in the transient contexts in which
2233 fastbins are normally used.
2234
2235 Chunks in fastbins keep their inuse bit set, so they cannot
2236 be consolidated with other free chunks. malloc_consolidate
2237 releases all chunks in fastbins and consolidates them with
2238 other free chunks.
2239 */
2240
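/*
  A minimal sketch (illustration only, hypothetical names) of the
  single-linked LIFO discipline described above: free() pushes on the
  front, malloc() pops from the same end, and nothing is ever taken
  from the middle of the list.
*/
#if 0
struct fake_fast { struct fake_fast *fd; };

static struct fake_fast *fake_fastbin;  /* one list; the real code keeps
                                           one per fastbin size class */

static void fake_fast_free(struct fake_fast *p)
{
  p->fd = fake_fastbin;                 /* most recently freed goes first */
  fake_fastbin = p;
}

static struct fake_fast *fake_fast_malloc(void)
{
  struct fake_fast *p = fake_fastbin;   /* pop from the front: LIFO */
  if (p != 0)
    fake_fastbin = p->fd;
  return p;
}
#endif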
2241 typedef struct malloc_chunk* mfastbinptr;
2242
2243 /* offset 2 to use otherwise unindexable first 2 bins */
2244 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2245
2246 /* The maximum fastbin request size we support */
2247 #define MAX_FAST_SIZE 80
2248
2249 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
2250
2251 /*
2252 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2253 that triggers automatic consolidation of possibly-surrounding
2254 fastbin chunks. This is a heuristic, so the exact value should not
2255 matter too much. It is defined at half the default trim threshold as a
2256 compromise heuristic to only attempt consolidation if it is likely
2257 to lead to trimming. However, it is not dynamically tunable, since
2258 consolidation reduces fragmentation surrounding large chunks even
2259 if trimming is not used.
2260 */
2261
2262 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
2263
2264 /*
2265 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2266 they are used as flags.
2267 */
2268
2269 /*
2270 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2271 some fastbin chunks. It is set true on entering a chunk into any
2272 fastbin, and cleared only in malloc_consolidate.
2273
2274 The truth value is inverted so that have_fastchunks will be true
2275 upon startup (since statics are zero-filled), simplifying
2276 initialization checks.
2277 */
2278
2279 #define FASTCHUNKS_BIT (1U)
2280
2281 #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
2282 #define clear_fastchunks(M) ((M)->flags |= FASTCHUNKS_BIT)
2283 #define set_fastchunks(M) ((M)->flags &= ~FASTCHUNKS_BIT)
2284
2285 /*
2286 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2287 regions. Otherwise, contiguity is exploited in merging together,
2288 when possible, results from consecutive MORECORE calls.
2289
2290 The initial value comes from MORECORE_CONTIGUOUS, but is
2291 changed dynamically if mmap is ever used as an sbrk substitute.
2292 */
2293
2294 #define NONCONTIGUOUS_BIT (2U)
2295
2296 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
2297 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
2298 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
2299 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
2300
2301 /*
2302 Set value of max_fast.
2303 Use impossibly small value if 0.
2304 Precondition: there are no existing fastbin chunks.
2305 Setting the value clears fastchunk bit but preserves noncontiguous bit.
2306 */
2307
2308 #define set_max_fast(s) \
2309 global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)
2310 #define get_max_fast() global_max_fast
2311
2312
2313 /*
2314 ----------- Internal state representation and initialization -----------
2315 */
2316
2317 struct malloc_state {
2318 /* Serialize access. */
2319 mutex_t mutex;
2320
2321 /* Flags (formerly in max_fast). */
2322 int flags;
2323
2324 #if THREAD_STATS
2325 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2326 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
2327 #endif
2328
2329 /* Fastbins */
2330 mfastbinptr fastbins[NFASTBINS];
2331
2332 /* Base of the topmost chunk -- not otherwise kept in a bin */
2333 mchunkptr top;
2334
2335 /* The remainder from the most recent split of a small request */
2336 mchunkptr last_remainder;
2337
2338 /* Normal bins packed as described above */
2339 mchunkptr bins[NBINS * 2 - 2];
2340
2341 /* Bitmap of bins */
2342 unsigned int binmap[BINMAPSIZE];
2343
2344 /* Linked list */
2345 struct malloc_state *next;
2346
2347 /* Memory allocated from the system in this arena. */
2348 INTERNAL_SIZE_T system_mem;
2349 INTERNAL_SIZE_T max_system_mem;
2350 };
2351
2352 struct malloc_par {
2353 /* Tunable parameters */
2354 unsigned long trim_threshold;
2355 INTERNAL_SIZE_T top_pad;
2356 INTERNAL_SIZE_T mmap_threshold;
2357
2358 /* Memory map support */
2359 int n_mmaps;
2360 int n_mmaps_max;
2361 int max_n_mmaps;
2362 /* the mmap_threshold is dynamic, until the user sets
2363 it manually, at which point we need to disable any
2364 dynamic behavior. */
2365 int no_dyn_threshold;
2366
2367 /* Cache malloc_getpagesize */
2368 unsigned int pagesize;
2369
2370 /* Statistics */
2371 INTERNAL_SIZE_T mmapped_mem;
2372 /*INTERNAL_SIZE_T sbrked_mem;*/
2373 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2374 INTERNAL_SIZE_T max_mmapped_mem;
2375 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
2376
2377 /* First address handed out by MORECORE/sbrk. */
2378 char* sbrk_base;
2379 };
2380
2381 /* There are several instances of this struct ("arenas") in this
2382 malloc. If you are adapting this malloc in a way that does NOT use
2383 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2384 before using. This malloc relies on the property that malloc_state
2385 is initialized to all zeroes (as is true of C statics). */
2386
2387 static struct malloc_state main_arena;
2388
2389 /* There is only one instance of the malloc parameters. */
2390
2391 static struct malloc_par mp_;
2392
2393
2394 /* Maximum size of memory handled in fastbins. */
2395 static INTERNAL_SIZE_T global_max_fast;
2396
2397 /*
2398 Initialize a malloc_state struct.
2399
2400 This is called only from within malloc_consolidate, which needs
2401 be called in the same contexts anyway. It is never called directly
2402 outside of malloc_consolidate because some optimizing compilers try
2403 to inline it at all call points, which turns out not to be an
2404 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2405 */
2406
2407 #if __STD_C
2408 static void malloc_init_state(mstate av)
2409 #else
2410 static void malloc_init_state(av) mstate av;
2411 #endif
2412 {
2413 int i;
2414 mbinptr bin;
2415
2416 /* Establish circular links for normal bins */
2417 for (i = 1; i < NBINS; ++i) {
2418 bin = bin_at(av,i);
2419 bin->fd = bin->bk = bin;
2420 }
2421
2422 #if MORECORE_CONTIGUOUS
2423 if (av != &main_arena)
2424 #endif
2425 set_noncontiguous(av);
2426 if (av == &main_arena)
2427 set_max_fast(DEFAULT_MXFAST);
2428 av->flags |= FASTCHUNKS_BIT;
2429
2430 av->top = initial_top(av);
2431 }
2432
2433 /*
2434 Other internal utilities operating on mstates
2435 */
2436
2437 #if __STD_C
2438 static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2439 static int sYSTRIm(size_t, mstate);
2440 static void malloc_consolidate(mstate);
2441 #ifndef _LIBC
2442 static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
2443 #endif
2444 #else
2445 static Void_t* sYSMALLOc();
2446 static int sYSTRIm();
2447 static void malloc_consolidate();
2448 static Void_t** iALLOc();
2449 #endif
2450
2451
2452 /* -------------- Early definitions for debugging hooks ---------------- */
2453
2454 /* Define and initialize the hook variables. These weak definitions must
2455 appear before any use of the variables in a function (arena.c uses one). */
2456 #ifndef weak_variable
2457 #ifndef _LIBC
2458 #define weak_variable /**/
2459 #else
2460 /* In GNU libc we want the hook variables to be weak definitions to
2461 avoid a problem with Emacs. */
2462 #define weak_variable weak_function
2463 #endif
2464 #endif
2465
2466 /* Forward declarations. */
2467 static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
2468 const __malloc_ptr_t caller));
2469 static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
2470 const __malloc_ptr_t caller));
2471 static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
2472 const __malloc_ptr_t caller));
2473
2474 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
2475 void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
2476 const __malloc_ptr_t) = NULL;
2477 __malloc_ptr_t weak_variable (*__malloc_hook)
2478 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
2479 __malloc_ptr_t weak_variable (*__realloc_hook)
2480 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
2481 = realloc_hook_ini;
2482 __malloc_ptr_t weak_variable (*__memalign_hook)
2483 (size_t __alignment, size_t __size, const __malloc_ptr_t)
2484 = memalign_hook_ini;
2485 void weak_variable (*__after_morecore_hook) (void) = NULL;
2486
2487
2488 /* ---------------- Error behavior ------------------------------------ */
2489
2490 #ifndef DEFAULT_CHECK_ACTION
2491 #define DEFAULT_CHECK_ACTION 3
2492 #endif
2493
2494 static int check_action = DEFAULT_CHECK_ACTION;
2495
2496
2497 /* ------------------ Testing support ----------------------------------*/
2498
2499 static int perturb_byte;
2500
2501 #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
2502 #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
2503
2504
2505 /* ------------------- Support for multiple arenas -------------------- */
2506 #include "arena.c"
2507
2508 /*
2509 Debugging support
2510
2511 These routines make a number of assertions about the states
2512 of data structures that should be true at all times. If any
2513 are not true, it's very likely that a user program has somehow
2514   trashed memory. (It's also possible that there is a coding error
2515   in malloc, in which case please report it!)
2516 */
2517
2518 #if ! MALLOC_DEBUG
2519
2520 #define check_chunk(A,P)
2521 #define check_free_chunk(A,P)
2522 #define check_inuse_chunk(A,P)
2523 #define check_remalloced_chunk(A,P,N)
2524 #define check_malloced_chunk(A,P,N)
2525 #define check_malloc_state(A)
2526
2527 #else
2528
2529 #define check_chunk(A,P) do_check_chunk(A,P)
2530 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2531 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2532 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2533 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2534 #define check_malloc_state(A) do_check_malloc_state(A)
2535
2536 /*
2537 Properties of all chunks
2538 */
2539
2540 #if __STD_C
2541 static void do_check_chunk(mstate av, mchunkptr p)
2542 #else
2543 static void do_check_chunk(av, p) mstate av; mchunkptr p;
2544 #endif
2545 {
2546 unsigned long sz = chunksize(p);
2547 /* min and max possible addresses assuming contiguous allocation */
2548 char* max_address = (char*)(av->top) + chunksize(av->top);
2549 char* min_address = max_address - av->system_mem;
2550
2551 if (!chunk_is_mmapped(p)) {
2552
2553 /* Has legal address ... */
2554 if (p != av->top) {
2555 if (contiguous(av)) {
2556 assert(((char*)p) >= min_address);
2557 assert(((char*)p + sz) <= ((char*)(av->top)));
2558 }
2559 }
2560 else {
2561 /* top size is always at least MINSIZE */
2562 assert((unsigned long)(sz) >= MINSIZE);
2563 /* top predecessor always marked inuse */
2564 assert(prev_inuse(p));
2565 }
2566
2567 }
2568 else {
2569 #if HAVE_MMAP
2570 /* address is outside main heap */
2571 if (contiguous(av) && av->top != initial_top(av)) {
2572 assert(((char*)p) < min_address || ((char*)p) >= max_address);
2573 }
2574 /* chunk is page-aligned */
2575 assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2576 /* mem is aligned */
2577 assert(aligned_OK(chunk2mem(p)));
2578 #else
2579 /* force an appropriate assert violation if debug set */
2580 assert(!chunk_is_mmapped(p));
2581 #endif
2582 }
2583 }
2584
2585 /*
2586 Properties of free chunks
2587 */
2588
2589 #if __STD_C
2590 static void do_check_free_chunk(mstate av, mchunkptr p)
2591 #else
2592 static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
2593 #endif
2594 {
2595 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2596 mchunkptr next = chunk_at_offset(p, sz);
2597
2598 do_check_chunk(av, p);
2599
2600 /* Chunk must claim to be free ... */
2601 assert(!inuse(p));
2602 assert (!chunk_is_mmapped(p));
2603
2604 /* Unless a special marker, must have OK fields */
2605 if ((unsigned long)(sz) >= MINSIZE)
2606 {
2607 assert((sz & MALLOC_ALIGN_MASK) == 0);
2608 assert(aligned_OK(chunk2mem(p)));
2609 /* ... matching footer field */
2610 assert(next->prev_size == sz);
2611 /* ... and is fully consolidated */
2612 assert(prev_inuse(p));
2613 assert (next == av->top || inuse(next));
2614
2615 /* ... and has minimally sane links */
2616 assert(p->fd->bk == p);
2617 assert(p->bk->fd == p);
2618 }
2619 else /* markers are always of size SIZE_SZ */
2620 assert(sz == SIZE_SZ);
2621 }
2622
2623 /*
2624 Properties of inuse chunks
2625 */
2626
2627 #if __STD_C
2628 static void do_check_inuse_chunk(mstate av, mchunkptr p)
2629 #else
2630 static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
2631 #endif
2632 {
2633 mchunkptr next;
2634
2635 do_check_chunk(av, p);
2636
2637 if (chunk_is_mmapped(p))
2638 return; /* mmapped chunks have no next/prev */
2639
2640 /* Check whether it claims to be in use ... */
2641 assert(inuse(p));
2642
2643 next = next_chunk(p);
2644
2645 /* ... and is surrounded by OK chunks.
2646 Since more things can be checked with free chunks than inuse ones,
2647 if an inuse chunk borders them and debug is on, it's worth doing them.
2648 */
2649 if (!prev_inuse(p)) {
2650 /* Note that we cannot even look at prev unless it is not inuse */
2651 mchunkptr prv = prev_chunk(p);
2652 assert(next_chunk(prv) == p);
2653 do_check_free_chunk(av, prv);
2654 }
2655
2656 if (next == av->top) {
2657 assert(prev_inuse(next));
2658 assert(chunksize(next) >= MINSIZE);
2659 }
2660 else if (!inuse(next))
2661 do_check_free_chunk(av, next);
2662 }
2663
2664 /*
2665 Properties of chunks recycled from fastbins
2666 */
2667
2668 #if __STD_C
2669 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2670 #else
2671 static void do_check_remalloced_chunk(av, p, s)
2672 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2673 #endif
2674 {
2675 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2676
2677 if (!chunk_is_mmapped(p)) {
2678 assert(av == arena_for_chunk(p));
2679 if (chunk_non_main_arena(p))
2680 assert(av != &main_arena);
2681 else
2682 assert(av == &main_arena);
2683 }
2684
2685 do_check_inuse_chunk(av, p);
2686
2687 /* Legal size ... */
2688 assert((sz & MALLOC_ALIGN_MASK) == 0);
2689 assert((unsigned long)(sz) >= MINSIZE);
2690 /* ... and alignment */
2691 assert(aligned_OK(chunk2mem(p)));
2692 /* chunk is less than MINSIZE more than request */
2693 assert((long)(sz) - (long)(s) >= 0);
2694 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2695 }
2696
2697 /*
2698 Properties of nonrecycled chunks at the point they are malloced
2699 */
2700
2701 #if __STD_C
2702 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2703 #else
2704 static void do_check_malloced_chunk(av, p, s)
2705 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2706 #endif
2707 {
2708 /* same as recycled case ... */
2709 do_check_remalloced_chunk(av, p, s);
2710
2711 /*
2712 ... plus, must obey implementation invariant that prev_inuse is
2713 always true of any allocated chunk; i.e., that each allocated
2714 chunk borders either a previously allocated and still in-use
2715 chunk, or the base of its memory arena. This is ensured
2716     by making all allocations from the `lowest' part of any found
2717 chunk. This does not necessarily hold however for chunks
2718 recycled via fastbins.
2719 */
2720
2721 assert(prev_inuse(p));
2722 }
2723
2724
2725 /*
2726 Properties of malloc_state.
2727
2728 This may be useful for debugging malloc, as well as detecting user
2729 programmer errors that somehow write into malloc_state.
2730
2731 If you are extending or experimenting with this malloc, you can
2732 probably figure out how to hack this routine to print out or
2733 display chunk addresses, sizes, bins, and other instrumentation.
2734 */
2735
2736 static void do_check_malloc_state(mstate av)
2737 {
2738 int i;
2739 mchunkptr p;
2740 mchunkptr q;
2741 mbinptr b;
2742 unsigned int binbit;
2743 int empty;
2744 unsigned int idx;
2745 INTERNAL_SIZE_T size;
2746 unsigned long total = 0;
2747 int max_fast_bin;
2748
2749 /* internal size_t must be no wider than pointer type */
2750 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2751
2752 /* alignment is a power of 2 */
2753 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2754
2755 /* cannot run remaining checks until fully initialized */
2756 if (av->top == 0 || av->top == initial_top(av))
2757 return;
2758
2759 /* pagesize is a power of 2 */
2760 assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
2761
2762 /* A contiguous main_arena is consistent with sbrk_base. */
2763 if (av == &main_arena && contiguous(av))
2764 assert((char*)mp_.sbrk_base + av->system_mem ==
2765 (char*)av->top + chunksize(av->top));
2766
2767 /* properties of fastbins */
2768
2769 /* max_fast is in allowed range */
2770 assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
2771
2772 max_fast_bin = fastbin_index(get_max_fast ());
2773
2774 for (i = 0; i < NFASTBINS; ++i) {
2775 p = av->fastbins[i];
2776
2777 /* The following test can only be performed for the main arena.
2778 While mallopt calls malloc_consolidate to get rid of all fast
2779        bins (especially those larger than the new maximum), this only
2780        happens for the main arena. Trying to do this for any
2781 other arena would mean those arenas have to be locked and
2782 malloc_consolidate be called for them. This is excessive. And
2783 even if this is acceptable to somebody it still cannot solve
2784 the problem completely since if the arena is locked a
2785 concurrent malloc call might create a new arena which then
2786 could use the newly invalid fast bins. */
2787
2788 /* all bins past max_fast are empty */
2789 if (av == &main_arena && i > max_fast_bin)
2790 assert(p == 0);
2791
2792 while (p != 0) {
2793 /* each chunk claims to be inuse */
2794 do_check_inuse_chunk(av, p);
2795 total += chunksize(p);
2796 /* chunk belongs in this bin */
2797 assert(fastbin_index(chunksize(p)) == i);
2798 p = p->fd;
2799 }
2800 }
2801
2802 if (total != 0)
2803 assert(have_fastchunks(av));
2804 else if (!have_fastchunks(av))
2805 assert(total == 0);
2806
2807 /* check normal bins */
2808 for (i = 1; i < NBINS; ++i) {
2809 b = bin_at(av,i);
2810
2811 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2812 if (i >= 2) {
2813 binbit = get_binmap(av,i);
2814 empty = last(b) == b;
2815 if (!binbit)
2816 assert(empty);
2817 else if (!empty)
2818 assert(binbit);
2819 }
2820
2821 for (p = last(b); p != b; p = p->bk) {
2822 /* each chunk claims to be free */
2823 do_check_free_chunk(av, p);
2824 size = chunksize(p);
2825 total += size;
2826 if (i >= 2) {
2827 /* chunk belongs in bin */
2828 idx = bin_index(size);
2829 assert(idx == i);
2830 /* lists are sorted */
2831 assert(p->bk == b ||
2832 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2833
2834 if (!in_smallbin_range(size))
2835 {
2836 if (p->fd_nextsize != NULL)
2837 {
2838 if (p->fd_nextsize == p)
2839 assert (p->bk_nextsize == p);
2840 else
2841 {
2842 if (p->fd_nextsize == first (b))
2843 assert (chunksize (p) < chunksize (p->fd_nextsize));
2844 else
2845 assert (chunksize (p) > chunksize (p->fd_nextsize));
2846
2847 if (p == first (b))
2848 assert (chunksize (p) > chunksize (p->bk_nextsize));
2849 else
2850 assert (chunksize (p) < chunksize (p->bk_nextsize));
2851 }
2852 }
2853 else
2854 assert (p->bk_nextsize == NULL);
2855 }
2856 } else if (!in_smallbin_range(size))
2857 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2858 /* chunk is followed by a legal chain of inuse chunks */
2859 for (q = next_chunk(p);
2860 (q != av->top && inuse(q) &&
2861 (unsigned long)(chunksize(q)) >= MINSIZE);
2862 q = next_chunk(q))
2863 do_check_inuse_chunk(av, q);
2864 }
2865 }
2866
2867 /* top chunk is OK */
2868 check_chunk(av, av->top);
2869
2870 /* sanity checks for statistics */
2871
2872 #ifdef NO_THREADS
2873 assert(total <= (unsigned long)(mp_.max_total_mem));
2874 assert(mp_.n_mmaps >= 0);
2875 #endif
2876 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2877
2878 assert((unsigned long)(av->system_mem) <=
2879 (unsigned long)(av->max_system_mem));
2880
2881 assert((unsigned long)(mp_.mmapped_mem) <=
2882 (unsigned long)(mp_.max_mmapped_mem));
2883
2884 #ifdef NO_THREADS
2885 assert((unsigned long)(mp_.max_total_mem) >=
2886 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
2887 #endif
2888 }
2889 #endif
2890
2891
2892 /* ----------------- Support for debugging hooks -------------------- */
2893 #include "hooks.c"
2894
2895
2896 /* ----------- Routines dealing with system allocation -------------- */
2897
2898 /*
2899 sysmalloc handles malloc cases requiring more memory from the system.
2900 On entry, it is assumed that av->top does not have enough
2901 space to service request for nb bytes, thus requiring that av->top
2902 be extended or replaced.
2903 */
2904
2905 #if __STD_C
2906 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2907 #else
2908 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
2909 #endif
2910 {
2911 mchunkptr old_top; /* incoming value of av->top */
2912 INTERNAL_SIZE_T old_size; /* its size */
2913 char* old_end; /* its end address */
2914
2915 long size; /* arg to first MORECORE or mmap call */
2916 char* brk; /* return value from MORECORE */
2917
2918 long correction; /* arg to 2nd MORECORE call */
2919 char* snd_brk; /* 2nd return val */
2920
2921 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2922 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2923 char* aligned_brk; /* aligned offset into brk */
2924
2925 mchunkptr p; /* the allocated/returned chunk */
2926 mchunkptr remainder; /* remainder from allocation */
2927 unsigned long remainder_size; /* its size */
2928
2929 unsigned long sum; /* for updating stats */
2930
2931 size_t pagemask = mp_.pagesize - 1;
2932 bool tried_mmap = false;
2933
2934
2935 #if HAVE_MMAP
2936
2937 /*
2938 If have mmap, and the request size meets the mmap threshold, and
2939 the system supports mmap, and there are few enough currently
2940 allocated mmapped regions, try to directly map this request
2941 rather than expanding top.
2942 */
2943
2944 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2945 (mp_.n_mmaps < mp_.n_mmaps_max)) {
2946
2947 char* mm; /* return value from mmap call*/
2948
2949 try_mmap:
2950 /*
2951 Round up size to nearest page. For mmapped chunks, the overhead
2952 is one SIZE_SZ unit larger than for normal chunks, because there
2953 is no following chunk whose prev_size field could be used.
2954 */
2955 #if 1
2956 /* See the front_misalign handling below, for glibc there is no
2957 need for further alignments. */
2958 size = (nb + SIZE_SZ + pagemask) & ~pagemask;
2959 #else
2960 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
2961 #endif
2962 tried_mmap = true;
2963
2964 /* Don't try if size wraps around 0 */
2965 if ((unsigned long)(size) > (unsigned long)(nb)) {
2966
2967 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2968
2969 if (mm != MAP_FAILED) {
2970
2971 /*
2972 The offset to the start of the mmapped region is stored
2973 in the prev_size field of the chunk. This allows us to adjust
2974 returned start address to meet alignment requirements here
2975 and in memalign(), and still be able to compute proper
2976 address argument for later munmap in free() and realloc().
2977 */
2978
2979 #if 1
2980 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2981 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2982 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2983 assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
2984 #else
2985 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2986 if (front_misalign > 0) {
2987 correction = MALLOC_ALIGNMENT - front_misalign;
2988 p = (mchunkptr)(mm + correction);
2989 p->prev_size = correction;
2990 set_head(p, (size - correction) |IS_MMAPPED);
2991 }
2992 else
2993 #endif
2994 {
2995 p = (mchunkptr)mm;
2996 set_head(p, size|IS_MMAPPED);
2997 }
2998
2999 /* update statistics */
3000
3001 if (++mp_.n_mmaps > mp_.max_n_mmaps)
3002 mp_.max_n_mmaps = mp_.n_mmaps;
3003
3004 sum = mp_.mmapped_mem += size;
3005 if (sum > (unsigned long)(mp_.max_mmapped_mem))
3006 mp_.max_mmapped_mem = sum;
3007 #ifdef NO_THREADS
3008 sum += av->system_mem;
3009 if (sum > (unsigned long)(mp_.max_total_mem))
3010 mp_.max_total_mem = sum;
3011 #endif
3012
3013 check_chunk(av, p);
3014
3015 return chunk2mem(p);
3016 }
3017 }
3018 }
3019 #endif
3020
3021 /* Record incoming configuration of top */
3022
3023 old_top = av->top;
3024 old_size = chunksize(old_top);
3025 old_end = (char*)(chunk_at_offset(old_top, old_size));
3026
3027 brk = snd_brk = (char*)(MORECORE_FAILURE);
3028
3029 /*
3030 If not the first time through, we require old_size to be
3031 at least MINSIZE and to have prev_inuse set.
3032 */
3033
3034 assert((old_top == initial_top(av) && old_size == 0) ||
3035 ((unsigned long) (old_size) >= MINSIZE &&
3036 prev_inuse(old_top) &&
3037 ((unsigned long)old_end & pagemask) == 0));
3038
3039 /* Precondition: not enough current space to satisfy nb request */
3040 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
3041
3042 /* Precondition: all fastbins are consolidated */
3043 assert(!have_fastchunks(av));
3044
3045
3046 if (av != &main_arena) {
3047
3048 heap_info *old_heap, *heap;
3049 size_t old_heap_size;
3050
3051 /* First try to extend the current heap. */
3052 old_heap = heap_for_ptr(old_top);
3053 old_heap_size = old_heap->size;
3054 if ((long) (MINSIZE + nb - old_size) > 0
3055 && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
3056 av->system_mem += old_heap->size - old_heap_size;
3057 arena_mem += old_heap->size - old_heap_size;
3058 #if 0
3059 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
3060 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
3061 #endif
3062 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
3063 | PREV_INUSE);
3064 }
3065 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
3066 /* Use a newly allocated heap. */
3067 heap->ar_ptr = av;
3068 heap->prev = old_heap;
3069 av->system_mem += heap->size;
3070 arena_mem += heap->size;
3071 #if 0
3072 if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
3073 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
3074 #endif
3075 /* Set up the new top. */
3076 top(av) = chunk_at_offset(heap, sizeof(*heap));
3077 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
3078
3079 /* Setup fencepost and free the old top chunk. */
3080 /* The fencepost takes at least MINSIZE bytes, because it might
3081 become the top chunk again later. Note that a footer is set
3082 up, too, although the chunk is marked in use. */
3083 old_size -= MINSIZE;
3084 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
3085 if (old_size >= MINSIZE) {
3086 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
3087 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
3088 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
3089 _int_free(av, chunk2mem(old_top));
3090 } else {
3091 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
3092 set_foot(old_top, (old_size + 2*SIZE_SZ));
3093 }
3094 }
3095 else if (!tried_mmap)
3096       /* We can at least try to mmap memory. */
3097 goto try_mmap;
3098
3099 } else { /* av == main_arena */
3100
3101
3102 /* Request enough space for nb + pad + overhead */
3103
3104 size = nb + mp_.top_pad + MINSIZE;
3105
3106 /*
3107 If contiguous, we can subtract out existing space that we hope to
3108 combine with new space. We add it back later only if
3109 we don't actually get contiguous space.
3110 */
3111
3112 if (contiguous(av))
3113 size -= old_size;
3114
3115 /*
3116 Round to a multiple of page size.
3117 If MORECORE is not contiguous, this ensures that we only call it
3118 with whole-page arguments. And if MORECORE is contiguous and
3119 this is not first time through, this preserves page-alignment of
3120 previous calls. Otherwise, we correct to page-align below.
3121 */
3122
3123 size = (size + pagemask) & ~pagemask;
3124
3125 /*
3126 Don't try to call MORECORE if argument is so big as to appear
3127 negative. Note that since mmap takes size_t arg, it may succeed
3128 below even if we cannot call MORECORE.
3129 */
3130
3131 if (size > 0)
3132 brk = (char*)(MORECORE(size));
3133
3134 if (brk != (char*)(MORECORE_FAILURE)) {
3135 /* Call the `morecore' hook if necessary. */
3136 if (__after_morecore_hook)
3137 (*__after_morecore_hook) ();
3138 } else {
3139 /*
3140 If have mmap, try using it as a backup when MORECORE fails or
3141 cannot be used. This is worth doing on systems that have "holes" in
3142 address space, so sbrk cannot extend to give contiguous space, but
3143 space is available elsewhere. Note that we ignore mmap max count
3144 and threshold limits, since the space will not be used as a
3145 segregated mmap region.
3146 */
3147
3148 #if HAVE_MMAP
3149 /* Cannot merge with old top, so add its size back in */
3150 if (contiguous(av))
3151 size = (size + old_size + pagemask) & ~pagemask;
3152
3153 /* If we are relying on mmap as backup, then use larger units */
3154 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
3155 size = MMAP_AS_MORECORE_SIZE;
3156
3157 /* Don't try if size wraps around 0 */
3158 if ((unsigned long)(size) > (unsigned long)(nb)) {
3159
3160 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3161
3162 if (mbrk != MAP_FAILED) {
3163
3164 /* We do not need, and cannot use, another sbrk call to find end */
3165 brk = mbrk;
3166 snd_brk = brk + size;
3167
3168 /*
3169 Record that we no longer have a contiguous sbrk region.
3170 After the first time mmap is used as backup, we do not
3171 ever rely on contiguous space since this could incorrectly
3172 bridge regions.
3173 */
3174 set_noncontiguous(av);
3175 }
3176 }
3177 #endif
3178 }
3179
3180 if (brk != (char*)(MORECORE_FAILURE)) {
3181 if (mp_.sbrk_base == 0)
3182 mp_.sbrk_base = brk;
3183 av->system_mem += size;
3184
3185 /*
3186 If MORECORE extends previous space, we can likewise extend top size.
3187 */
3188
3189 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
3190 set_head(old_top, (size + old_size) | PREV_INUSE);
3191
3192 else if (contiguous(av) && old_size && brk < old_end) {
3193       /* Oops!  Someone else killed our space.  Can't touch anything. */
3194 assert(0);
3195 }
3196
3197 /*
3198 Otherwise, make adjustments:
3199
3200 * If the first time through or noncontiguous, we need to call sbrk
3201 just to find out where the end of memory lies.
3202
3203 * We need to ensure that all returned chunks from malloc will meet
3204 MALLOC_ALIGNMENT
3205
3206 * If there was an intervening foreign sbrk, we need to adjust sbrk
3207 request size to account for the fact that we will not be able to
3208 combine new space with existing space in old_top.
3209
3210 * Almost all systems internally allocate whole pages at a time, in
3211 which case we might as well use the whole last page of request.
3212 So we allocate enough more memory to hit a page boundary now,
3213 which in turn causes future contiguous calls to page-align.
3214 */
3215
3216 else {
3217 front_misalign = 0;
3218 end_misalign = 0;
3219 correction = 0;
3220 aligned_brk = brk;
3221
3222 /* handle contiguous cases */
3223 if (contiguous(av)) {
3224
3225 /* Count foreign sbrk as system_mem. */
3226 if (old_size)
3227 av->system_mem += brk - old_end;
3228
3229 /* Guarantee alignment of first new chunk made from this space */
3230
3231 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3232 if (front_misalign > 0) {
3233
3234 /*
3235 Skip over some bytes to arrive at an aligned position.
3236 We don't need to specially mark these wasted front bytes.
3237 They will never be accessed anyway because
3238 prev_inuse of av->top (and any chunk created from its start)
3239 is always true after initialization.
3240 */
3241
3242 correction = MALLOC_ALIGNMENT - front_misalign;
3243 aligned_brk += correction;
3244 }
3245
3246 /*
3247 If this isn't adjacent to existing space, then we will not
3248 be able to merge with old_top space, so must add to 2nd request.
3249 */
3250
3251 correction += old_size;
3252
3253 /* Extend the end address to hit a page boundary */
3254 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3255 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3256
3257 assert(correction >= 0);
3258 snd_brk = (char*)(MORECORE(correction));
3259
3260 /*
3261 If we can't allocate the correction, try to at least find out the current
3262 brk. It might be enough to proceed without failing.
3263
3264 Note that if second sbrk did NOT fail, we assume that space
3265 is contiguous with first sbrk. This is a safe assumption unless
3266 program is multithreaded but doesn't use locks and a foreign sbrk
3267 occurred between our first and second calls.
3268 */
3269
3270 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3271 correction = 0;
3272 snd_brk = (char*)(MORECORE(0));
3273 } else
3274 /* Call the `morecore' hook if necessary. */
3275 if (__after_morecore_hook)
3276 (*__after_morecore_hook) ();
3277 }
3278
3279 /* handle non-contiguous cases */
3280 else {
3281 /* MORECORE/mmap must correctly align */
3282 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
3283
3284 /* Find out current end of memory */
3285 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3286 snd_brk = (char*)(MORECORE(0));
3287 }
3288 }
3289
3290 /* Adjust top based on results of second sbrk */
3291 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3292 av->top = (mchunkptr)aligned_brk;
3293 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3294 av->system_mem += correction;
3295
3296 /*
3297 If not the first time through, we either have a
3298 gap due to foreign sbrk or a non-contiguous region. Insert a
3299 double fencepost at old_top to prevent consolidation with space
3300 we don't own. These fenceposts are artificial chunks that are
3301 marked as inuse and are in any case too small to use. We need
3302 two to make sizes and alignments work out.
3303 */
3304
3305 if (old_size != 0) {
3306 /*
3307 Shrink old_top to insert fenceposts, keeping size a
3308 multiple of MALLOC_ALIGNMENT. We know there is at least
3309 enough space in old_top to do this.
3310 */
3311 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3312 set_head(old_top, old_size | PREV_INUSE);
3313
3314 /*
3315 Note that the following assignments completely overwrite
3316 old_top when old_size was previously MINSIZE. This is
3317 intentional. We need the fencepost, even if old_top otherwise gets
3318 lost.
3319 */
3320 chunk_at_offset(old_top, old_size )->size =
3321 (2*SIZE_SZ)|PREV_INUSE;
3322
3323 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3324 (2*SIZE_SZ)|PREV_INUSE;
3325
3326 /* If possible, release the rest. */
3327 if (old_size >= MINSIZE) {
3328 _int_free(av, chunk2mem(old_top));
3329 }
3330
3331 }
3332 }
3333 }
3334
3335 /* Update statistics */
3336 #ifdef NO_THREADS
3337 sum = av->system_mem + mp_.mmapped_mem;
3338 if (sum > (unsigned long)(mp_.max_total_mem))
3339 mp_.max_total_mem = sum;
3340 #endif
3341
3342 }
3343
3344 } /* if (av != &main_arena) */
3345
3346 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3347 av->max_system_mem = av->system_mem;
3348 check_malloc_state(av);
3349
3350 /* finally, do the allocation */
3351 p = av->top;
3352 size = chunksize(p);
3353
3354 /* check that one of the above allocation paths succeeded */
3355 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3356 remainder_size = size - nb;
3357 remainder = chunk_at_offset(p, nb);
3358 av->top = remainder;
3359 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3360 set_head(remainder, remainder_size | PREV_INUSE);
3361 check_malloced_chunk(av, p, nb);
3362 return chunk2mem(p);
3363 }
3364
3365 /* catch all failure paths */
3366 MALLOC_FAILURE_ACTION;
3367 return 0;
3368 }
3369
3370
3371 /*
3372 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3373 to the system (via negative arguments to sbrk) if there is unused
3374 memory at the `high' end of the malloc pool. It is called
3375 automatically by free() when top space exceeds the trim
3376 threshold. It is also called by the public malloc_trim routine. It
3377 returns 1 if it actually released any memory, else 0.
3378 */
3379
3380 #if __STD_C
3381 static int sYSTRIm(size_t pad, mstate av)
3382 #else
3383 static int sYSTRIm(pad, av) size_t pad; mstate av;
3384 #endif
3385 {
3386 long top_size; /* Amount of top-most memory */
3387 long extra; /* Amount to release */
3388 long released; /* Amount actually released */
3389 char* current_brk; /* address returned by pre-check sbrk call */
3390 char* new_brk; /* address returned by post-check sbrk call */
3391 size_t pagesz;
3392
3393 pagesz = mp_.pagesize;
3394 top_size = chunksize(av->top);
3395
3396 /* Release in pagesize units, keeping at least one page */
3397 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3398
3399 if (extra > 0) {
3400
3401 /*
3402 Only proceed if end of memory is where we last set it.
3403 This avoids problems if there were foreign sbrk calls.
3404 */
3405 current_brk = (char*)(MORECORE(0));
3406 if (current_brk == (char*)(av->top) + top_size) {
3407
3408 /*
3409 Attempt to release memory. We ignore MORECORE return value,
3410 and instead call again to find out where new end of memory is.
3411 This avoids problems if the first call releases less than we asked,
3412 or if failure somehow altered the brk value. (We could still
3413 encounter problems if it altered brk in some very bad way,
3414 but the only thing we can do is adjust anyway, which will cause
3415 some downstream failure.)
3416 */
3417
3418 MORECORE(-extra);
3419 /* Call the `morecore' hook if necessary. */
3420 if (__after_morecore_hook)
3421 (*__after_morecore_hook) ();
3422 new_brk = (char*)(MORECORE(0));
3423
3424 if (new_brk != (char*)MORECORE_FAILURE) {
3425 released = (long)(current_brk - new_brk);
3426
3427 if (released != 0) {
3428 /* Success. Adjust top. */
3429 av->system_mem -= released;
3430 set_head(av->top, (top_size - released) | PREV_INUSE);
3431 check_malloc_state(av);
3432 return 1;
3433 }
3434 }
3435 }
3436 }
3437 return 0;
3438 }
3439
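/* Illustrative sketch (added for exposition, not compiled): the trimming
   logic above is normally reached from free(), but an application can also
   request it through the public malloc_trim() wrapper defined later in
   this file.  This assumes only the standard <malloc.h> interface. */
#if 0
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>

int main (void)
{
  /* Grow the top of the heap, then release the block again. */
  void *p = malloc (1024 * 1024);
  free (p);

  /* Ask the allocator to give unused top-of-heap pages back to the
     system; returns 1 if any memory was released, 0 otherwise. */
  int released = malloc_trim (0);
  printf ("malloc_trim released memory: %d\n", released);
  return 0;
}
#endif
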
3440 #ifdef HAVE_MMAP
3441
3442 static void
3443 internal_function
3444 #if __STD_C
3445 munmap_chunk(mchunkptr p)
3446 #else
3447 munmap_chunk(p) mchunkptr p;
3448 #endif
3449 {
3450 INTERNAL_SIZE_T size = chunksize(p);
3451
3452 assert (chunk_is_mmapped(p));
3453 #if 0
3454 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3455 assert((mp_.n_mmaps > 0));
3456 #endif
3457
3458 uintptr_t block = (uintptr_t) p - p->prev_size;
3459 size_t total_size = p->prev_size + size;
3460 /* Unfortunately we have to do the compiler's job by hand here. Normally
3461 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3462 page size. But gcc does not recognize the optimization possibility
3463 (at the moment, at least), so we combine the two values into one before
3464 the bit test. */
3465 if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
3466 {
3467 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
3468 chunk2mem (p));
3469 return;
3470 }
3471
3472 mp_.n_mmaps--;
3473 mp_.mmapped_mem -= total_size;
3474
3475 int ret __attribute__ ((unused)) = munmap((char *)block, total_size);
3476
3477 /* munmap returns non-zero on failure */
3478 assert(ret == 0);
3479 }
3480
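/* Illustrative sketch (added for exposition, not compiled): the
   (block | total_size) test in munmap_chunk() above folds two
   page-alignment checks into a single branch.  The same idea in
   isolation; the helper name is hypothetical. */
#if 0
#include <stddef.h>
#include <stdint.h>

/* Returns nonzero if either value is not page-aligned.  pagesize is
   assumed to be a power of two, so (pagesize - 1) masks the low bits
   that must all be zero for both values. */
static int
misaligned_pair (uintptr_t block, size_t total_size, size_t pagesize)
{
  return ((block | total_size) & (pagesize - 1)) != 0;
}
#endif
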
3481 #if HAVE_MREMAP
3482
3483 static mchunkptr
3484 internal_function
3485 #if __STD_C
3486 mremap_chunk(mchunkptr p, size_t new_size)
3487 #else
3488 mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
3489 #endif
3490 {
3491 size_t page_mask = mp_.pagesize - 1;
3492 INTERNAL_SIZE_T offset = p->prev_size;
3493 INTERNAL_SIZE_T size = chunksize(p);
3494 char *cp;
3495
3496 assert (chunk_is_mmapped(p));
3497 #if 0
3498 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3499 assert((mp_.n_mmaps > 0));
3500 #endif
3501 assert(((size + offset) & (mp_.pagesize-1)) == 0);
3502
3503 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3504 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3505
3506 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3507 MREMAP_MAYMOVE);
3508
3509 if (cp == MAP_FAILED) return 0;
3510
3511 p = (mchunkptr)(cp + offset);
3512
3513 assert(aligned_OK(chunk2mem(p)));
3514
3515 assert((p->prev_size == offset));
3516 set_head(p, (new_size - offset)|IS_MMAPPED);
3517
3518 mp_.mmapped_mem -= size + offset;
3519 mp_.mmapped_mem += new_size;
3520 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3521 mp_.max_mmapped_mem = mp_.mmapped_mem;
3522 #ifdef NO_THREADS
3523 if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3524 mp_.max_total_mem)
3525 mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
3526 #endif
3527 return p;
3528 }
3529
3530 #endif /* HAVE_MREMAP */
3531
3532 #endif /* HAVE_MMAP */
3533
3534 /*------------------------ Public wrappers. --------------------------------*/
3535
3536 Void_t*
3537 public_mALLOc(size_t bytes)
3538 {
3539 mstate ar_ptr;
3540 Void_t *victim;
3541
3542 __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
3543 if (hook != NULL)
3544 return (*hook)(bytes, RETURN_ADDRESS (0));
3545
3546 arena_get(ar_ptr, bytes);
3547 if(!ar_ptr)
3548 return 0;
3549 victim = _int_malloc(ar_ptr, bytes);
3550 if(!victim) {
3551 /* Maybe the failure is due to running out of mmapped areas. */
3552 if(ar_ptr != &main_arena) {
3553 (void)mutex_unlock(&ar_ptr->mutex);
3554 (void)mutex_lock(&main_arena.mutex);
3555 victim = _int_malloc(&main_arena, bytes);
3556 (void)mutex_unlock(&main_arena.mutex);
3557 } else {
3558 #if USE_ARENAS
3559 /* ... or sbrk() has failed and there is still a chance to mmap() */
3560 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3561 (void)mutex_unlock(&main_arena.mutex);
3562 if(ar_ptr) {
3563 victim = _int_malloc(ar_ptr, bytes);
3564 (void)mutex_unlock(&ar_ptr->mutex);
3565 }
3566 #endif
3567 }
3568 } else
3569 (void)mutex_unlock(&ar_ptr->mutex);
3570 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3571 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3572 return victim;
3573 }
3574 #ifdef libc_hidden_def
3575 libc_hidden_def(public_mALLOc)
3576 #endif
3577
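/* Illustrative sketch (added for exposition, not compiled):
   public_mALLOc() above first consults __malloc_hook, which lets
   debuggers and tracers interpose on every allocation.  A tracing hook
   for the interface used in this source might look like the following;
   the hook variable names are the glibc ones of this era, while the
   tracing function itself is hypothetical. */
#if 0
#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;
  /* Restore the old hook so the malloc() call below does not re-enter
     this function. */
  __malloc_hook = old_malloc_hook;
  result = malloc (size);
  fprintf (stderr, "malloc(%zu) from %p -> %p\n", size, caller, result);
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
  return result;
}

static void
install_tracing_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}
#endif
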
3578 void
3579 public_fREe(Void_t* mem)
3580 {
3581 mstate ar_ptr;
3582 mchunkptr p; /* chunk corresponding to mem */
3583
3584 void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
3585 if (hook != NULL) {
3586 (*hook)(mem, RETURN_ADDRESS (0));
3587 return;
3588 }
3589
3590 if (mem == 0) /* free(0) has no effect */
3591 return;
3592
3593 p = mem2chunk(mem);
3594
3595 #if HAVE_MMAP
3596 if (chunk_is_mmapped(p)) /* release mmapped memory. */
3597 {
3598 /* see if the dynamic brk/mmap threshold needs adjusting */
3599 if (!mp_.no_dyn_threshold
3600 && p->size > mp_.mmap_threshold
3601 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
3602 {
3603 mp_.mmap_threshold = chunksize (p);
3604 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3605 }
3606 munmap_chunk(p);
3607 return;
3608 }
3609 #endif
3610
3611 ar_ptr = arena_for_chunk(p);
3612 #if THREAD_STATS
3613 if(!mutex_trylock(&ar_ptr->mutex))
3614 ++(ar_ptr->stat_lock_direct);
3615 else {
3616 (void)mutex_lock(&ar_ptr->mutex);
3617 ++(ar_ptr->stat_lock_wait);
3618 }
3619 #else
3620 (void)mutex_lock(&ar_ptr->mutex);
3621 #endif
3622 _int_free(ar_ptr, mem);
3623 (void)mutex_unlock(&ar_ptr->mutex);
3624 }
3625 #ifdef libc_hidden_def
3626 libc_hidden_def (public_fREe)
3627 #endif
3628
3629 Void_t*
3630 public_rEALLOc(Void_t* oldmem, size_t bytes)
3631 {
3632 mstate ar_ptr;
3633 INTERNAL_SIZE_T nb; /* padded request size */
3634
3635 mchunkptr oldp; /* chunk corresponding to oldmem */
3636 INTERNAL_SIZE_T oldsize; /* its size */
3637
3638 Void_t* newp; /* chunk to return */
3639
3640 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
3641 __realloc_hook;
3642 if (hook != NULL)
3643 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3644
3645 #if REALLOC_ZERO_BYTES_FREES
3646 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
3647 #endif
3648
3649 /* realloc of null is supposed to be same as malloc */
3650 if (oldmem == 0) return public_mALLOc(bytes);
3651
3652 oldp = mem2chunk(oldmem);
3653 oldsize = chunksize(oldp);
3654
3655 /* Little security check which won't hurt performance: the
3656 allocator never wraps around at the end of the address space.
3657 Therefore we can exclude some size values which might appear
3658 here by accident or by "design" from some intruder. */
3659 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3660 || __builtin_expect (misaligned_chunk (oldp), 0))
3661 {
3662 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
3663 return NULL;
3664 }
3665
3666 checked_request2size(bytes, nb);
3667
3668 #if HAVE_MMAP
3669 if (chunk_is_mmapped(oldp))
3670 {
3671 Void_t* newmem;
3672
3673 #if HAVE_MREMAP
3674 newp = mremap_chunk(oldp, nb);
3675 if(newp) return chunk2mem(newp);
3676 #endif
3677 /* Note the extra SIZE_SZ overhead. */
3678 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3679 /* Must alloc, copy, free. */
3680 newmem = public_mALLOc(bytes);
3681 if (newmem == 0) return 0; /* propagate failure */
3682 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3683 munmap_chunk(oldp);
3684 return newmem;
3685 }
3686 #endif
3687
3688 ar_ptr = arena_for_chunk(oldp);
3689 #if THREAD_STATS
3690 if(!mutex_trylock(&ar_ptr->mutex))
3691 ++(ar_ptr->stat_lock_direct);
3692 else {
3693 (void)mutex_lock(&ar_ptr->mutex);
3694 ++(ar_ptr->stat_lock_wait);
3695 }
3696 #else
3697 (void)mutex_lock(&ar_ptr->mutex);
3698 #endif
3699
3700 #ifndef NO_THREADS
3701 /* As in malloc(), remember this arena for the next allocation. */
3702 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
3703 #endif
3704
3705 newp = _int_realloc(ar_ptr, oldmem, bytes);
3706
3707 (void)mutex_unlock(&ar_ptr->mutex);
3708 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3709 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3710
3711 if (newp == NULL)
3712 {
3713 /* Try harder to allocate memory in other arenas. */
3714 newp = public_mALLOc(bytes);
3715 if (newp != NULL)
3716 {
3717 MALLOC_COPY (newp, oldmem, oldsize - 2 * SIZE_SZ);
3718 #if THREAD_STATS
3719 if(!mutex_trylock(&ar_ptr->mutex))
3720 ++(ar_ptr->stat_lock_direct);
3721 else {
3722 (void)mutex_lock(&ar_ptr->mutex);
3723 ++(ar_ptr->stat_lock_wait);
3724 }
3725 #else
3726 (void)mutex_lock(&ar_ptr->mutex);
3727 #endif
3728 _int_free(ar_ptr, oldmem);
3729 (void)mutex_unlock(&ar_ptr->mutex);
3730 }
3731 }
3732
3733 return newp;
3734 }
3735 #ifdef libc_hidden_def
3736 libc_hidden_def (public_rEALLOc)
3737 #endif
3738
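/* Illustrative sketch (added for exposition, not compiled): the wrapper
   above implements the usual realloc() corner cases -- realloc(NULL, n)
   behaves like malloc(n), and when REALLOC_ZERO_BYTES_FREES is defined
   realloc(p, 0) frees p -- and it leaves the old block intact on failure.
   A hypothetical caller relying on that last guarantee: */
#if 0
#include <stdlib.h>
#include <string.h>

/* Grow a buffer, zero-filling the new tail.  On failure the original
   buffer is returned untouched, as realloc() guarantees. */
static char *
grow_buffer (char *buf, size_t old_len, size_t new_len)
{
  char *tmp = realloc (buf, new_len);  /* realloc(NULL, n) == malloc(n) */
  if (tmp == NULL)
    return buf;
  if (new_len > old_len)
    memset (tmp + old_len, 0, new_len - old_len);
  return tmp;
}
#endif
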
3739 Void_t*
3740 public_mEMALIGn(size_t alignment, size_t bytes)
3741 {
3742 mstate ar_ptr;
3743 Void_t *p;
3744
3745 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3746 __const __malloc_ptr_t)) =
3747 __memalign_hook;
3748 if (hook != NULL)
3749 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3750
3751 /* If we need less alignment than we give anyway, just relay to malloc */
3752 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
3753
3754 /* Otherwise, ensure that it is at least a minimum chunk size */
3755 if (alignment < MINSIZE) alignment = MINSIZE;
3756
3757 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3758 if(!ar_ptr)
3759 return 0;
3760 p = _int_memalign(ar_ptr, alignment, bytes);
3761 (void)mutex_unlock(&ar_ptr->mutex);
3762 if(!p) {
3763 /* Maybe the failure is due to running out of mmapped areas. */
3764 if(ar_ptr != &main_arena) {
3765 (void)mutex_lock(&main_arena.mutex);
3766 p = _int_memalign(&main_arena, alignment, bytes);
3767 (void)mutex_unlock(&main_arena.mutex);
3768 } else {
3769 #if USE_ARENAS
3770 /* ... or sbrk() has failed and there is still a chance to mmap() */
3771 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3772 if(ar_ptr) {
3773 p = _int_memalign(ar_ptr, alignment, bytes);
3774 (void)mutex_unlock(&ar_ptr->mutex);
3775 }
3776 #endif
3777 }
3778 }
3779 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3780 ar_ptr == arena_for_chunk(mem2chunk(p)));
3781 return p;
3782 }
3783 #ifdef libc_hidden_def
3784 libc_hidden_def (public_mEMALIGn)
3785 #endif
3786
3787 Void_t*
3788 public_vALLOc(size_t bytes)
3789 {
3790 mstate ar_ptr;
3791 Void_t *p;
3792
3793 if(__malloc_initialized < 0)
3794 ptmalloc_init ();
3795
3796 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3797 __const __malloc_ptr_t)) =
3798 __memalign_hook;
3799 if (hook != NULL)
3800 return (*hook)(mp_.pagesize, bytes, RETURN_ADDRESS (0));
3801
3802 arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
3803 if(!ar_ptr)
3804 return 0;
3805 p = _int_valloc(ar_ptr, bytes);
3806 (void)mutex_unlock(&ar_ptr->mutex);
3807 return p;
3808 }
3809
3810 Void_t*
3811 public_pVALLOc(size_t bytes)
3812 {
3813 mstate ar_ptr;
3814 Void_t *p;
3815
3816 if(__malloc_initialized < 0)
3817 ptmalloc_init ();
3818
3819 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3820 __const __malloc_ptr_t)) =
3821 __memalign_hook;
3822 if (hook != NULL)
3823 return (*hook)(mp_.pagesize,
3824 (bytes + mp_.pagesize - 1) & ~(mp_.pagesize - 1),
3825 RETURN_ADDRESS (0));
3826
3827 arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
3828 p = _int_pvalloc(ar_ptr, bytes);
3829 (void)mutex_unlock(&ar_ptr->mutex);
3830 return p;
3831 }
3832
3833 Void_t*
3834 public_cALLOc(size_t n, size_t elem_size)
3835 {
3836 mstate av;
3837 mchunkptr oldtop, p;
3838 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3839 Void_t* mem;
3840 unsigned long clearsize;
3841 unsigned long nclears;
3842 INTERNAL_SIZE_T* d;
3843 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
3844 __malloc_hook;
3845
3846 /* size_t is unsigned so the behavior on overflow is defined. */
3847 bytes = n * elem_size;
3848 #define HALF_INTERNAL_SIZE_T \
3849 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3850 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
3851 if (elem_size != 0 && bytes / elem_size != n) {
3852 MALLOC_FAILURE_ACTION;
3853 return 0;
3854 }
3855 }
3856
3857 if (hook != NULL) {
3858 sz = bytes;
3859 mem = (*hook)(sz, RETURN_ADDRESS (0));
3860 if(mem == 0)
3861 return 0;
3862 #ifdef HAVE_MEMCPY
3863 return memset(mem, 0, sz);
3864 #else
3865 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3866 return mem;
3867 #endif
3868 }
3869
3870 sz = bytes;
3871
3872 arena_get(av, sz);
3873 if(!av)
3874 return 0;
3875
3876 /* Check if we hand out the top chunk, in which case there may be no
3877 need to clear. */
3878 #if MORECORE_CLEARS
3879 oldtop = top(av);
3880 oldtopsize = chunksize(top(av));
3881 #if MORECORE_CLEARS < 2
3882 /* Only newly allocated memory is guaranteed to be cleared. */
3883 if (av == &main_arena &&
3884 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3885 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3886 #endif
3887 #endif
3888 mem = _int_malloc(av, sz);
3889
3890 /* Only clearing follows, so we can unlock early. */
3891 (void)mutex_unlock(&av->mutex);
3892
3893 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3894 av == arena_for_chunk(mem2chunk(mem)));
3895
3896 if (mem == 0) {
3897 /* Maybe the failure is due to running out of mmapped areas. */
3898 if(av != &main_arena) {
3899 (void)mutex_lock(&main_arena.mutex);
3900 mem = _int_malloc(&main_arena, sz);
3901 (void)mutex_unlock(&main_arena.mutex);
3902 } else {
3903 #if USE_ARENAS
3904 /* ... or sbrk() has failed and there is still a chance to mmap() */
3905 (void)mutex_lock(&main_arena.mutex);
3906 av = arena_get2(av->next ? av : 0, sz);
3907 (void)mutex_unlock(&main_arena.mutex);
3908 if(av) {
3909 mem = _int_malloc(av, sz);
3910 (void)mutex_unlock(&av->mutex);
3911 }
3912 #endif
3913 }
3914 if (mem == 0) return 0;
3915 }
3916 p = mem2chunk(mem);
3917
3918 /* Two optional cases in which clearing not necessary */
3919 #if HAVE_MMAP
3920 if (chunk_is_mmapped (p))
3921 {
3922 if (__builtin_expect (perturb_byte, 0))
3923 MALLOC_ZERO (mem, sz);
3924 return mem;
3925 }
3926 #endif
3927
3928 csz = chunksize(p);
3929
3930 #if MORECORE_CLEARS
3931 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
3932 /* clear only the bytes from non-freshly-sbrked memory */
3933 csz = oldtopsize;
3934 }
3935 #endif
3936
3937 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3938 contents have an odd number of INTERNAL_SIZE_T-sized words;
3939 minimally 3. */
3940 d = (INTERNAL_SIZE_T*)mem;
3941 clearsize = csz - SIZE_SZ;
3942 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3943 assert(nclears >= 3);
3944
3945 if (nclears > 9)
3946 MALLOC_ZERO(d, clearsize);
3947
3948 else {
3949 *(d+0) = 0;
3950 *(d+1) = 0;
3951 *(d+2) = 0;
3952 if (nclears > 4) {
3953 *(d+3) = 0;
3954 *(d+4) = 0;
3955 if (nclears > 6) {
3956 *(d+5) = 0;
3957 *(d+6) = 0;
3958 if (nclears > 8) {
3959 *(d+7) = 0;
3960 *(d+8) = 0;
3961 }
3962 }
3963 }
3964 }
3965
3966 return mem;
3967 }
3968
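/* Illustrative sketch (added for exposition, not compiled): the overflow
   guard near the top of public_cALLOc() only pays for a division when n
   or elem_size is large enough that n * elem_size could wrap.  The same
   technique in isolation; HALF_SIZE_MAX and checked_mul are hypothetical
   names. */
#if 0
#include <stddef.h>

#define HALF_SIZE_MAX (((size_t) 1) << (8 * sizeof (size_t) / 2))

/* Returns 0 and stores n * elem_size in *out, or nonzero if the
   multiplication would overflow size_t. */
static int
checked_mul (size_t n, size_t elem_size, size_t *out)
{
  size_t bytes = n * elem_size;
  if ((n | elem_size) >= HALF_SIZE_MAX
      && elem_size != 0 && bytes / elem_size != n)
    return 1;
  *out = bytes;
  return 0;
}
#endif
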
3969 #ifndef _LIBC
3970
3971 Void_t**
3972 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
3973 {
3974 mstate ar_ptr;
3975 Void_t** m;
3976
3977 arena_get(ar_ptr, n*elem_size);
3978 if(!ar_ptr)
3979 return 0;
3980
3981 m = _int_icalloc(ar_ptr, n, elem_size, chunks);
3982 (void)mutex_unlock(&ar_ptr->mutex);
3983 return m;
3984 }
3985
3986 Void_t**
3987 public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
3988 {
3989 mstate ar_ptr;
3990 Void_t** m;
3991
3992 arena_get(ar_ptr, 0);
3993 if(!ar_ptr)
3994 return 0;
3995
3996 m = _int_icomalloc(ar_ptr, n, sizes, chunks);
3997 (void)mutex_unlock(&ar_ptr->mutex);
3998 return m;
3999 }
4000
4001 void
4002 public_cFREe(Void_t* m)
4003 {
4004 public_fREe(m);
4005 }
4006
4007 #endif /* _LIBC */
4008
4009 int
4010 public_mTRIm(size_t s)
4011 {
4012 int result;
4013
4014 if(__malloc_initialized < 0)
4015 ptmalloc_init ();
4016 (void)mutex_lock(&main_arena.mutex);
4017 result = mTRIm(s);
4018 (void)mutex_unlock(&main_arena.mutex);
4019 return result;
4020 }
4021
4022 size_t
4023 public_mUSABLe(Void_t* m)
4024 {
4025 size_t result;
4026
4027 result = mUSABLe(m);
4028 return result;
4029 }
4030
4031 void
4032 public_mSTATs()
4033 {
4034 mSTATs();
4035 }
4036
4037 struct mallinfo public_mALLINFo()
4038 {
4039 struct mallinfo m;
4040
4041 if(__malloc_initialized < 0)
4042 ptmalloc_init ();
4043 (void)mutex_lock(&main_arena.mutex);
4044 m = mALLINFo(&main_arena);
4045 (void)mutex_unlock(&main_arena.mutex);
4046 return m;
4047 }
4048
4049 int
4050 public_mALLOPt(int p, int v)
4051 {
4052 int result;
4053 result = mALLOPt(p, v);
4054 return result;
4055 }
4056
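/* Illustrative sketch (added for exposition, not compiled):
   public_mALLOPt() relays to mALLOPt(), which adjusts the tunables used
   throughout this file (trim threshold, top pad, mmap threshold, ...).
   The parameter names below are the standard ones from <malloc.h>; the
   particular values are only an example. */
#if 0
#include <malloc.h>

static void
tune_allocator (void)
{
  mallopt (M_MMAP_THRESHOLD, 1024 * 1024);     /* mmap only blocks >= 1 MiB */
  mallopt (M_TRIM_THRESHOLD, 4 * 1024 * 1024); /* trim top less eagerly */
  mallopt (M_TOP_PAD, 256 * 1024);             /* keep extra slack at top */
}
#endif
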
4057 /*
4058 ------------------------------ malloc ------------------------------
4059 */
4060
4061 Void_t*
4062 _int_malloc(mstate av, size_t bytes)
4063 {
4064 INTERNAL_SIZE_T nb; /* normalized request size */
4065 unsigned int idx; /* associated bin index */
4066 mbinptr bin; /* associated bin */
4067 mfastbinptr* fb; /* associated fastbin */
4068
4069 mchunkptr victim; /* inspected/selected chunk */
4070 INTERNAL_SIZE_T size; /* its size */
4071 int victim_index; /* its bin index */
4072
4073 mchunkptr remainder; /* remainder from a split */
4074 unsigned long remainder_size; /* its size */
4075
4076 unsigned int block; /* bit map traverser */
4077 unsigned int bit; /* bit map traverser */
4078 unsigned int map; /* current word of binmap */
4079
4080 mchunkptr fwd; /* misc temp for linking */
4081 mchunkptr bck; /* misc temp for linking */
4082
4083 /*
4084 Convert request size to internal form by adding SIZE_SZ bytes
4085 overhead plus possibly more to obtain necessary alignment and/or
4086 to obtain a size of at least MINSIZE, the smallest allocatable
4087 size. Also, checked_request2size traps (returning 0) request sizes
4088 that are so large that they wrap around zero when padded and
4089 aligned.
4090 */
4091
4092 checked_request2size(bytes, nb);
4093
4094 /*
4095 If the size qualifies as a fastbin, first check corresponding bin.
4096 This code is safe to execute even if av is not yet initialized, so we
4097 can try it without checking, which saves some time on this fast path.
4098 */
4099
4100 if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
4101 long int idx = fastbin_index(nb);
4102 fb = &(av->fastbins[idx]);
4103 if ( (victim = *fb) != 0) {
4104 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
4105 malloc_printerr (check_action, "malloc(): memory corruption (fast)",
4106 chunk2mem (victim));
4107 *fb = victim->fd;
4108 check_remalloced_chunk(av, victim, nb);
4109 void *p = chunk2mem(victim);
4110 if (__builtin_expect (perturb_byte, 0))
4111 alloc_perturb (p, bytes);
4112 return p;
4113 }
4114 }
4115
4116 /*
4117 If a small request, check regular bin. Since these "smallbins"
4118 hold one size each, no searching within bins is necessary.
4119 (For a large request, we need to wait until unsorted chunks are
4120 processed to find best fit. But for small ones, fits are exact
4121 anyway, so we can check now, which is faster.)
4122 */
4123
4124 if (in_smallbin_range(nb)) {
4125 idx = smallbin_index(nb);
4126 bin = bin_at(av,idx);
4127
4128 if ( (victim = last(bin)) != bin) {
4129 if (victim == 0) /* initialization check */
4130 malloc_consolidate(av);
4131 else {
4132 bck = victim->bk;
4133 set_inuse_bit_at_offset(victim, nb);
4134 bin->bk = bck;
4135 bck->fd = bin;
4136
4137 if (av != &main_arena)
4138 victim->size |= NON_MAIN_ARENA;
4139 check_malloced_chunk(av, victim, nb);
4140 void *p = chunk2mem(victim);
4141 if (__builtin_expect (perturb_byte, 0))
4142 alloc_perturb (p, bytes);
4143 return p;
4144 }
4145 }
4146 }
4147
4148 /*
4149 If this is a large request, consolidate fastbins before continuing.
4150 While it might look excessive to kill all fastbins before
4151 even seeing if there is space available, this avoids
4152 fragmentation problems normally associated with fastbins.
4153 Also, in practice, programs tend to have runs of either small or
4154 large requests, but less often mixtures, so consolidation is not
4155 invoked all that often in most programs. And the programs in which
4156 it is called frequently would otherwise tend to fragment.
4157 */
4158
4159 else {
4160 idx = largebin_index(nb);
4161 if (have_fastchunks(av))
4162 malloc_consolidate(av);
4163 }
4164
4165 /*
4166 Process recently freed or remaindered chunks, taking one only if
4167 it is an exact fit, or, if this is a small request, the chunk is the remainder from
4168 the most recent non-exact fit. Place other traversed chunks in
4169 bins. Note that this step is the only place in any routine where
4170 chunks are placed in bins.
4171
4172 The outer loop here is needed because we might not realize until
4173 near the end of malloc that we should have consolidated, so must
4174 do so and retry. This happens at most once, and only when we would
4175 otherwise need to expand memory to service a "small" request.
4176 */
4177
4178 for(;;) {
4179
4180 int iters = 0;
4181 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4182 bck = victim->bk;
4183 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
4184 || __builtin_expect (victim->size > av->system_mem, 0))
4185 malloc_printerr (check_action, "malloc(): memory corruption",
4186 chunk2mem (victim));
4187 size = chunksize(victim);
4188
4189 /*
4190 If a small request, try to use last remainder if it is the
4191 only chunk in unsorted bin. This helps promote locality for
4192 runs of consecutive small requests. This is the only
4193 exception to best-fit, and applies only when there is
4194 no exact fit for a small chunk.
4195 */
4196
4197 if (in_smallbin_range(nb) &&
4198 bck == unsorted_chunks(av) &&
4199 victim == av->last_remainder &&
4200 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4201
4202 /* split and reattach remainder */
4203 remainder_size = size - nb;
4204 remainder = chunk_at_offset(victim, nb);
4205 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4206 av->last_remainder = remainder;
4207 remainder->bk = remainder->fd = unsorted_chunks(av);
4208 if (!in_smallbin_range(remainder_size))
4209 {
4210 remainder->fd_nextsize = NULL;
4211 remainder->bk_nextsize = NULL;
4212 }
4213
4214 set_head(victim, nb | PREV_INUSE |
4215 (av != &main_arena ? NON_MAIN_ARENA : 0));
4216 set_head(remainder, remainder_size | PREV_INUSE);
4217 set_foot(remainder, remainder_size);
4218
4219 check_malloced_chunk(av, victim, nb);
4220 void *p = chunk2mem(victim);
4221 if (__builtin_expect (perturb_byte, 0))
4222 alloc_perturb (p, bytes);
4223 return p;
4224 }
4225
4226 /* remove from unsorted list */
4227 unsorted_chunks(av)->bk = bck;
4228 bck->fd = unsorted_chunks(av);
4229
4230 /* Take now instead of binning if exact fit */
4231
4232 if (size == nb) {
4233 set_inuse_bit_at_offset(victim, size);
4234 if (av != &main_arena)
4235 victim->size |= NON_MAIN_ARENA;
4236 check_malloced_chunk(av, victim, nb);
4237 void *p = chunk2mem(victim);
4238 if (__builtin_expect (perturb_byte, 0))
4239 alloc_perturb (p, bytes);
4240 return p;
4241 }
4242
4243 /* place chunk in bin */
4244
4245 if (in_smallbin_range(size)) {
4246 victim_index = smallbin_index(size);
4247 bck = bin_at(av, victim_index);
4248 fwd = bck->fd;
4249 }
4250 else {
4251 victim_index = largebin_index(size);
4252 bck = bin_at(av, victim_index);
4253 fwd = bck->fd;
4254
4255 /* maintain large bins in sorted order */
4256 if (fwd != bck) {
4257 /* Or with inuse bit to speed comparisons */
4258 size |= PREV_INUSE;
4259 /* if smaller than smallest, bypass loop below */
4260 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
4261 if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
4262 fwd = bck;
4263 bck = bck->bk;
4264
4265 victim->fd_nextsize = fwd->fd;
4266 victim->bk_nextsize = fwd->fd->bk_nextsize;
4267 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4268 }
4269 else {
4270 assert((fwd->size & NON_MAIN_ARENA) == 0);
4271 while ((unsigned long) size < fwd->size)
4272 {
4273 fwd = fwd->fd_nextsize;
4274 assert((fwd->size & NON_MAIN_ARENA) == 0);
4275 }
4276
4277 if ((unsigned long) size == (unsigned long) fwd->size)
4278 /* Always insert in the second position. */
4279 fwd = fwd->fd;
4280 else
4281 {
4282 victim->fd_nextsize = fwd;
4283 victim->bk_nextsize = fwd->bk_nextsize;
4284 fwd->bk_nextsize = victim;
4285 victim->bk_nextsize->fd_nextsize = victim;
4286 }
4287 bck = fwd->bk;
4288 }
4289 } else
4290 victim->fd_nextsize = victim->bk_nextsize = victim;
4291 }
4292
4293 mark_bin(av, victim_index);
4294 victim->bk = bck;
4295 victim->fd = fwd;
4296 fwd->bk = victim;
4297 bck->fd = victim;
4298
4299 #define MAX_ITERS 10000
4300 if (++iters >= MAX_ITERS)
4301 break;
4302 }
4303
4304 /*
4305 If a large request, scan through the chunks of current bin in
4306 sorted order to find smallest that fits. Use the skip list for this.
4307 */
4308
4309 if (!in_smallbin_range(nb)) {
4310 bin = bin_at(av, idx);
4311
4312 /* skip scan if empty or largest chunk is too small */
4313 if ((victim = first(bin)) != bin &&
4314 (unsigned long)(victim->size) >= (unsigned long)(nb)) {
4315
4316 victim = victim->bk_nextsize;
4317 while (((unsigned long)(size = chunksize(victim)) <
4318 (unsigned long)(nb)))
4319 victim = victim->bk_nextsize;
4320
4321 /* Avoid removing the first entry for a size so that the skip
4322 list does not have to be rerouted. */
4323 if (victim != last(bin) && victim->size == victim->fd->size)
4324 victim = victim->fd;
4325
4326 remainder_size = size - nb;
4327 unlink(victim, bck, fwd);
4328
4329 /* Exhaust */
4330 if (remainder_size < MINSIZE) {
4331 set_inuse_bit_at_offset(victim, size);
4332 if (av != &main_arena)
4333 victim->size |= NON_MAIN_ARENA;
4334 }
4335 /* Split */
4336 else {
4337 remainder = chunk_at_offset(victim, nb);
4338 /* We cannot assume the unsorted list is empty and therefore
4339 have to perform a complete insert here. */
4340 bck = unsorted_chunks(av);
4341 fwd = bck->fd;
4342 remainder->bk = bck;
4343 remainder->fd = fwd;
4344 bck->fd = remainder;
4345 fwd->bk = remainder;
4346 if (!in_smallbin_range(remainder_size))
4347 {
4348 remainder->fd_nextsize = NULL;
4349 remainder->bk_nextsize = NULL;
4350 }
4351 set_head(victim, nb | PREV_INUSE |
4352 (av != &main_arena ? NON_MAIN_ARENA : 0));
4353 set_head(remainder, remainder_size | PREV_INUSE);
4354 set_foot(remainder, remainder_size);
4355 }
4356 check_malloced_chunk(av, victim, nb);
4357 void *p = chunk2mem(victim);
4358 if (__builtin_expect (perturb_byte, 0))
4359 alloc_perturb (p, bytes);
4360 return p;
4361 }
4362 }
4363
4364 /*
4365 Search for a chunk by scanning bins, starting with next largest
4366 bin. This search is strictly by best-fit; i.e., the smallest
4367 (with ties going to approximately the least recently used) chunk
4368 that fits is selected.
4369
4370 The bitmap avoids needing to check that most blocks are nonempty.
4371 The particular case of skipping all bins during warm-up phases
4372 when no chunks have been returned yet is faster than it might look.
4373 */
4374
4375 ++idx;
4376 bin = bin_at(av,idx);
4377 block = idx2block(idx);
4378 map = av->binmap[block];
4379 bit = idx2bit(idx);
4380
4381 for (;;) {
4382
4383 /* Skip rest of block if there are no more set bits in this block. */
4384 if (bit > map || bit == 0) {
4385 do {
4386 if (++block >= BINMAPSIZE) /* out of bins */
4387 goto use_top;
4388 } while ( (map = av->binmap[block]) == 0);
4389
4390 bin = bin_at(av, (block << BINMAPSHIFT));
4391 bit = 1;
4392 }
4393
4394 /* Advance to bin with set bit. There must be one. */
4395 while ((bit & map) == 0) {
4396 bin = next_bin(bin);
4397 bit <<= 1;
4398 assert(bit != 0);
4399 }
4400
4401 /* Inspect the bin. It is likely to be non-empty */
4402 victim = last(bin);
4403
4404 /* If a false alarm (empty bin), clear the bit. */
4405 if (victim == bin) {
4406 av->binmap[block] = map &= ~bit; /* Write through */
4407 bin = next_bin(bin);
4408 bit <<= 1;
4409 }
4410
4411 else {
4412 size = chunksize(victim);
4413
4414 /* We know the first chunk in this bin is big enough to use. */
4415 assert((unsigned long)(size) >= (unsigned long)(nb));
4416
4417 remainder_size = size - nb;
4418
4419 /* unlink */
4420 unlink(victim, bck, fwd);
4421
4422 /* Exhaust */
4423 if (remainder_size < MINSIZE) {
4424 set_inuse_bit_at_offset(victim, size);
4425 if (av != &main_arena)
4426 victim->size |= NON_MAIN_ARENA;
4427 }
4428
4429 /* Split */
4430 else {
4431 remainder = chunk_at_offset(victim, nb);
4432
4433 /* We cannot assume the unsorted list is empty and therefore
4434 have to perform a complete insert here. */
4435 bck = unsorted_chunks(av);
4436 fwd = bck->fd;
4437 remainder->bk = bck;
4438 remainder->fd = fwd;
4439 bck->fd = remainder;
4440 fwd->bk = remainder;
4441
4442 /* advertise as last remainder */
4443 if (in_smallbin_range(nb))
4444 av->last_remainder = remainder;
4445 if (!in_smallbin_range(remainder_size))
4446 {
4447 remainder->fd_nextsize = NULL;
4448 remainder->bk_nextsize = NULL;
4449 }
4450 set_head(victim, nb | PREV_INUSE |
4451 (av != &main_arena ? NON_MAIN_ARENA : 0));
4452 set_head(remainder, remainder_size | PREV_INUSE);
4453 set_foot(remainder, remainder_size);
4454 }
4455 check_malloced_chunk(av, victim, nb);
4456 void *p = chunk2mem(victim);
4457 if (__builtin_expect (perturb_byte, 0))
4458 alloc_perturb (p, bytes);
4459 return p;
4460 }
4461 }
4462
4463 use_top:
4464 /*
4465 If large enough, split off the chunk bordering the end of memory
4466 (held in av->top). Note that this is in accord with the best-fit
4467 search rule. In effect, av->top is treated as larger (and thus
4468 less well fitting) than any other available chunk since it can
4469 be extended to be as large as necessary (up to system
4470 limitations).
4471
4472 We require that av->top always exists (i.e., has size >=
4473 MINSIZE) after initialization, so if it would otherwise be
4474 exhausted by the current request, it is replenished. (The main
4475 reason for ensuring it exists is that we may need MINSIZE space
4476 to put in fenceposts in sysmalloc.)
4477 */
4478
4479 victim = av->top;
4480 size = chunksize(victim);
4481
4482 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4483 remainder_size = size - nb;
4484 remainder = chunk_at_offset(victim, nb);
4485 av->top = remainder;
4486 set_head(victim, nb | PREV_INUSE |
4487 (av != &main_arena ? NON_MAIN_ARENA : 0));
4488 set_head(remainder, remainder_size | PREV_INUSE);
4489
4490 check_malloced_chunk(av, victim, nb);
4491 void *p = chunk2mem(victim);
4492 if (__builtin_expect (perturb_byte, 0))
4493 alloc_perturb (p, bytes);
4494 return p;
4495 }
4496
4497 /*
4498 If there is space available in fastbins, consolidate and retry,
4499 to possibly avoid expanding memory. This can occur only if nb is
4500 in smallbin range so we didn't consolidate upon entry.
4501 */
4502
4503 else if (have_fastchunks(av)) {
4504 assert(in_smallbin_range(nb));
4505 malloc_consolidate(av);
4506 idx = smallbin_index(nb); /* restore original bin index */
4507 }
4508
4509 /*
4510 Otherwise, relay to handle system-dependent cases
4511 */
4512 else {
4513 void *p = sYSMALLOc(nb, av);
4514 if (__builtin_expect (perturb_byte, 0))
4515 alloc_perturb (p, bytes);
4516 return p;
4517 }
4518 }
4519 }
4520
4521 /*
4522 ------------------------------ free ------------------------------
4523 */
4524
4525 void
4526 _int_free(mstate av, Void_t* mem)
4527 {
4528 mchunkptr p; /* chunk corresponding to mem */
4529 INTERNAL_SIZE_T size; /* its size */
4530 mfastbinptr* fb; /* associated fastbin */
4531 mchunkptr nextchunk; /* next contiguous chunk */
4532 INTERNAL_SIZE_T nextsize; /* its size */
4533 int nextinuse; /* true if nextchunk is used */
4534 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4535 mchunkptr bck; /* misc temp for linking */
4536 mchunkptr fwd; /* misc temp for linking */
4537
4538 const char *errstr = NULL;
4539
4540 p = mem2chunk(mem);
4541 size = chunksize(p);
4542
4543 /* Little security check which won't hurt performance: the
4544 allocator never wraps around at the end of the address space.
4545 Therefore we can exclude some size values which might appear
4546 here by accident or by "design" from some intruder. */
4547 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4548 || __builtin_expect (misaligned_chunk (p), 0))
4549 {
4550 errstr = "free(): invalid pointer";
4551 errout:
4552 malloc_printerr (check_action, errstr, mem);
4553 return;
4554 }
4555 /* We know that each chunk is at least MINSIZE bytes in size. */
4556 if (__builtin_expect (size < MINSIZE, 0))
4557 {
4558 errstr = "free(): invalid size";
4559 goto errout;
4560 }
4561
4562 check_inuse_chunk(av, p);
4563
4564 /*
4565 If eligible, place chunk on a fastbin so it can be found
4566 and used quickly in malloc.
4567 */
4568
4569 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4570
4571 #if TRIM_FASTBINS
4572 /*
4573 If TRIM_FASTBINS set, don't place chunks
4574 bordering top into fastbins
4575 */
4576 && (chunk_at_offset(p, size) != av->top)
4577 #endif
4578 ) {
4579
4580 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
4581 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4582 >= av->system_mem, 0))
4583 {
4584 errstr = "free(): invalid next size (fast)";
4585 goto errout;
4586 }
4587
4588 set_fastchunks(av);
4589 fb = &(av->fastbins[fastbin_index(size)]);
4590 /* Another simple check: make sure the top of the bin is not the
4591 record we are going to add (i.e., double free). */
4592 if (__builtin_expect (*fb == p, 0))
4593 {
4594 errstr = "double free or corruption (fasttop)";
4595 goto errout;
4596 }
4597
4598 if (__builtin_expect (perturb_byte, 0))
4599 free_perturb (mem, size - SIZE_SZ);
4600
4601 p->fd = *fb;
4602 *fb = p;
4603 }
4604
4605 /*
4606 Consolidate other non-mmapped chunks as they arrive.
4607 */
4608
4609 else if (!chunk_is_mmapped(p)) {
4610 nextchunk = chunk_at_offset(p, size);
4611
4612 /* Lightweight tests: check whether the block is already the
4613 top block. */
4614 if (__builtin_expect (p == av->top, 0))
4615 {
4616 errstr = "double free or corruption (top)";
4617 goto errout;
4618 }
4619 /* Or whether the next chunk is beyond the boundaries of the arena. */
4620 if (__builtin_expect (contiguous (av)
4621 && (char *) nextchunk
4622 >= ((char *) av->top + chunksize(av->top)), 0))
4623 {
4624 errstr = "double free or corruption (out)";
4625 goto errout;
4626 }
4627 /* Or whether the block is actually not marked used. */
4628 if (__builtin_expect (!prev_inuse(nextchunk), 0))
4629 {
4630 errstr = "double free or corruption (!prev)";
4631 goto errout;
4632 }
4633
4634 nextsize = chunksize(nextchunk);
4635 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
4636 || __builtin_expect (nextsize >= av->system_mem, 0))
4637 {
4638 errstr = "free(): invalid next size (normal)";
4639 goto errout;
4640 }
4641
4642 if (__builtin_expect (perturb_byte, 0))
4643 free_perturb (mem, size - SIZE_SZ);
4644
4645 /* consolidate backward */
4646 if (!prev_inuse(p)) {
4647 prevsize = p->prev_size;
4648 size += prevsize;
4649 p = chunk_at_offset(p, -((long) prevsize));
4650 unlink(p, bck, fwd);
4651 }
4652
4653 if (nextchunk != av->top) {
4654 /* get and clear inuse bit */
4655 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4656
4657 /* consolidate forward */
4658 if (!nextinuse) {
4659 unlink(nextchunk, bck, fwd);
4660 size += nextsize;
4661 } else
4662 clear_inuse_bit_at_offset(nextchunk, 0);
4663
4664 /*
4665 Place the chunk in unsorted chunk list. Chunks are
4666 not placed into regular bins until after they have
4667 been given one chance to be used in malloc.
4668 */
4669
4670 bck = unsorted_chunks(av);
4671 fwd = bck->fd;
4672 p->fd = fwd;
4673 p->bk = bck;
4674 if (!in_smallbin_range(size))
4675 {
4676 p->fd_nextsize = NULL;
4677 p->bk_nextsize = NULL;
4678 }
4679 bck->fd = p;
4680 fwd->bk = p;
4681
4682 set_head(p, size | PREV_INUSE);
4683 set_foot(p, size);
4684
4685 check_free_chunk(av, p);
4686 }
4687
4688 /*
4689 If the chunk borders the current high end of memory,
4690 consolidate into top
4691 */
4692
4693 else {
4694 size += nextsize;
4695 set_head(p, size | PREV_INUSE);
4696 av->top = p;
4697 check_chunk(av, p);
4698 }
4699
4700 /*
4701 If freeing a large space, consolidate possibly-surrounding
4702 chunks. Then, if the total unused topmost memory exceeds trim
4703 threshold, ask malloc_trim to reduce top.
4704
4705 Unless max_fast is 0, we don't know if there are fastbins
4706 bordering top, so we cannot tell for sure whether threshold
4707 has been reached unless fastbins are consolidated. But we
4708 don't want to consolidate on each free. As a compromise,
4709 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4710 is reached.
4711 */
4712
4713 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4714 if (have_fastchunks(av))
4715 malloc_consolidate(av);
4716
4717 if (av == &main_arena) {
4718 #ifndef MORECORE_CANNOT_TRIM
4719 if ((unsigned long)(chunksize(av->top)) >=
4720 (unsigned long)(mp_.trim_threshold))
4721 sYSTRIm(mp_.top_pad, av);
4722 #endif
4723 } else {
4724 /* Always try heap_trim(), even if the top chunk is not
4725 large, because the corresponding heap might go away. */
4726 heap_info *heap = heap_for_ptr(top(av));
4727
4728 assert(heap->ar_ptr == av);
4729 heap_trim(heap, mp_.top_pad);
4730 }
4731 }
4732
4733 }
4734 /*
4735 If the chunk was allocated via mmap, release via munmap(). Note
4736 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4737 the user must have overwritten memory. There's nothing we can do to
4738 catch this error unless MALLOC_DEBUG is set, in which case
4739 check_inuse_chunk (above) will have triggered an error.
4740 */
4741
4742 else {
4743 #if HAVE_MMAP
4744 munmap_chunk (p);
4745 #endif
4746 }
4747 }
4748
4749 /*
4750 ------------------------- malloc_consolidate -------------------------
4751
4752 malloc_consolidate is a specialized version of free() that tears
4753 down chunks held in fastbins. Free itself cannot be used for this
4754 purpose since, among other things, it might place chunks back onto
4755 fastbins. So, instead, we need to use a minor variant of the same
4756 code.
4757
4758 Also, because this routine needs to be called the first time through
4759 malloc anyway, it turns out to be the perfect place to trigger
4760 initialization code.
4761 */
4762
4763 #if __STD_C
4764 static void malloc_consolidate(mstate av)
4765 #else
4766 static void malloc_consolidate(av) mstate av;
4767 #endif
4768 {
4769 mfastbinptr* fb; /* current fastbin being consolidated */
4770 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4771 mchunkptr p; /* current chunk being consolidated */
4772 mchunkptr nextp; /* next chunk to consolidate */
4773 mchunkptr unsorted_bin; /* bin header */
4774 mchunkptr first_unsorted; /* chunk to link to */
4775
4776 /* These have same use as in free() */
4777 mchunkptr nextchunk;
4778 INTERNAL_SIZE_T size;
4779 INTERNAL_SIZE_T nextsize;
4780 INTERNAL_SIZE_T prevsize;
4781 int nextinuse;
4782 mchunkptr bck;
4783 mchunkptr fwd;
4784
4785 /*
4786 If max_fast is 0, we know that av hasn't
4787 yet been initialized, in which case do so below
4788 */
4789
4790 if (get_max_fast () != 0) {
4791 clear_fastchunks(av);
4792
4793 unsorted_bin = unsorted_chunks(av);
4794
4795 /*
4796 Remove each chunk from its fast bin and consolidate it, then place it
4797 in the unsorted bin. Among other reasons for doing this,
4798 placing in unsorted bin avoids needing to calculate actual bins
4799 until malloc is sure that chunks aren't immediately going to be
4800 reused anyway.
4801 */
4802
4803 #if 0
4804 /* It is wrong to limit the fast bins searched using get_max_fast
4805 because, except for the main arena, all the others might have
4806 blocks in the high fast bins. It's not worth it anyway; just
4807 search all bins all the time. */
4808 maxfb = &(av->fastbins[fastbin_index(get_max_fast ())]);
4809 #else
4810 maxfb = &(av->fastbins[NFASTBINS - 1]);
4811 #endif
4812 fb = &(av->fastbins[0]);
4813 do {
4814 if ( (p = *fb) != 0) {
4815 *fb = 0;
4816
4817 do {
4818 check_inuse_chunk(av, p);
4819 nextp = p->fd;
4820
4821 /* Slightly streamlined version of consolidation code in free() */
4822 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4823 nextchunk = chunk_at_offset(p, size);
4824 nextsize = chunksize(nextchunk);
4825
4826 if (!prev_inuse(p)) {
4827 prevsize = p->prev_size;
4828 size += prevsize;
4829 p = chunk_at_offset(p, -((long) prevsize));
4830 unlink(p, bck, fwd);
4831 }
4832
4833 if (nextchunk != av->top) {
4834 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4835
4836 if (!nextinuse) {
4837 size += nextsize;
4838 unlink(nextchunk, bck, fwd);
4839 } else
4840 clear_inuse_bit_at_offset(nextchunk, 0);
4841
4842 first_unsorted = unsorted_bin->fd;
4843 unsorted_bin->fd = p;
4844 first_unsorted->bk = p;
4845
4846 if (!in_smallbin_range (size)) {
4847 p->fd_nextsize = NULL;
4848 p->bk_nextsize = NULL;
4849 }
4850
4851 set_head(p, size | PREV_INUSE);
4852 p->bk = unsorted_bin;
4853 p->fd = first_unsorted;
4854 set_foot(p, size);
4855 }
4856
4857 else {
4858 size += nextsize;
4859 set_head(p, size | PREV_INUSE);
4860 av->top = p;
4861 }
4862
4863 } while ( (p = nextp) != 0);
4864
4865 }
4866 } while (fb++ != maxfb);
4867 }
4868 else {
4869 malloc_init_state(av);
4870 check_malloc_state(av);
4871 }
4872 }
4873
4874 /*
4875 ------------------------------ realloc ------------------------------
4876 */
4877
4878 Void_t*
4879 _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
4880 {
4881 INTERNAL_SIZE_T nb; /* padded request size */
4882
4883 mchunkptr oldp; /* chunk corresponding to oldmem */
4884 INTERNAL_SIZE_T oldsize; /* its size */
4885
4886 mchunkptr newp; /* chunk to return */
4887 INTERNAL_SIZE_T newsize; /* its size */
4888 Void_t* newmem; /* corresponding user mem */
4889
4890 mchunkptr next; /* next contiguous chunk after oldp */
4891
4892 mchunkptr remainder; /* extra space at end of newp */
4893 unsigned long remainder_size; /* its size */
4894
4895 mchunkptr bck; /* misc temp for linking */
4896 mchunkptr fwd; /* misc temp for linking */
4897
4898 unsigned long copysize; /* bytes to copy */
4899 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4900 INTERNAL_SIZE_T* s; /* copy source */
4901 INTERNAL_SIZE_T* d; /* copy destination */
4902
4903 const char *errstr = NULL;
4904
4905
4906 checked_request2size(bytes, nb);
4907
4908 oldp = mem2chunk(oldmem);
4909 oldsize = chunksize(oldp);
4910
4911 /* Simple tests for old block integrity. */
4912 if (__builtin_expect (misaligned_chunk (oldp), 0))
4913 {
4914 errstr = "realloc(): invalid pointer";
4915 errout:
4916 malloc_printerr (check_action, errstr, oldmem);
4917 return NULL;
4918 }
4919 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4920 || __builtin_expect (oldsize >= av->system_mem, 0))
4921 {
4922 errstr = "realloc(): invalid old size";
4923 goto errout;
4924 }
4925
4926 check_inuse_chunk(av, oldp);
4927
4928 if (!chunk_is_mmapped(oldp)) {
4929
4930 next = chunk_at_offset(oldp, oldsize);
4931 INTERNAL_SIZE_T nextsize = chunksize(next);
4932 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4933 || __builtin_expect (nextsize >= av->system_mem, 0))
4934 {
4935 errstr = "realloc(): invalid next size";
4936 goto errout;
4937 }
4938
4939 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4940 /* already big enough; split below */
4941 newp = oldp;
4942 newsize = oldsize;
4943 }
4944
4945 else {
4946 /* Try to expand forward into top */
4947 if (next == av->top &&
4948 (unsigned long)(newsize = oldsize + nextsize) >=
4949 (unsigned long)(nb + MINSIZE)) {
4950 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4951 av->top = chunk_at_offset(oldp, nb);
4952 set_head(av->top, (newsize - nb) | PREV_INUSE);
4953 check_inuse_chunk(av, oldp);
4954 return chunk2mem(oldp);
4955 }
4956
4957 /* Try to expand forward into next chunk; split off remainder below */
4958 else if (next != av->top &&
4959 !inuse(next) &&
4960 (unsigned long)(newsize = oldsize + nextsize) >=
4961 (unsigned long)(nb)) {
4962 newp = oldp;
4963 unlink(next, bck, fwd);
4964 }
4965
4966 /* allocate, copy, free */
4967 else {
4968 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4969 if (newmem == 0)
4970 return 0; /* propagate failure */
4971
4972 newp = mem2chunk(newmem);
4973 newsize = chunksize(newp);
4974
4975 /*
4976 Avoid copy if newp is next chunk after oldp.
4977 */
4978 if (newp == next) {
4979 newsize += oldsize;
4980 newp = oldp;
4981 }
4982 else {
4983 /*
4984 Unroll copy of <= 36 bytes (72 if 8byte sizes)
4985 We know that contents have an odd number of
4986 INTERNAL_SIZE_T-sized words; minimally 3.
4987 */
4988
4989 copysize = oldsize - SIZE_SZ;
4990 s = (INTERNAL_SIZE_T*)(oldmem);
4991 d = (INTERNAL_SIZE_T*)(newmem);
4992 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4993 assert(ncopies >= 3);
4994
4995 if (ncopies > 9)
4996 MALLOC_COPY(d, s, copysize);
4997
4998 else {
4999 *(d+0) = *(s+0);
5000 *(d+1) = *(s+1);
5001 *(d+2) = *(s+2);
5002 if (ncopies > 4) {
5003 *(d+3) = *(s+3);
5004 *(d+4) = *(s+4);
5005 if (ncopies > 6) {
5006 *(d+5) = *(s+5);
5007 *(d+6) = *(s+6);
5008 if (ncopies > 8) {
5009 *(d+7) = *(s+7);
5010 *(d+8) = *(s+8);
5011 }
5012 }
5013 }
5014 }
5015
5016 _int_free(av, oldmem);
5017 check_inuse_chunk(av, newp);
5018 return chunk2mem(newp);
5019 }
5020 }
5021 }
5022
5023 /* If possible, free extra space in old or extended chunk */
5024
5025 assert((unsigned long)(newsize) >= (unsigned long)(nb));
5026
5027 remainder_size = newsize - nb;
5028
5029 if (remainder_size < MINSIZE) { /* not enough extra to split off */
5030 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5031 set_inuse_bit_at_offset(newp, newsize);
5032 }
5033 else { /* split remainder */
5034 remainder = chunk_at_offset(newp, nb);
5035 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
5036 set_head(remainder, remainder_size | PREV_INUSE |
5037 (av != &main_arena ? NON_MAIN_ARENA : 0));
5038 /* Mark remainder as inuse so free() won't complain */
5039 set_inuse_bit_at_offset(remainder, remainder_size);
5040 _int_free(av, chunk2mem(remainder));
5041 }
5042
5043 check_inuse_chunk(av, newp);
5044 return chunk2mem(newp);
5045 }
5046
5047 /*
5048 Handle mmap cases
5049 */
5050
5051 else {
5052 #if HAVE_MMAP
5053
5054 #if HAVE_MREMAP
5055 INTERNAL_SIZE_T offset = oldp->prev_size;
5056 size_t pagemask = mp_.pagesize - 1;
5057 char *cp;
5058 unsigned long sum;
5059
5060 /* Note the extra SIZE_SZ overhead */
5061 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
5062
5063 /* don't need to remap if still within same page */
5064 if (oldsize == newsize - offset)
5065 return oldmem;
5066
5067 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
5068
5069 if (cp != MAP_FAILED) {
5070
5071 newp = (mchunkptr)(cp + offset);
5072 set_head(newp, (newsize - offset)|IS_MMAPPED);
5073
5074 assert(aligned_OK(chunk2mem(newp)));
5075 assert((newp->prev_size == offset));
5076
5077 /* update statistics */
5078 sum = mp_.mmapped_mem += newsize - oldsize;
5079 if (sum > (unsigned long)(mp_.max_mmapped_mem))
5080 mp_.max_mmapped_mem = sum;
5081 #ifdef NO_THREADS
5082 sum += main_arena.system_mem;
5083 if (sum > (unsigned long)(mp_.max_total_mem))
5084 mp_.max_total_mem = sum;
5085 #endif
5086
5087 return chunk2mem(newp);
5088 }
5089 #endif
5090
5091 /* Note the extra SIZE_SZ overhead. */
5092 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
5093 newmem = oldmem; /* do nothing */
5094 else {
5095 /* Must alloc, copy, free. */
5096 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
5097 if (newmem != 0) {
5098 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
5099 _int_free(av, oldmem);
5100 }
5101 }
5102 return newmem;
5103
5104 #else
5105 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
5106 check_malloc_state(av);
5107 MALLOC_FAILURE_ACTION;
5108 return 0;
5109 #endif
5110 }
5111 }
5112
5113 /*
5114 ------------------------------ memalign ------------------------------
5115 */
5116
5117 Void_t*
5118 _int_memalign(mstate av, size_t alignment, size_t bytes)
5119 {
5120 INTERNAL_SIZE_T nb; /* padded request size */
5121 char* m; /* memory returned by malloc call */
5122 mchunkptr p; /* corresponding chunk */
5123 char* brk; /* alignment point within p */
5124 mchunkptr newp; /* chunk to return */
5125 INTERNAL_SIZE_T newsize; /* its size */
5126 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5127 mchunkptr remainder; /* spare room at end to split off */
5128 unsigned long remainder_size; /* its size */
5129 INTERNAL_SIZE_T size;
5130
5131 /* If we need less alignment than we give anyway, just relay to malloc */
5132
5133 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
5134
5135 /* Otherwise, ensure that it is at least a minimum chunk size */
5136
5137 if (alignment < MINSIZE) alignment = MINSIZE;
5138
5139 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
5140 if ((alignment & (alignment - 1)) != 0) {
5141 size_t a = MALLOC_ALIGNMENT * 2;
5142 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
5143 alignment = a;
5144 }
5145
5146 checked_request2size(bytes, nb);
5147
5148 /*
5149 Strategy: find a spot within that chunk that meets the alignment
5150 request, and then possibly free the leading and trailing space.
5151 */
5152
5153
5154 /* Call malloc with worst case padding to hit alignment. */
5155
5156 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
5157
5158 if (m == 0) return 0; /* propagate failure */
5159
5160 p = mem2chunk(m);
5161
5162 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
5163
5164 /*
5165 Find an aligned spot inside chunk. Since we need to give back
5166 leading space in a chunk of at least MINSIZE, if the first
5167 calculation places us at a spot with less than MINSIZE leader,
5168 we can move to the next aligned spot -- we've allocated enough
5169 total room so that this is always possible.
5170 */
5171
5172 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
5173 -((signed long) alignment));
5174 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
5175 brk += alignment;
5176
5177 newp = (mchunkptr)brk;
5178 leadsize = brk - (char*)(p);
5179 newsize = chunksize(p) - leadsize;
5180
5181 /* For mmapped chunks, just adjust offset */
5182 if (chunk_is_mmapped(p)) {
5183 newp->prev_size = p->prev_size + leadsize;
5184 set_head(newp, newsize|IS_MMAPPED);
5185 return chunk2mem(newp);
5186 }
5187
5188 /* Otherwise, give back leader, use the rest */
5189 set_head(newp, newsize | PREV_INUSE |
5190 (av != &main_arena ? NON_MAIN_ARENA : 0));
5191 set_inuse_bit_at_offset(newp, newsize);
5192 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5193 _int_free(av, chunk2mem(p));
5194 p = newp;
5195
5196 assert (newsize >= nb &&
5197 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
5198 }
5199
5200 /* Also give back spare room at the end */
5201 if (!chunk_is_mmapped(p)) {
5202 size = chunksize(p);
5203 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
5204 remainder_size = size - nb;
5205 remainder = chunk_at_offset(p, nb);
5206 set_head(remainder, remainder_size | PREV_INUSE |
5207 (av != &main_arena ? NON_MAIN_ARENA : 0));
5208 set_head_size(p, nb);
5209 _int_free(av, chunk2mem(remainder));
5210 }
5211 }
5212
5213 check_inuse_chunk(av, p);
5214 return chunk2mem(p);
5215 }
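/*
  The strategy above (over-allocate, carve out an aligned chunk, then give
  the leading and trailing slop back to the allocator) needs access to the
  allocator's internals.  For comparison, a minimal sketch of the portable
  fallback built on plain malloc(); aligned_alloc_simple() and
  aligned_free_simple() are hypothetical names, alignment is assumed to be
  a power of two, and unlike the code above the padding is not reusable
  until the block is freed:
*/
#if 0
#include <stdlib.h>
#include <stdint.h>

static void *aligned_alloc_simple(size_t alignment, size_t bytes)
{
  /* Worst-case padding plus room to remember the original pointer. */
  void *raw = malloc(bytes + alignment - 1 + sizeof(void *));
  if (raw == NULL)
    return NULL;

  /* First aligned address that leaves space for the stashed pointer. */
  uintptr_t user = ((uintptr_t) raw + sizeof(void *) + alignment - 1)
                   & ~((uintptr_t) alignment - 1);
  ((void **) user)[-1] = raw;        /* stash raw pointer just below */
  return (void *) user;
}

static void aligned_free_simple(void *mem)
{
  if (mem != NULL)
    free(((void **) mem)[-1]);       /* free the original allocation */
}
#endif /* 0 */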
5216
5217 #if 0
5218 /*
5219 ------------------------------ calloc ------------------------------
5220 */
5221
5222 #if __STD_C
5223 Void_t* cALLOc(size_t n_elements, size_t elem_size)
5224 #else
5225 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5226 #endif
5227 {
5228 mchunkptr p;
5229 unsigned long clearsize;
5230 unsigned long nclears;
5231 INTERNAL_SIZE_T* d;
5232
5233 Void_t* mem = mALLOc(n_elements * elem_size);
5234
5235 if (mem != 0) {
5236 p = mem2chunk(mem);
5237
5238 #if MMAP_CLEARS
5239 if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
5240 #endif
5241 {
5242 /*
5243 Unroll clear of <= 36 bytes (72 if 8-byte sizes)
5244 We know that contents have an odd number of
5245 INTERNAL_SIZE_T-sized words; minimally 3.
5246 */
5247
5248 d = (INTERNAL_SIZE_T*)mem;
5249 clearsize = chunksize(p) - SIZE_SZ;
5250 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5251 assert(nclears >= 3);
5252
5253 if (nclears > 9)
5254 MALLOC_ZERO(d, clearsize);
5255
5256 else {
5257 *(d+0) = 0;
5258 *(d+1) = 0;
5259 *(d+2) = 0;
5260 if (nclears > 4) {
5261 *(d+3) = 0;
5262 *(d+4) = 0;
5263 if (nclears > 6) {
5264 *(d+5) = 0;
5265 *(d+6) = 0;
5266 if (nclears > 8) {
5267 *(d+7) = 0;
5268 *(d+8) = 0;
5269 }
5270 }
5271 }
5272 }
5273 }
5274 }
5275 return mem;
5276 }
5277 #endif /* 0 */
5278
5279 #ifndef _LIBC
5280 /*
5281 ------------------------- independent_calloc -------------------------
5282 */
5283
5284 Void_t**
5285 #if __STD_C
5286 _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
5287 #else
5288 _int_icalloc(av, n_elements, elem_size, chunks)
5289 mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
5290 #endif
5291 {
5292 size_t sz = elem_size; /* serves as 1-element array */
5293 /* opts arg of 3 means all elements are same size, and should be cleared */
5294 return iALLOc(av, n_elements, &sz, 3, chunks);
5295 }
5296
5297 /*
5298 ------------------------- independent_comalloc -------------------------
5299 */
5300
5301 Void_t**
5302 #if __STD_C
5303 _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
5304 #else
5305 _int_icomalloc(av, n_elements, sizes, chunks)
5306 mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
5307 #endif
5308 {
5309 return iALLOc(av, n_elements, sizes, 0, chunks);
5310 }
5311
5312
5313 /*
5314 ------------------------------ ialloc ------------------------------
5315 ialloc provides common support for independent_X routines, handling all of
5316 the combinations that can result.
5317
5318 The opts arg has:
5319 bit 0 set if all elements are same size (using sizes[0])
5320 bit 1 set if elements should be zeroed
5321 */
5322
5323
5324 static Void_t**
5325 #if __STD_C
5326 iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
5327 #else
5328 iALLOc(av, n_elements, sizes, opts, chunks)
5329 mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
5330 #endif
5331 {
5332 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
5333 INTERNAL_SIZE_T contents_size; /* total size of elements */
5334 INTERNAL_SIZE_T array_size; /* request size of pointer array */
5335 Void_t* mem; /* malloced aggregate space */
5336 mchunkptr p; /* corresponding chunk */
5337 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
5338 Void_t** marray; /* either "chunks" or malloced ptr array */
5339 mchunkptr array_chunk; /* chunk for malloced ptr array */
5340 int mmx; /* to disable mmap */
5341 INTERNAL_SIZE_T size;
5342 INTERNAL_SIZE_T size_flags;
5343 size_t i;
5344
5345 /* Ensure initialization/consolidation */
5346 if (have_fastchunks(av)) malloc_consolidate(av);
5347
5348 /* compute array length, if needed */
5349 if (chunks != 0) {
5350 if (n_elements == 0)
5351 return chunks; /* nothing to do */
5352 marray = chunks;
5353 array_size = 0;
5354 }
5355 else {
5356 /* if empty req, must still return chunk representing empty array */
5357 if (n_elements == 0)
5358 return (Void_t**) _int_malloc(av, 0);
5359 marray = 0;
5360 array_size = request2size(n_elements * (sizeof(Void_t*)));
5361 }
5362
5363 /* compute total element size */
5364 if (opts & 0x1) { /* all-same-size */
5365 element_size = request2size(*sizes);
5366 contents_size = n_elements * element_size;
5367 }
5368 else { /* add up all the sizes */
5369 element_size = 0;
5370 contents_size = 0;
5371 for (i = 0; i != n_elements; ++i)
5372 contents_size += request2size(sizes[i]);
5373 }
5374
5375 /* subtract out alignment bytes from total to minimize overallocation */
5376 size = contents_size + array_size - MALLOC_ALIGN_MASK;
5377
5378 /*
5379 Allocate the aggregate chunk.
5380 But first disable mmap so malloc won't use it, since
5381 we would not be able to later free/realloc space internal
5382 to a segregated mmap region.
5383 */
5384 mmx = mp_.n_mmaps_max; /* disable mmap */
5385 mp_.n_mmaps_max = 0;
5386 mem = _int_malloc(av, size);
5387 mp_.n_mmaps_max = mmx; /* reset mmap */
5388 if (mem == 0)
5389 return 0;
5390
5391 p = mem2chunk(mem);
5392 assert(!chunk_is_mmapped(p));
5393 remainder_size = chunksize(p);
5394
5395 if (opts & 0x2) { /* optionally clear the elements */
5396 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
5397 }
5398
5399 size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
5400
5401 /* If not provided, allocate the pointer array as final part of chunk */
5402 if (marray == 0) {
5403 array_chunk = chunk_at_offset(p, contents_size);
5404 marray = (Void_t**) (chunk2mem(array_chunk));
5405 set_head(array_chunk, (remainder_size - contents_size) | size_flags);
5406 remainder_size = contents_size;
5407 }
5408
5409 /* split out elements */
5410 for (i = 0; ; ++i) {
5411 marray[i] = chunk2mem(p);
5412 if (i != n_elements-1) {
5413 if (element_size != 0)
5414 size = element_size;
5415 else
5416 size = request2size(sizes[i]);
5417 remainder_size -= size;
5418 set_head(p, size | size_flags);
5419 p = chunk_at_offset(p, size);
5420 }
5421 else { /* the final element absorbs any overallocation slop */
5422 set_head(p, remainder_size | size_flags);
5423 break;
5424 }
5425 }
5426
5427 #if MALLOC_DEBUG
5428 if (marray != chunks) {
5429 /* final element must have exactly exhausted chunk */
5430 if (element_size != 0)
5431 assert(remainder_size == element_size);
5432 else
5433 assert(remainder_size == request2size(sizes[i]));
5434 check_inuse_chunk(av, mem2chunk(marray));
5435 }
5436
5437 for (i = 0; i != n_elements; ++i)
5438 check_inuse_chunk(av, mem2chunk(marray[i]));
5439 #endif
5440
5441 return marray;
5442 }
5443 #endif /* _LIBC */
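/*
  A usage sketch of the contract iALLOc implements, assuming a standalone
  ptmalloc2 build that exports independent_comalloc(); in glibc this code
  is compiled out (see the #ifndef _LIBC above), and the prototype below
  is assumed rather than taken from a standard header.  Three differently
  sized elements come back carved from one underlying chunk:
*/
#if 0
#include <stdio.h>
#include <stdlib.h>

extern void **independent_comalloc(size_t n_elements, size_t sizes[],
                                   void *chunks[]);

struct header { int id; };

int main(void)
{
  size_t sizes[3] = { sizeof(struct header), 64, 256 };
  void *parts[3];

  /* One allocation request services all three pieces; on failure the
     whole call fails and nothing needs to be freed. */
  if (independent_comalloc(3, sizes, parts) == NULL)
    return 1;

  struct header *h = parts[0];
  h->id = 42;
  printf("header at %p, buffers at %p and %p\n",
         parts[0], parts[1], parts[2]);

  /* Each element may be freed individually, exactly as if it had come
     from a separate malloc() call. */
  free(parts[0]);
  free(parts[1]);
  free(parts[2]);
  return 0;
}
#endif /* 0 */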
5444
5445
5446 /*
5447 ------------------------------ valloc ------------------------------
5448 */
5449
5450 Void_t*
5451 #if __STD_C
5452 _int_valloc(mstate av, size_t bytes)
5453 #else
5454 _int_valloc(av, bytes) mstate av; size_t bytes;
5455 #endif
5456 {
5457 /* Ensure initialization/consolidation */
5458 if (have_fastchunks(av)) malloc_consolidate(av);
5459 return _int_memalign(av, mp_.pagesize, bytes);
5460 }
5461
5462 /*
5463 ------------------------------ pvalloc ------------------------------
5464 */
5465
5466
5467 Void_t*
5468 #if __STD_C
5469 _int_pvalloc(mstate av, size_t bytes)
5470 #else
5471 _int_pvalloc(av, bytes) mstate av; size_t bytes;
5472 #endif
5473 {
5474 size_t pagesz;
5475
5476 /* Ensure initialization/consolidation */
5477 if (have_fastchunks(av)) malloc_consolidate(av);
5478 pagesz = mp_.pagesize;
5479 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5480 }
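/*
  The difference between the two wrappers above, as a usage sketch:
  valloc() page-aligns the block, pvalloc() additionally rounds the
  request size itself up to a page multiple via
  (bytes + pagesz - 1) & ~(pagesz - 1), e.g. 5000 -> 8192 with 4 KiB
  pages.  Assumes the glibc declarations in <malloc.h>:
*/
#if 0
#include <malloc.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
  long pagesz = sysconf(_SC_PAGESIZE);
  void *v = valloc(5000);   /* page aligned, request stays 5000         */
  void *p = pvalloc(5000);  /* page aligned, request rounded to a page  */

  printf("page size %ld\n", pagesz);
  printf("valloc  usable (>= 5000): %zu\n", malloc_usable_size(v));
  printf("pvalloc usable (>= 8192 with 4 KiB pages): %zu\n",
         malloc_usable_size(p));

  free(v);
  free(p);
  return 0;
}
#endif /* 0 */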
5481
5482
5483 /*
5484 ------------------------------ malloc_trim ------------------------------
5485 */
5486
5487 #if __STD_C
5488 int mTRIm(size_t pad)
5489 #else
5490 int mTRIm(pad) size_t pad;
5491 #endif
5492 {
5493 mstate av = &main_arena; /* already locked */
5494
5495 /* Ensure initialization/consolidation */
5496 malloc_consolidate(av);
5497
5498 #ifndef MORECORE_CANNOT_TRIM
5499 return sYSTRIm(pad, av);
5500 #else
5501 return 0;
5502 #endif
5503 }
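/*
  Usage sketch for the public malloc_trim() wrapper of the routine above,
  declared in <malloc.h>: after a burst of frees, a call with pad 0 asks
  the allocator to return as much of the top of the main arena as possible
  to the kernel; the return value is 1 if memory was actually released.
*/
#if 0
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
  enum { N = 1000 };
  void *blocks[N];

  for (int i = 0; i < N; i++)
    blocks[i] = malloc(1024);     /* grow the main arena */
  for (int i = 0; i < N; i++)
    free(blocks[i]);

  if (malloc_trim(0))
    puts("memory returned to the system");
  return 0;
}
#endif /* 0 */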
5504
5505
5506 /*
5507 ------------------------- malloc_usable_size -------------------------
5508 */
5509
5510 #if __STD_C
5511 size_t mUSABLe(Void_t* mem)
5512 #else
5513 size_t mUSABLe(mem) Void_t* mem;
5514 #endif
5515 {
5516 mchunkptr p;
5517 if (mem != 0) {
5518 p = mem2chunk(mem);
5519 if (chunk_is_mmapped(p))
5520 return chunksize(p) - 2*SIZE_SZ;
5521 else if (inuse(p))
5522 return chunksize(p) - SIZE_SZ;
5523 }
5524 return 0;
5525 }
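/*
  Usage sketch for the public wrapper of the routine above,
  malloc_usable_size() from <malloc.h>: the value reported may exceed the
  number of bytes requested (chunk rounding, MINSIZE), and all of it may
  legitimately be used until the block is freed.
*/
#if 0
#include <malloc.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
  char *p = malloc(100);
  if (p == NULL)
    return 1;

  size_t usable = malloc_usable_size(p);   /* >= 100 */
  printf("requested 100, usable %zu\n", usable);

  memset(p, 0, usable);   /* the whole usable region belongs to us */
  free(p);
  return 0;
}
#endif /* 0 */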
5526
5527 /*
5528 ------------------------------ mallinfo ------------------------------
5529 */
5530
5531 struct mallinfo mALLINFo(mstate av)
5532 {
5533 struct mallinfo mi;
5534 size_t i;
5535 mbinptr b;
5536 mchunkptr p;
5537 INTERNAL_SIZE_T avail;
5538 INTERNAL_SIZE_T fastavail;
5539 int nblocks;
5540 int nfastblocks;
5541
5542 /* Ensure initialization */
5543 if (av->top == 0) malloc_consolidate(av);
5544
5545 check_malloc_state(av);
5546
5547 /* Account for top */
5548 avail = chunksize(av->top);
5549 nblocks = 1; /* top always exists */
5550
5551 /* traverse fastbins */
5552 nfastblocks = 0;
5553 fastavail = 0;
5554
5555 for (i = 0; i < NFASTBINS; ++i) {
5556 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5557 ++nfastblocks;
5558 fastavail += chunksize(p);
5559 }
5560 }
5561
5562 avail += fastavail;
5563
5564 /* traverse regular bins */
5565 for (i = 1; i < NBINS; ++i) {
5566 b = bin_at(av, i);
5567 for (p = last(b); p != b; p = p->bk) {
5568 ++nblocks;
5569 avail += chunksize(p);
5570 }
5571 }
5572
5573 mi.smblks = nfastblocks;
5574 mi.ordblks = nblocks;
5575 mi.fordblks = avail;
5576 mi.uordblks = av->system_mem - avail;
5577 mi.arena = av->system_mem;
5578 mi.hblks = mp_.n_mmaps;
5579 mi.hblkhd = mp_.mmapped_mem;
5580 mi.fsmblks = fastavail;
5581 mi.keepcost = chunksize(av->top);
5582 mi.usmblks = mp_.max_total_mem;
5583 return mi;
5584 }
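/*
  Usage sketch for the public mallinfo() wrapper declared in <malloc.h>.
  Field meanings follow the assignments above: arena is the non-mmapped
  bytes obtained from the system, uordblks the bytes in use, fordblks the
  free bytes, hblkhd the bytes in mmapped regions.  (The struct uses int
  fields, so values can wrap on large heaps.)
*/
#if 0
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
  /* Small enough to stay in the sbrk arena under the default
     mmap threshold. */
  void *p = malloc(64 * 1024);
  struct mallinfo mi = mallinfo();

  printf("arena (system bytes)   = %d\n", mi.arena);
  printf("uordblks (in use)      = %d\n", mi.uordblks);
  printf("fordblks (free)        = %d\n", mi.fordblks);
  printf("hblkhd (mmapped bytes) = %d\n", mi.hblkhd);

  free(p);
  return 0;
}
#endif /* 0 */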
5585
5586 /*
5587 ------------------------------ malloc_stats ------------------------------
5588 */
5589
5590 void mSTATs()
5591 {
5592 int i;
5593 mstate ar_ptr;
5594 struct mallinfo mi;
5595 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5596 #if THREAD_STATS
5597 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
5598 #endif
5599
5600 if(__malloc_initialized < 0)
5601 ptmalloc_init ();
5602 #ifdef _LIBC
5603 _IO_flockfile (stderr);
5604 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
5605 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5606 #endif
5607 for (i=0, ar_ptr = &main_arena;; i++) {
5608 (void)mutex_lock(&ar_ptr->mutex);
5609 mi = mALLINFo(ar_ptr);
5610 fprintf(stderr, "Arena %d:\n", i);
5611 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
5612 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
5613 #if MALLOC_DEBUG > 1
5614 if (i > 0)
5615 dump_heap(heap_for_ptr(top(ar_ptr)));
5616 #endif
5617 system_b += mi.arena;
5618 in_use_b += mi.uordblks;
5619 #if THREAD_STATS
5620 stat_lock_direct += ar_ptr->stat_lock_direct;
5621 stat_lock_loop += ar_ptr->stat_lock_loop;
5622 stat_lock_wait += ar_ptr->stat_lock_wait;
5623 #endif
5624 (void)mutex_unlock(&ar_ptr->mutex);
5625 ar_ptr = ar_ptr->next;
5626 if(ar_ptr == &main_arena) break;
5627 }
5628 #if HAVE_MMAP
5629 fprintf(stderr, "Total (incl. mmap):\n");
5630 #else
5631 fprintf(stderr, "Total:\n");
5632 #endif
5633 fprintf(stderr, "system bytes = %10u\n", system_b);
5634 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
5635 #ifdef NO_THREADS
5636 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
5637 #endif
5638 #if HAVE_MMAP
5639 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
5640 fprintf(stderr, "max mmap bytes = %10lu\n",
5641 (unsigned long)mp_.max_mmapped_mem);
5642 #endif
5643 #if THREAD_STATS
5644 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
5645 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
5646 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
5647 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
5648 fprintf(stderr, "locked total = %10ld\n",
5649 stat_lock_direct + stat_lock_loop + stat_lock_wait);
5650 #endif
5651 #ifdef _LIBC
5652 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
5653 _IO_funlockfile (stderr);
5654 #endif
5655 }
5656
5657
5658 /*
5659 ------------------------------ mallopt ------------------------------
5660 */
5661
5662 #if __STD_C
5663 int mALLOPt(int param_number, int value)
5664 #else
5665 int mALLOPt(param_number, value) int param_number; int value;
5666 #endif
5667 {
5668 mstate av = &main_arena;
5669 int res = 1;
5670
5671 if(__malloc_initialized < 0)
5672 ptmalloc_init ();
5673 (void)mutex_lock(&av->mutex);
5674 /* Ensure initialization/consolidation */
5675 malloc_consolidate(av);
5676
5677 switch(param_number) {
5678 case M_MXFAST:
5679 if (value >= 0 && value <= MAX_FAST_SIZE) {
5680 set_max_fast(value);
5681 }
5682 else
5683 res = 0;
5684 break;
5685
5686 case M_TRIM_THRESHOLD:
5687 mp_.trim_threshold = value;
5688 mp_.no_dyn_threshold = 1;
5689 break;
5690
5691 case M_TOP_PAD:
5692 mp_.top_pad = value;
5693 mp_.no_dyn_threshold = 1;
5694 break;
5695
5696 case M_MMAP_THRESHOLD:
5697 #if USE_ARENAS
5698 /* Forbid setting the threshold too high. */
5699 if((unsigned long)value > HEAP_MAX_SIZE/2)
5700 res = 0;
5701 else
5702 #endif
5703 mp_.mmap_threshold = value;
5704 mp_.no_dyn_threshold = 1;
5705 break;
5706
5707 case M_MMAP_MAX:
5708 #if !HAVE_MMAP
5709 if (value != 0)
5710 res = 0;
5711 else
5712 #endif
5713 mp_.n_mmaps_max = value;
5714 mp_.no_dyn_threshold = 1;
5715 break;
5716
5717 case M_CHECK_ACTION:
5718 check_action = value;
5719 break;
5720
5721 case M_PERTURB:
5722 perturb_byte = value;
5723 break;
5724 }
5725 (void)mutex_unlock(&av->mutex);
5726 return res;
5727 }
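/*
  Usage sketch for the public mallopt() interface handled above; the
  parameter constants come from <malloc.h>.  Settings are best applied
  early, and note that any explicit threshold setting disables the
  dynamic mmap/trim threshold adjustment (no_dyn_threshold above).
  mallopt() returns 1 on success and 0 if the value was rejected.
*/
#if 0
#include <malloc.h>
#include <stdio.h>

int main(void)
{
  /* Cache chunks up to 64 bytes in fastbins. */
  if (mallopt(M_MXFAST, 64) == 0)
    fprintf(stderr, "M_MXFAST rejected\n");

  /* Serve requests of 256 KiB and up with mmap. */
  mallopt(M_MMAP_THRESHOLD, 256 * 1024);

  /* Give memory back to the kernel once 1 MiB at the top is free. */
  mallopt(M_TRIM_THRESHOLD, 1024 * 1024);

  /* Fill allocated and freed memory with a debug pattern. */
  mallopt(M_PERTURB, 0xAA);

  return 0;
}
#endif /* 0 */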
5728
5729
5730 /*
5731 -------------------- Alternative MORECORE functions --------------------
5732 */
5733
5734
5735 /*
5736 General Requirements for MORECORE.
5737
5738 The MORECORE function must have the following properties:
5739
5740 If MORECORE_CONTIGUOUS is false:
5741
5742 * MORECORE must allocate in multiples of pagesize. It will
5743 only be called with arguments that are multiples of pagesize.
5744
5745 * MORECORE(0) must return an address that is at least
5746 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5747
5748 else (i.e. If MORECORE_CONTIGUOUS is true):
5749
5750 * Consecutive calls to MORECORE with positive arguments
5751 return increasing addresses, indicating that space has been
5752 contiguously extended.
5753
5754 * MORECORE need not allocate in multiples of pagesize.
5755 Calls to MORECORE need not have args of multiples of pagesize.
5756
5757 * MORECORE need not page-align.
5758
5759 In either case:
5760
5761 * MORECORE may allocate more memory than requested. (Or even less,
5762 but this will generally result in a malloc failure.)
5763
5764 * MORECORE must not allocate memory when given argument zero, but
5765 instead return one past the end address of memory from previous
5766 nonzero call. This malloc does NOT call MORECORE(0)
5767 until at least one call with positive arguments is made, so
5768 the initial value returned is not important.
5769
5770 * Even though consecutive calls to MORECORE need not return contiguous
5771 addresses, it must be OK for malloc'ed chunks to span multiple
5772 regions in those cases where they do happen to be contiguous.
5773
5774 * MORECORE need not handle negative arguments -- it may instead
5775 just return MORECORE_FAILURE when given negative arguments.
5776 Negative arguments are always multiples of pagesize. MORECORE
5777 must not misinterpret negative args as large positive unsigned
5778 args. You can suppress all such calls from even occurring by defining
5779 MORECORE_CANNOT_TRIM.
5780
5781 There is some variation across systems about the type of the
5782 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5783 actually be size_t, because sbrk supports negative args, so it is
5784 normally the signed type of the same width as size_t (sometimes
5785 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5786 matter though. Internally, we use "long" as arguments, which should
5787 work across all reasonable possibilities.
5788
5789 Additionally, if MORECORE ever returns failure for a positive
5790 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
5791 system allocator. This is a useful backup strategy for systems with
5792 holes in address spaces -- in this case sbrk cannot contiguously
5793 expand the heap, but mmap may be able to map noncontiguous space.
5794
5795 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5796 a function that always returns MORECORE_FAILURE.
5797
5798 If you are using this malloc with something other than sbrk (or its
5799 emulation) to supply memory regions, you probably want to set
5800 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5801 allocator kindly contributed for pre-OSX macOS. It uses virtually
5802 but not necessarily physically contiguous non-paged memory (locked
5803 in, present and won't get swapped out). You can use it by
5804 uncommenting this section, adding some #includes, and setting up the
5805 appropriate defines above:
5806
5807 #define MORECORE osMoreCore
5808 #define MORECORE_CONTIGUOUS 0
5809
5810 There is also a shutdown routine that should somehow be called for
5811 cleanup upon program exit.
5812
5813 #define MAX_POOL_ENTRIES 100
5814 #define MINIMUM_MORECORE_SIZE (64 * 1024)
5815 static int next_os_pool;
5816 void *our_os_pools[MAX_POOL_ENTRIES];
5817
5818 void *osMoreCore(int size)
5819 {
5820 void *ptr = 0;
5821 static void *sbrk_top = 0;
5822
5823 if (size > 0)
5824 {
5825 if (size < MINIMUM_MORECORE_SIZE)
5826 size = MINIMUM_MORECORE_SIZE;
5827 if (CurrentExecutionLevel() == kTaskLevel)
5828 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5829 if (ptr == 0)
5830 {
5831 return (void *) MORECORE_FAILURE;
5832 }
5833 // save ptrs so they can be freed during cleanup
5834 our_os_pools[next_os_pool] = ptr;
5835 next_os_pool++;
5836 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5837 sbrk_top = (char *) ptr + size;
5838 return ptr;
5839 }
5840 else if (size < 0)
5841 {
5842 // we don't currently support shrink behavior
5843 return (void *) MORECORE_FAILURE;
5844 }
5845 else
5846 {
5847 return sbrk_top;
5848 }
5849 }
5850
5851 // cleanup any allocated memory pools
5852 // called as last thing before shutting down driver
5853
5854 void osCleanupMem(void)
5855 {
5856 void **ptr;
5857
5858 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5859 if (*ptr)
5860 {
5861 PoolDeallocate(*ptr);
5862 *ptr = 0;
5863 }
5864 }
5865
5866 */
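/*
  A minimal sketch of the "always fail" MORECORE mentioned above, which
  forces this malloc to satisfy every request through mmap (when
  HAVE_MMAP is set).  fail_morecore is a hypothetical name, and the int
  argument follows the osMoreCore example above; it would be wired in
  with the usual defines before building.
*/
#if 0
/* #define MORECORE fail_morecore */
/* #define MORECORE_CONTIGUOUS 0 */

void *fail_morecore (int size)
{
  return (void *) MORECORE_FAILURE;   /* never extend the brk heap */
}
#endif /* 0 */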
5867
5868
5869 /* Helper code. */
5870
5871 extern char **__libc_argv attribute_hidden;
5872
5873 static void
5874 malloc_printerr(int action, const char *str, void *ptr)
5875 {
5876 if ((action & 5) == 5)
5877 __libc_message (action & 2, "%s\n", str);
5878 else if (action & 1)
5879 {
5880 char buf[2 * sizeof (uintptr_t) + 1];
5881
5882 buf[sizeof (buf) - 1] = '\0';
5883 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
5884 while (cp > buf)
5885 *--cp = '0';
5886
5887 __libc_message (action & 2,
5888 "*** glibc detected *** %s: %s: 0x%s ***\n",
5889 __libc_argv[0] ?: "<unknown>", str, cp);
5890 }
5891 else if (action & 2)
5892 abort ();
5893 }
5894
5895 #ifdef _LIBC
5896 # include <sys/param.h>
5897
5898 /* We need a wrapper function for one of the additions of POSIX. */
5899 int
5900 __posix_memalign (void **memptr, size_t alignment, size_t size)
5901 {
5902 void *mem;
5903 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
5904 __const __malloc_ptr_t)) =
5905 __memalign_hook;
5906
5907 /* Test whether the ALIGNMENT argument is valid. It must be a power of
5908 two multiple of sizeof (void *). */
5909 if (alignment % sizeof (void *) != 0
5910 || !powerof2 (alignment / sizeof (void *))
5911 || alignment == 0)
5912 return EINVAL;
5913
5914 /* Call the hook here, so that caller is posix_memalign's caller
5915 and not posix_memalign itself. */
5916 if (hook != NULL)
5917 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
5918 else
5919 mem = public_mEMALIGn (alignment, size);
5920
5921 if (mem != NULL) {
5922 *memptr = mem;
5923 return 0;
5924 }
5925
5926 return ENOMEM;
5927 }
5928 weak_alias (__posix_memalign, posix_memalign)
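/*
  Caller-side sketch of the wrapper above, using the standard
  posix_memalign() interface: the alignment must be a power of two and a
  multiple of sizeof (void *), errors come back as a return value rather
  than through errno, and the result is released with plain free().
*/
#if 0
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
  void *buf;
  int err = posix_memalign(&buf, 64, 4096);   /* 64-byte aligned 4 KiB */
  if (err != 0) {
    fprintf(stderr, "posix_memalign failed: %d (EINVAL or ENOMEM)\n", err);
    return 1;
  }
  printf("aligned buffer at %p\n", buf);
  free(buf);
  return 0;
}
#endif /* 0 */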
5929
5930 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5931 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5932 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5933 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5934 strong_alias (__libc_memalign, __memalign)
5935 weak_alias (__libc_memalign, memalign)
5936 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5937 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5938 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5939 strong_alias (__libc_mallinfo, __mallinfo)
5940 weak_alias (__libc_mallinfo, mallinfo)
5941 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5942
5943 weak_alias (__malloc_stats, malloc_stats)
5944 weak_alias (__malloc_usable_size, malloc_usable_size)
5945 weak_alias (__malloc_trim, malloc_trim)
5946 weak_alias (__malloc_get_state, malloc_get_state)
5947 weak_alias (__malloc_set_state, malloc_set_state)
5948
5949 #endif /* _LIBC */
5950
5951 /* ------------------------------------------------------------
5952 History:
5953
5954 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5955
5956 */
5957 /*
5958 * Local variables:
5959 * c-basic-offset: 2
5960 * End:
5961 */