/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>
   and Doug Lea <dl@cs.oswego.edu>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/*
  This is a version (aka ptmalloc2) of malloc/free/realloc written by
  Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.

* Version ptmalloc2-20011215
  $Id$
  based on:
  VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
           http://www.malloc.de/malloc/ptmalloc2.tar.gz
  Check before installing!

* Quickstart

  In order to compile this implementation, a Makefile is provided with
  the ptmalloc2 distribution, which has pre-defined targets for some
  popular systems (e.g. "make posix" for Posix threads).  All that is
  typically required with regard to compiler flags is the selection of
  the thread package via defining one out of USE_PTHREADS, USE_THR or
  USE_SPROC.  Check the thread-m.h file for what effects this has.
  Many/most systems will additionally require USE_TSD_DATA_HACK to be
  defined, so this is the default for "make posix".

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written.  However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator for malloc-intensive programs.

  The main properties of the algorithms are:
  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
    with ties normally decided via FIFO (i.e. least recently used).
  * For small (<= 64 bytes by default) requests, it is a caching
    allocator, that maintains pools of quickly recycled chunks.
  * In between, and for combinations of large and small requests, it does
    the best it can trying to meet both goals at once.
  * For very large requests (>= 128KB by default), it relies on system
    memory mapping facilities, if supported.

  For a longer but slightly out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

  You may already, by default, be using a C library containing a malloc
  that is based on some version of this malloc (for example in
  linux).  You might still want to use the one in this file in order to
  customize settings or to avoid overheads associated with library
  versions.

* Contents, described in more detail in "description of public routines" below.

  Standard (ANSI/SVID/...) functions:
    malloc(size_t n);
    calloc(size_t n_elements, size_t element_size);
    free(Void_t* p);
    realloc(Void_t* p, size_t n);
    memalign(size_t alignment, size_t n);
    valloc(size_t n);
    mallinfo()
    mallopt(int parameter_number, int parameter_value)

  Additional functions:
    independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
    independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
    pvalloc(size_t n);
    cfree(Void_t* p);
    malloc_trim(size_t pad);
    malloc_usable_size(Void_t* p);
    malloc_stats();

* Vital statistics:

  Supported pointer representation:       4 or 8 bytes
  Supported size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.
       You can adjust this by defining INTERNAL_SIZE_T

  Alignment:                              2 * sizeof(size_t) (default)
       (i.e., 8 byte alignment with 4byte size_t). This suffices for
       nearly all current machines and C compilers. However, you can
       define MALLOC_ALIGNMENT to be wider than this if necessary.

  Minimum overhead per allocated chunk:   4 or 8 bytes
       Each malloced chunk has a hidden word of overhead holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field and 8 (16) bytes for
       free list pointers. Thus, the minimum allocatable size is
       16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

       The maximum overhead wastage (i.e., number of extra bytes
       allocated than were requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is 2 *
       sizeof(size_t) bytes plus the remainder from a system page (the
       minimal mmap unit); typically 4096 or 8192 bytes.

  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
                           8-byte size_t: 2^64 minus about two pages

       It is assumed that (possibly signed) size_t values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. The ISO C standard says that it must be
       unsigned, but a few systems are known not to adhere to this.
       Additionally, even when size_t is unsigned, sbrk (which is by
       default used to obtain memory from system) accepts signed
       arguments, and may not be able to handle size_t-wide arguments
       with negative sign bit.  Generally, values that would
       appear as negative after accounting for overhead and alignment
       are supported only via mmap(), which does not have this
       limitation.

       Requests for sizes outside the allowed range will perform an
       optional failure action and then return null. (Requests may
       also fail because a system is out of memory.)

  Thread-safety: thread-safe unless NO_THREADS is defined

  Compliance: I believe it is compliant with the 1997 Single Unix Specification
       (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
       others as well.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People also report using it in stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  It is not
    at all modular. (Sorry!)  It uses a lot of macros.  To be at all
    usable, this code should be compiled using an optimizing compiler
    (for example gcc -O3) that can simplify expressions and control
    paths. (FAQ: some macros import variables as arguments rather than
    declare locals because people reported that some debuggers
    otherwise get confused.)

    OPTION                     DEFAULT VALUE

    Compilation Environment options:

    __STD_C                    derived from C compiler defines
    WIN32                      NOT defined
    HAVE_MEMCPY                defined
    USE_MEMCPY                 1 if HAVE_MEMCPY is defined
    HAVE_MMAP                  defined as 1
    MMAP_CLEARS                1
    HAVE_MREMAP                0 unless linux defined
    USE_ARENAS                 the same as HAVE_MMAP
    malloc_getpagesize         derived from system #includes, or 4096 if not
    HAVE_USR_INCLUDE_MALLOC_H  NOT defined
    LACKS_UNISTD_H             NOT defined unless WIN32
    LACKS_SYS_PARAM_H          NOT defined unless WIN32
    LACKS_SYS_MMAN_H           NOT defined unless WIN32

    Changing default word sizes:

    INTERNAL_SIZE_T            size_t
    MALLOC_ALIGNMENT           2 * sizeof(INTERNAL_SIZE_T)

    Configuration and functionality options:

    USE_DL_PREFIX              NOT defined
    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
    USE_MALLOC_LOCK            NOT defined
    MALLOC_DEBUG               NOT defined
    REALLOC_ZERO_BYTES_FREES   1
    MALLOC_FAILURE_ACTION      errno = ENOMEM, if __STD_C defined, else no-op
    TRIM_FASTBINS              0

    Options for customizing MORECORE:

    MORECORE                   sbrk
    MORECORE_FAILURE           -1
    MORECORE_CONTIGUOUS        1
    MORECORE_CANNOT_TRIM       NOT defined
    MORECORE_CLEARS            1
    MMAP_AS_MORECORE_SIZE      (1024 * 1024)

    Tuning options that are also dynamically changeable via mallopt:

    DEFAULT_MXFAST             64
    DEFAULT_TRIM_THRESHOLD     128 * 1024
    DEFAULT_TOP_PAD            0
    DEFAULT_MMAP_THRESHOLD     128 * 1024
    DEFAULT_MMAP_MAX           65536

    There are several other #defined constants and macros that you
    probably don't want to touch unless you are extending or adapting malloc.  */

/*
  __STD_C should be nonzero if using ANSI-standard C compiler, a C++
  compiler, or a C compiler sufficiently close to ANSI to get away
  with it.
*/

#ifndef __STD_C
#if defined(__STDC__) || defined(__cplusplus)
#define __STD_C     1
#else
#define __STD_C     0
#endif
#endif /*__STD_C*/


/*
  Void_t* is the pointer type that malloc should say it returns
*/

#ifndef Void_t
#if (__STD_C || defined(WIN32))
#define Void_t      void
#else
#define Void_t      char
#endif
#endif /*Void_t*/

#if __STD_C
#include <stddef.h>   /* for size_t */
#include <stdlib.h>   /* for getenv(), abort() */
#else
#include <sys/types.h>
#endif

#include <malloc-machine.h>

#ifdef _LIBC
#include <stdio-common/_itoa.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */

/* #define  LACKS_UNISTD_H */

#ifndef LACKS_UNISTD_H
#include <unistd.h>
#endif

/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */

/* #define  LACKS_SYS_PARAM_H */


#include <stdio.h>    /* needed for malloc_stats */
#include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */

/* For uintptr_t.  */
#include <stdint.h>

/* For va_arg, va_start, va_end.  */
#include <stdarg.h>

/* For writev and struct iovec.  */
#include <sys/uio.h>
/* For syslog.  */
#include <sys/syslog.h>

/* For various dynamic linking things.  */
#include <dlfcn.h>


/*
  Debugging:

  Because freed chunks may be overwritten with bookkeeping fields, this
  malloc will often die when freed memory is overwritten by user
  programs.  This can be very effective (albeit in an annoying way)
  in helping track down dangling pointers.

  If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  enabled that will catch more memory errors. You probably won't be
  able to make much sense of the actual assertion errors, but they
  should help you locate incorrectly overwritten memory.  The checking
  is fairly extensive, and will slow down execution
  noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries. (By nature, mmapped regions
  cannot be checked very much automatically.)

  Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  this code. The assertions in the check routines spell out in more
  detail the assumptions and invariants underlying the algorithms.

  Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  checking that all accesses to malloced memory stay within their
  bounds. However, there are several add-ons and adaptations of this
  or other mallocs available that do this.
*/

#if MALLOC_DEBUG
#include <assert.h>
#else
#undef assert
#define assert(x) ((void)0)
#endif


/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes.

  The default version is the same as size_t.

  While not strictly necessary, it is best to define this as an
  unsigned type, even if size_t is a signed type. This may avoid some
  artificial size limitations on some systems.

  On a 64-bit machine, you may be able to reduce malloc overhead by
  defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
  expense of not being able to handle more than 2^32 of malloced
  space. If this limitation is acceptable, you are encouraged to set
  this unless you are on a platform requiring 16byte alignments. In
  this case the alignment requirements turn out to negate any
  potential advantages of decreasing size_t word size.

  Implementors: Beware of the possible combinations of:
     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
       and might be the same width as int or as long
     - size_t might have different width and signedness than INTERNAL_SIZE_T
     - int and long might be 32 or 64 bits, and might be the same width
  To deal with this, most comparisons and difference computations
  among INTERNAL_SIZE_Ts should cast them to unsigned long, being
  aware of the fact that casting an unsigned int to a wider long does
  not sign-extend. (This also makes checking for negative numbers
  awkward.) Some of these casts result in harmless compiler warnings
  on some systems.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

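/*
  An illustrative sketch (not from the original sources) of the casting
  rule described above: comparisons between INTERNAL_SIZE_T values go
  through unsigned long so they stay unsigned even if INTERNAL_SIZE_T
  was configured as a signed type.

    static int request_fits(INTERNAL_SIZE_T avail, INTERNAL_SIZE_T want) {
      return (unsigned long) avail >= (unsigned long) want;
    }
*/
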
/* The corresponding word size */
#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))


/*
  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
  It must be a power of two at least 2 * SIZE_SZ, even on machines
  for which smaller alignments would suffice. It may be defined as
  larger than this though. Note however that code and data structures
  are optimized for the case of 8-byte alignment.
*/


#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
#endif

/* The corresponding bit mask value */
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)

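/*
  An illustrative sketch (not from the original sources) of the rounding
  idiom this power-of-two mask supports: adding the mask and clearing
  its bits rounds a size up to the next multiple of MALLOC_ALIGNMENT.
  With the 8-byte default, round_up_to_alignment(13) yields 16.

    static size_t round_up_to_alignment(size_t n) {
      return (n + MALLOC_ALIGN_MASK) & ~((size_t) MALLOC_ALIGN_MASK);
    }
*/
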


/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  This is required by the C standard. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/

#ifndef REALLOC_ZERO_BYTES_FREES
#define REALLOC_ZERO_BYTES_FREES 1
#endif

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming. Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks.  You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option, that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/

#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif


/*
  USE_DL_PREFIX will prefix all public routines with the string 'dl'.
  This is necessary when you only want to use this malloc in one part
  of a program, using your regular system malloc elsewhere.
*/

/* #define USE_DL_PREFIX */


/*
   Two-phase name translation.
   All of the actual routines are given mangled names.
   When wrappers are used, they become the public callable versions.
   When DL_PREFIX is used, the callable names are prefixed.
*/

#ifdef USE_DL_PREFIX
#define public_cALLOc    dlcalloc
#define public_fREe      dlfree
#define public_cFREe     dlcfree
#define public_mALLOc    dlmalloc
#define public_mEMALIGn  dlmemalign
#define public_rEALLOc   dlrealloc
#define public_vALLOc    dlvalloc
#define public_pVALLOc   dlpvalloc
#define public_mALLINFo  dlmallinfo
#define public_mALLOPt   dlmallopt
#define public_mTRIm     dlmalloc_trim
#define public_mSTATs    dlmalloc_stats
#define public_mUSABLe   dlmalloc_usable_size
#define public_iCALLOc   dlindependent_calloc
#define public_iCOMALLOc dlindependent_comalloc
#define public_gET_STATe dlget_state
#define public_sET_STATe dlset_state
#else /* USE_DL_PREFIX */
#ifdef _LIBC

/* Special defines for the GNU C library.  */
#define public_cALLOc    __libc_calloc
#define public_fREe      __libc_free
#define public_cFREe     __libc_cfree
#define public_mALLOc    __libc_malloc
#define public_mEMALIGn  __libc_memalign
#define public_rEALLOc   __libc_realloc
#define public_vALLOc    __libc_valloc
#define public_pVALLOc   __libc_pvalloc
#define public_mALLINFo  __libc_mallinfo
#define public_mALLOPt   __libc_mallopt
#define public_mTRIm     __malloc_trim
#define public_mSTATs    __malloc_stats
#define public_mUSABLe   __malloc_usable_size
#define public_iCALLOc   __libc_independent_calloc
#define public_iCOMALLOc __libc_independent_comalloc
#define public_gET_STATe __malloc_get_state
#define public_sET_STATe __malloc_set_state
#define malloc_getpagesize __getpagesize()
#define open             __open
#define mmap             __mmap
#define munmap           __munmap
#define mremap           __mremap
#define mprotect         __mprotect
#define MORECORE         (*__morecore)
#define MORECORE_FAILURE 0

Void_t * __default_morecore (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;

#else /* !_LIBC */
#define public_cALLOc    calloc
#define public_fREe      free
#define public_cFREe     cfree
#define public_mALLOc    malloc
#define public_mEMALIGn  memalign
#define public_rEALLOc   realloc
#define public_vALLOc    valloc
#define public_pVALLOc   pvalloc
#define public_mALLINFo  mallinfo
#define public_mALLOPt   mallopt
#define public_mTRIm     malloc_trim
#define public_mSTATs    malloc_stats
#define public_mUSABLe   malloc_usable_size
#define public_iCALLOc   independent_calloc
#define public_iCOMALLOc independent_comalloc
#define public_gET_STATe malloc_get_state
#define public_sET_STATe malloc_set_state
#endif /* _LIBC */
#endif /* USE_DL_PREFIX */
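
/*
  An illustrative sketch (not from the original sources) of client code
  when this file is compiled with -DUSE_DL_PREFIX, so the regular
  system malloc remains usable alongside this one:

    #include <stdlib.h>
    void* dlmalloc(size_t);
    void  dlfree(void*);

    void example(void) {
      void* p = dlmalloc(64);   // served by this allocator
      void* q = malloc(64);     // served by the system allocator
      dlfree(p);
      free(q);
    }
*/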

#ifndef _LIBC
#define __builtin_expect(expr, val)	(expr)

#define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
#endif

/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc.  Otherwise simple
  macro versions are defined below.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are faster than libc versions on some systems.

  Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
  (of <= 36 bytes) are manually unrolled in realloc and calloc.
*/

#define HAVE_MEMCPY

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif


#if (__STD_C || defined(HAVE_MEMCPY))

#ifdef _LIBC
# include <string.h>
#else
#ifdef WIN32
/* On Win32 memset and memcpy are already declared in windows.h */
#else
#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
#else
Void_t* memset();
Void_t* memcpy();
#endif
#endif
#endif
#endif

/*
  MALLOC_FAILURE_ACTION is the action to take before "return 0" when
  malloc fails to be able to return memory, either because memory is
  exhausted or because of illegal arguments.

  By default, sets errno if running on STD_C platform, else does nothing.
*/

#ifndef MALLOC_FAILURE_ACTION
#if __STD_C
#define MALLOC_FAILURE_ACTION \
   errno = ENOMEM;

#else
#define MALLOC_FAILURE_ACTION
#endif
#endif
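
/*
  Because MALLOC_FAILURE_ACTION is guarded by #ifndef above, a build
  can supply its own action before this point.  An illustrative sketch
  (not from the original sources) that logs as well as sets errno:

    #define MALLOC_FAILURE_ACTION \
      { errno = ENOMEM; fprintf(stderr, "malloc: request failed\n"); }
*/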

/*
  MORECORE-related declarations. By default, rely on sbrk
*/


#ifdef LACKS_UNISTD_H
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif
#endif
#endif

/*
  MORECORE is the name of the routine to call to obtain more memory
  from the system.  See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/

#ifndef MORECORE
#define MORECORE sbrk
#endif
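
/*
  An illustrative sketch (not from the original sources) of an
  alternative MORECORE that parcels out a fixed static arena instead of
  calling sbrk.  It follows the conventions used here: successive
  addresses for non-negative increments, MORECORE_FAILURE when space
  runs out.  Since it refuses negative (trimming) requests, such a
  build should also define MORECORE_CANNOT_TRIM.

    #define STATIC_ARENA_SIZE (8 * 1024 * 1024)
    static char static_arena[STATIC_ARENA_SIZE];
    static size_t arena_used;

    Void_t* static_morecore(ptrdiff_t increment) {
      Void_t* p;
      if (increment < 0 ||
          (size_t) increment > STATIC_ARENA_SIZE - arena_used)
        return (Void_t*) MORECORE_FAILURE;
      p = static_arena + arena_used;
      arena_used += (size_t) increment;
      return p;
    }

  compiled with -DMORECORE=static_morecore -DMORECORE_CANNOT_TRIM.
*/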

/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap. Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif

/*
  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses.  This is true of unix sbrk.  Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls. But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/

#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  Define MORECORE_CANNOT_TRIM if your version of MORECORE
  cannot release space back to the system when given negative
  arguments. This is generally necessary only if you are using
  a hand-crafted MORECORE function that cannot handle negative arguments.
*/

/* #define MORECORE_CANNOT_TRIM */

/* MORECORE_CLEARS           (default 1)
     The degree to which the routine mapped to MORECORE zeroes out
     memory: never (0), only for newly allocated space (1) or always
     (2).  The distinction between (1) and (2) is necessary because on
     some systems, if the application first decrements and then
     increments the break value, the contents of the reallocated space
     are unspecified.
*/

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

/*
  Define HAVE_MMAP as true to optionally make malloc() use mmap() to
  allocate very large blocks.  These will be returned to the
  operating system immediately after a free(). Also, if mmap
  is available, it is used as a backup strategy in cases where
  MORECORE fails to provide space from system.

  This malloc is best tuned to work with mmap for large requests.
  If you do not have mmap, operations involving very large chunks (1MB
  or so) may be slower than you'd like.
*/

#ifndef HAVE_MMAP
#define HAVE_MMAP 1

/*
   Standard unix mmap using /dev/zero clears memory so calloc doesn't
   need to.
*/

#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif

#else /* no mmap */
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 0
#endif
#endif


/*
   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
   sbrk fails, and mmap is used as a backup (which is done only if
   HAVE_MMAP).  The value must be a multiple of page size.  This
   backup strategy generally applies only when systems have "holes" in
   address space, so sbrk cannot perform contiguous expansion, but
   there is still space available on system.  On systems for which
   this is known to be useful (i.e. most linux kernels), this occurs
   only when programs allocate huge amounts of memory.  Between this,
   and the fact that mmap regions tend to be limited, the size should
   be large, to avoid too many mmap calls and thus avoid running out
   of kernel resources.
*/

#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif

#endif /* HAVE_MREMAP */

/* Define USE_ARENAS to enable support for multiple `arenas'.  These
   are allocated using mmap(), are necessary for threads and
   occasionally useful to overcome address space limitations affecting
   sbrk(). */

#ifndef USE_ARENAS
#define USE_ARENAS HAVE_MMAP
#endif


/*
  The system page size. To the extent possible, this malloc manages
  memory from the system in page-size units.  Note that this value is
  cached during initialization into a field of malloc_state. So even
  if malloc_getpagesize is a function, it is only called once.

  The following mechanics for getpagesize were adapted from bsd/gnu
  getpagesize.h. If none of the system-probes here apply, a value of
  4096 is used, which should be OK: If they don't apply, then using
  the actual value probably doesn't impact performance.
*/


#ifndef malloc_getpagesize

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif

#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32 /* use supplied emulation of getpagesize */
#        define malloc_getpagesize getpagesize()
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else /* just guess */
#                define malloc_getpagesize (4096)
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif

/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics. It should work on any SVID/XPG compliant system that has
  a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
  install such a thing yourself, cut out the preliminary declarations
  as described above and below and save them in a malloc.h file. But
  there's no compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc.  These fields are instead filled by mallinfo() with
  other numbers that might be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.  The original SVID version of this struct,
  defined on most systems with mallinfo, declares all fields as
  ints. But some others define as unsigned long. If your system
  defines the fields using a type of different width than listed here,
  you must #include your system version and #define
  HAVE_USR_INCLUDE_MALLOC_H.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#endif


/* ---------- description of public routines ------------ */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  if no space is available. Additionally, on failure, errno is
  set to ENOMEM on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  systems.)  On most systems, size_t is an unsigned type, so calls
  with negative arguments are interpreted as requests for huge amounts
  of space, which will often fail. The maximum supported value of n
  differs across systems, but is in all cases less than the maximum
  representable value of a size_t.
*/
#if __STD_C
Void_t*  public_mALLOc(size_t);
#else
Void_t*  public_mALLOc();
#endif
#ifdef libc_hidden_proto
libc_hidden_proto (public_mALLOc)
#endif
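
/*
  An illustrative usage sketch (not from the original sources),
  assuming the standard 'malloc' public name:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    char* get_buffer(size_t n) {
      char* buf = malloc(n);   // n == 0 still yields a valid,
                               // minimum-sized chunk
      if (buf == NULL && errno == ENOMEM)
        fprintf(stderr, "out of memory\n");
      return buf;
    }
*/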

/*
  free(Void_t* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. It can have arbitrary (i.e., bad!)
  effects if p has already been freed.

  Unless disabled (using mallopt), freeing very large spaces will,
  when possible, automatically trigger operations that give
  back unused memory to the system, thus reducing program footprint.
*/
#if __STD_C
void     public_fREe(Void_t*);
#else
void     public_fREe();
#endif
#ifdef libc_hidden_proto
libc_hidden_proto (public_fREe)
#endif
f65fd747 | 870 | |
fa8d436c UD |
871 | /* |
872 | calloc(size_t n_elements, size_t element_size); | |
873 | Returns a pointer to n_elements * element_size bytes, with all locations | |
874 | set to zero. | |
875 | */ | |
876 | #if __STD_C | |
877 | Void_t* public_cALLOc(size_t, size_t); | |
878 | #else | |
879 | Void_t* public_cALLOc(); | |
f65fd747 UD |
880 | #endif |
881 | ||
/*
  realloc(Void_t* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p when possible, otherwise it employs the
  equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  Unless the #define
  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  zero (re)allocates a minimum-sized chunk.

  Large chunks that were internally obtained via mmap will always
  be reallocated using malloc-copy-free sequences unless
  the system supports MREMAP (currently only linux).

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
#if __STD_C
Void_t*  public_rEALLOc(Void_t*, size_t);
#else
Void_t*  public_rEALLOc();
#endif
#ifdef libc_hidden_proto
libc_hidden_proto (public_rEALLOc)
#endif
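
/*
  Because a failed realloc leaves p allocated, the usual safe pattern
  (an illustrative sketch, not from the original sources) keeps the old
  pointer until the new one is known to be valid:

    #include <stdlib.h>

    int grow(char** bufp, size_t new_size) {
      char* tmp = realloc(*bufp, new_size);
      if (tmp == NULL)
        return -1;           // *bufp is still valid and NOT freed
      *bufp = tmp;
      return 0;
    }
*/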

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
#if __STD_C
Void_t*  public_mEMALIGn(size_t, size_t);
#else
Void_t*  public_mEMALIGn();
#endif
#ifdef libc_hidden_proto
libc_hidden_proto (public_mEMALIGn)
#endif
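
/*
  An illustrative sketch (not from the original sources): requesting a
  64-byte-aligned block, e.g. to match a cache line; 64 is a power of
  two greater than the 8-byte default guarantee:

    void* p = memalign(64, 1024);
    if (p != 0) {
      // ... use p ...
      free(p);
    }
*/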

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
#if __STD_C
Void_t*  public_vALLOc(size_t);
#else
Void_t*  public_vALLOc();
#endif


/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  so setting them has no effect. But this malloc also supports four
  other options in mallopt. See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #   default    allowed param values
  M_MXFAST          1         64         0-80  (0 disables fastbins)
  M_TRIM_THRESHOLD  -1        128*1024   any   (-1U disables trimming)
  M_TOP_PAD         -2        0          any
  M_MMAP_THRESHOLD  -3        128*1024   any   (or 0 if no MMAP support)
  M_MMAP_MAX        -4        65536      any   (0 disables use of mmap)
*/
#if __STD_C
int      public_mALLOPt(int, int);
#else
int      public_mALLOPt();
#endif

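/*
  An illustrative sketch (not from the original sources), using the
  parameter numbers tabulated above; each call returns 1 on success
  and 0 otherwise:

    #include <malloc.h>
    #include <stdio.h>

    void tune(void) {
      if (mallopt(M_TRIM_THRESHOLD, 256 * 1024) == 0 ||
          mallopt(M_MMAP_THRESHOLD, 256 * 1024) == 0)
        fprintf(stderr, "mallopt rejected a parameter\n");
    }
*/
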

/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    the number of fastbin blocks (i.e., small chunks that
               have been freed but not yet reused or consolidated)
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
               than current total if trimming has occurred.
  fsmblks:   total bytes held in fastbin blocks
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
#if __STD_C
struct mallinfo public_mALLINFo(void);
#else
struct mallinfo public_mALLINFo();
#endif
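
/*
  An illustrative sketch (not from the original sources) reading a few
  of the fields described above:

    #include <malloc.h>
    #include <stdio.h>

    void report(void) {
      struct mallinfo mi = mallinfo();
      printf("arena:    %d bytes obtained via sbrk\n",  mi.arena);
      printf("hblkhd:   %d bytes in mmapped regions\n", mi.hblkhd);
      printf("uordblks: %d bytes allocated\n",          mi.uordblks);
      printf("fordblks: %d bytes free\n",               mi.fordblks);
    }
*/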

/*
  independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use regular calloc and assign pointers into this
  space to represent elements.  (In this case though, you cannot
  independently free elements.)

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

  struct Node { int item; struct Node* next; };

  struct Node* build_list() {
    struct Node** pool;
    int i;
    int n = read_number_of_nodes_needed();
    if (n <= 0) return 0;
    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (i = 0; i < n-1; ++i)
      pool[i]->next = pool[i+1];
    free(pool);     // Can now free the array (or not, if it is needed later)
    return first;
  }
*/
#if __STD_C
Void_t** public_iCALLOc(size_t, size_t, Void_t**);
#else
Void_t** public_iCALLOc();
#endif

/*
  independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.    It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use a single regular malloc, and assign pointers at
  particular offsets in the aggregate space. (In this case though, you
  cannot independently free elements.)

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

  struct Head { ... };
  struct Foot { ... };

  void send_message(char* msg) {
    int msglen = strlen(msg);
    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void* chunks[3];
    if (independent_comalloc(3, sizes, chunks) == 0)
      die();
    struct Head* head = (struct Head*)(chunks[0]);
    char*        body = (char*)(chunks[1]);
    struct Foot* foot = (struct Foot*)(chunks[2]);
    // ...
  }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
#if __STD_C
Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
#else
Void_t** public_iCOMALLOc();
#endif


/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
#if __STD_C
Void_t*  public_pVALLOc(size_t);
#else
Void_t*  public_pVALLOc();
#endif

/*
  cfree(Void_t* p);
  Equivalent to free(p).

  cfree is needed/defined on some systems that pair it with calloc,
  for odd historical reasons (such as: cfree is used in example
  code in the first edition of K&R).
*/
#if __STD_C
void     public_cFREe(Void_t*);
#else
void     public_cFREe();
#endif

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
#if __STD_C
int      public_mTRIm(size_t);
#else
int      public_mTRIm();
#endif
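
/*
  An illustrative sketch (not from the original sources): after freeing
  a large working set, try to give memory back while keeping 64K of
  headroom for upcoming allocations:

    free(big_buffer);
    if (malloc_trim(64 * 1024))
      /* some memory was actually returned via a negative sbrk */;
*/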

/*
  malloc_usable_size(Void_t* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);

*/
#if __STD_C
size_t   public_mUSABLe(Void_t*);
#else
size_t   public_mUSABLe();
fa8d436c UD |
1208 | |
1209 | /* | |
1210 | malloc_stats(); | |
1211 | Prints on stderr the amount of space obtained from the system (both | |
1212 | via sbrk and mmap), the maximum amount (which may be more than | |
1213 | current if malloc_trim and/or munmap got called), and the current | |
1214 | number of bytes allocated via malloc (or realloc, etc) but not yet | |
1215 | freed. Note that this is the number of bytes allocated, not the | |
1216 | number requested. It will be larger than the number requested | |
1217 | because of alignment and bookkeeping overhead. Because it includes | |
1218 | alignment wastage as being in use, this figure may be greater than | |
1219 | zero even when no user-level chunks are allocated. | |
1220 | ||
1221 | The reported current and maximum system memory can be inaccurate if | |
1222 | a program makes other calls to system memory allocation functions | |
1223 | (normally sbrk) outside of malloc. | |
1224 | ||
1225 | malloc_stats prints only the most commonly interesting statistics. | |
1226 | More information can be obtained by calling mallinfo. | |
1227 | ||
1228 | */ | |
1229 | #if __STD_C | |
1230 | void public_mSTATs(void); | |
1231 | #else | |
1232 | void public_mSTATs(); | |
f65fd747 UD |
1233 | #endif |
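
/*
  An illustrative sketch (not from the original sources): since
  malloc_stats takes no arguments and returns void, it can be
  registered with atexit to print its report when the program exits:

    #include <malloc.h>
    #include <stdlib.h>

    int main(void) {
      atexit(malloc_stats);
      /* ... */
      return 0;
    }
*/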

/*
  malloc_get_state(void);

  Returns the state of all malloc variables in an opaque data
  structure.
*/
#if __STD_C
Void_t*  public_gET_STATe(void);
#else
Void_t*  public_gET_STATe();
#endif

/*
  malloc_set_state(Void_t* state);

  Restore the state of all malloc variables from data obtained with
  malloc_get_state().
*/
#if __STD_C
int      public_sET_STATe(Void_t*);
#else
int      public_sET_STATe();
#endif
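
/*
  An illustrative sketch (not from the original sources) pairing the
  two calls; the opaque state block is itself dynamically allocated,
  so it is freed once the restore is done:

    Void_t* state = malloc_get_state();
    if (state != 0) {
      /* ... code that reconfigures malloc, e.g. via mallopt ... */
      malloc_set_state(state);
      free(state);
    }
*/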

#ifdef _LIBC
/*
  posix_memalign(void **memptr, size_t alignment, size_t size);

  POSIX wrapper like memalign(), checking for validity of size.
*/
int      __posix_memalign(void **, size_t, size_t);
#endif

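/*
  An illustrative sketch (not from the original sources) of the POSIX
  calling convention, which reports failure through the return value
  (EINVAL or ENOMEM) rather than through errno:

    #include <stdlib.h>

    void* get_page(void) {
      void* page;
      /* alignment must be a power of two multiple of sizeof(void*) */
      if (posix_memalign(&page, 4096, 4096) != 0)
        return 0;
      return page;
    }
*/
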
/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces. This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 80. You wouldn't want it any higher than this anyway.  Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 8 4byte fields, or small strings representing words, tokens,
  etc. Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units. It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif
f65fd747 | 1300 | |
fa8d436c UD |
1301 | #ifndef DEFAULT_MXFAST |
1302 | #define DEFAULT_MXFAST 64 | |
10dc2a90 UD |
1303 | #endif |
1304 | ||
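/*
  Sketch: tuning fastbins through the standard mallopt interface
  declared in <malloc.h>.  Setting M_MXFAST to 0 disables fastbins,
  approximating pure fifo-best-fit as described above; 80 is the
  supported maximum.
*/
#if 0
#include <malloc.h>

void tune_fastbins(int want_speed)
{
  mallopt(M_MXFAST, want_speed ? 80 : 0);
}
#endif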
10dc2a90 | 1305 | |
fa8d436c UD |
1306 | /* |
1307 | M_TRIM_THRESHOLD is the maximum amount of unused top-most memory | |
1308 | to keep before releasing via malloc_trim in free(). | |
1309 | ||
1310 | Automatic trimming is mainly useful in long-lived programs. | |
1311 | Because trimming via sbrk can be slow on some systems, and can | |
1312 | sometimes be wasteful (in cases where programs immediately | |
1313 | afterward allocate more large chunks) the value should be high | |
1314 | enough so that your overall system performance would improve by | |
1315 | releasing this much memory. | |
1316 | ||
1317 | The trim threshold and the mmap control parameters (see below) | |
1318 | can be traded off with one another. Trimming and mmapping are | |
1319 | two different ways of releasing unused memory back to the | |
1320 | system. Between these two, it is often possible to keep | |
1321 | system-level demands of a long-lived program down to a bare | |
1322 | minimum. For example, in one test suite of sessions measuring | |
1323 | the XF86 X server on Linux, using a trim threshold of 128K and a | |
1324 | mmap threshold of 192K led to near-minimal long term resource | |
1325 | consumption. | |
1326 | ||
1327 | If you are using this malloc in a long-lived program, it should | |
1328 | pay to experiment with these values. As a rough guide, you | |
1329 | might set to a value close to the average size of a process | |
1330 | (program) running on your system. Releasing this much memory | |
1331 | would allow such a process to run in memory. Generally, it's | |
1332 | worth it to tune for trimming rather than memory mapping when a | |
1333 | program undergoes phases where several large chunks are | |
1334 | allocated and released in ways that can reuse each other's | |
1335 | storage, perhaps mixed with phases where there are no such | |
1336 | chunks at all. And in well-behaved long-lived programs, | |
1337 | controlling release of large blocks via trimming versus mapping | |
1338 | is usually faster. | |
1339 | ||
1340 | However, in most programs, these parameters serve mainly as | |
1341 | protection against the system-level effects of carrying around | |
1342 | massive amounts of unneeded memory. Since frequent calls to | |
1343 | sbrk, mmap, and munmap otherwise degrade performance, the default | |
1344 | parameters are set to relatively high values that serve only as | |
1345 | safeguards. | |
1346 | ||
1347 | The trim value must be greater than page size to have any useful | |
a9177ff5 | 1348 | effect. To disable trimming completely, you can set to |
fa8d436c UD |
1349 | (unsigned long)(-1) |
1350 | ||
1351 | Trim settings interact with fastbin (MXFAST) settings: Unless | |
1352 | TRIM_FASTBINS is defined, automatic trimming never takes place upon | |
1353 | freeing a chunk with size less than or equal to MXFAST. Trimming is | |
1354 | instead delayed until subsequent freeing of larger chunks. However, | |
1355 | you can still force an attempted trim by calling malloc_trim. | |
1356 | ||
1357 | Also, trimming is not generally possible in cases where | |
1358 | the main arena is obtained via mmap. | |
1359 | ||
1360 | Note that the trick some people use of mallocing a huge space and | |
1361 | then freeing it at program startup, in an attempt to reserve system | |
1362 | memory, doesn't have the intended effect under automatic trimming, | |
1363 | since that memory will immediately be returned to the system. | |
1364 | */ | |
1365 | ||
1366 | #define M_TRIM_THRESHOLD -1 | |
1367 | ||
1368 | #ifndef DEFAULT_TRIM_THRESHOLD | |
1369 | #define DEFAULT_TRIM_THRESHOLD (128 * 1024) | |
1370 | #endif | |
1371 | ||
1372 | /* | |
1373 | M_TOP_PAD is the amount of extra `padding' space to allocate or | |
1374 | retain whenever sbrk is called. It is used in two ways internally: | |
1375 | ||
1376 | * When sbrk is called to extend the top of the arena to satisfy | |
1377 | a new malloc request, this much padding is added to the sbrk | |
1378 | request. | |
1379 | ||
1380 | * When malloc_trim is called automatically from free(), | |
1381 | it is used as the `pad' argument. | |
1382 | ||
1383 | In both cases, the actual amount of padding is rounded | |
1384 | so that the end of the arena is always a system page boundary. | |
1385 | ||
1386 | The main reason for using padding is to avoid calling sbrk so | |
1387 | often. Having even a small pad greatly reduces the likelihood | |
1388 | that nearly every malloc request during program start-up (or | |
1389 | after trimming) will invoke sbrk, which needlessly wastes | |
1390 | time. | |
1391 | ||
1392 | Automatic rounding-up to page-size units is normally sufficient | |
1393 | to avoid measurable overhead, so the default is 0. However, in | |
1394 | systems where sbrk is relatively slow, it can pay to increase | |
1395 | this value, at the expense of carrying around more memory than | |
1396 | the program needs. | |
1397 | */ | |
10dc2a90 | 1398 | |
fa8d436c | 1399 | #define M_TOP_PAD -2 |
10dc2a90 | 1400 | |
fa8d436c UD |
1401 | #ifndef DEFAULT_TOP_PAD |
1402 | #define DEFAULT_TOP_PAD (0) | |
1403 | #endif | |
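/*
  Sketch: the trim and padding knobs from the two sections above, set
  via mallopt.  The 128K trim value echoes the XF86 measurement cited
  earlier; M_TOP_PAD trades extra memory for fewer sbrk calls on
  systems where sbrk is slow.
*/
#if 0
#include <malloc.h>

void tune_sbrk_behavior(void)
{
  mallopt(M_TRIM_THRESHOLD, 128 * 1024); /* trim when >128K idle on top */
  mallopt(M_TOP_PAD, 64 * 1024);         /* over-ask sbrk by 64K */
  /* mallopt(M_TRIM_THRESHOLD, -1) would disable trimming entirely. */
}
#endif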
f65fd747 | 1404 | |
fa8d436c UD |
1405 | /* |
1406 | M_MMAP_THRESHOLD is the request size threshold for using mmap() | |
1407 | to service a request. Requests of at least this size that cannot | |
1408 | be allocated using already-existing space will be serviced via mmap. | |
1409 | (If enough normal freed space already exists it is used instead.) | |
1410 | ||
1411 | Using mmap segregates relatively large chunks of memory so that | |
1412 | they can be individually obtained and released from the host | |
1413 | system. A request serviced through mmap is never reused by any | |
1414 | other request (at least not directly; the system may just so | |
1415 | happen to remap successive requests to the same locations). | |
1416 | ||
1417 | Segregating space in this way has the benefits that: | |
1418 | ||
a9177ff5 RM |
1419 | 1. Mmapped space can ALWAYS be individually released back |
1420 | to the system, which helps keep the system level memory | |
1421 | demands of a long-lived program low. | |
fa8d436c UD |
1422 | 2. Mapped memory can never become `locked' between |
1423 | other chunks, as can happen with normally allocated chunks, which | |
1424 | means that even trimming via malloc_trim would not release them. | |
1425 | 3. On some systems with "holes" in address spaces, mmap can obtain | |
1426 | memory that sbrk cannot. | |
1427 | ||
1428 | However, it has the disadvantages that: | |
1429 | ||
1430 | 1. The space cannot be reclaimed, consolidated, and then | |
1431 | used to service later requests, as happens with normal chunks. | |
1432 | 2. It can lead to more wastage because of mmap page alignment | |
1433 | requirements. | |
1434 | 3. It causes malloc performance to be more dependent on host | |
1435 | system memory management support routines which may vary in | |
1436 | implementation quality and may impose arbitrary | |
1437 | limitations. Generally, servicing a request via normal | |
1438 | malloc steps is faster than going through a system's mmap. | |
1439 | ||
1440 | The advantages of mmap nearly always outweigh disadvantages for | |
1441 | "large" chunks, but the value of "large" varies across systems. The | |
1442 | default is an empirically derived value that works well in most | |
1443 | systems. | |
f65fd747 UD |
1444 | */ |
1445 | ||
fa8d436c | 1446 | #define M_MMAP_THRESHOLD -3 |
f65fd747 | 1447 | |
fa8d436c UD |
1448 | #ifndef DEFAULT_MMAP_THRESHOLD |
1449 | #define DEFAULT_MMAP_THRESHOLD (128 * 1024) | |
1450 | #endif | |
1451 | ||
1452 | /* | |
1453 | M_MMAP_MAX is the maximum number of requests to simultaneously | |
1454 | service using mmap. This parameter exists because | |
1455 | some systems have a limited number of internal tables for | |
1456 | use by mmap, and using more than a few of them may degrade | |
1457 | performance. | |
1458 | ||
1459 | The default is set to a value that serves only as a safeguard. | |
1460 | Setting to 0 disables use of mmap for servicing large requests. If | |
1461 | HAVE_MMAP is not set, the default value is 0, and attempts to set it | |
1462 | to non-zero values in mallopt will fail. | |
1463 | */ | |
f65fd747 | 1464 | |
fa8d436c UD |
1465 | #define M_MMAP_MAX -4 |
1466 | ||
1467 | #ifndef DEFAULT_MMAP_MAX | |
1468 | #if HAVE_MMAP | |
1469 | #define DEFAULT_MMAP_MAX (65536) | |
1470 | #else | |
1471 | #define DEFAULT_MMAP_MAX (0) | |
1472 | #endif | |
f65fd747 UD |
1473 | #endif |
1474 | ||
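/*
  Sketch: the mmap knobs from the two sections above.  Together with
  the trim threshold these control how memory is returned to the
  system; the 192K value pairs with the 128K trim setting from the
  XF86 example cited earlier.
*/
#if 0
#include <malloc.h>

void tune_mmap(void)
{
  mallopt(M_MMAP_THRESHOLD, 192 * 1024); /* mmap requests >= 192K */
  mallopt(M_MMAP_MAX, 1024);             /* cap simultaneous mmaps */
  /* mallopt(M_MMAP_MAX, 0) would disable mmap for large requests. */
}
#endif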
fa8d436c | 1475 | #ifdef __cplusplus |
3c6904fb | 1476 | } /* end of extern "C" */ |
fa8d436c | 1477 | #endif |
f65fd747 | 1478 | |
100351c3 | 1479 | #include <malloc.h> |
f65fd747 | 1480 | |
fa8d436c UD |
1481 | #ifndef BOUNDED_N |
1482 | #define BOUNDED_N(ptr, sz) (ptr) | |
1483 | #endif | |
1484 | #ifndef RETURN_ADDRESS | |
1485 | #define RETURN_ADDRESS(X_) (NULL) | |
9ae6fc54 | 1486 | #endif |
431c33c0 UD |
1487 | |
1488 | /* On some platforms we can compile internal, not exported functions better. | |
1489 | Let the environment provide a macro and define it to be empty if it | |
1490 | is not available. */ | |
1491 | #ifndef internal_function | |
1492 | # define internal_function | |
1493 | #endif | |
1494 | ||
fa8d436c UD |
1495 | /* Forward declarations. */ |
1496 | struct malloc_chunk; | |
1497 | typedef struct malloc_chunk* mchunkptr; | |
431c33c0 | 1498 | |
fa8d436c | 1499 | /* Internal routines. */ |
f65fd747 | 1500 | |
fa8d436c | 1501 | #if __STD_C |
f65fd747 | 1502 | |
f1c5213d RM |
1503 | Void_t* _int_malloc(mstate, size_t); |
1504 | void _int_free(mstate, Void_t*); | |
1505 | Void_t* _int_realloc(mstate, Void_t*, size_t); | |
1506 | Void_t* _int_memalign(mstate, size_t, size_t); | |
1507 | Void_t* _int_valloc(mstate, size_t); | |
fa8d436c UD |
1508 | static Void_t* _int_pvalloc(mstate, size_t); |
1509 | /*static Void_t* cALLOc(size_t, size_t);*/ | |
1510 | static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**); | |
1511 | static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**); | |
1512 | static int mTRIm(size_t); | |
1513 | static size_t mUSABLe(Void_t*); | |
1514 | static void mSTATs(void); | |
1515 | static int mALLOPt(int, int); | |
1516 | static struct mallinfo mALLINFo(mstate); | |
6bf4302e | 1517 | static void malloc_printerr(int action, const char *str, void *ptr); |
fa8d436c UD |
1518 | |
1519 | static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz); | |
1520 | static int internal_function top_check(void); | |
1521 | static void internal_function munmap_chunk(mchunkptr p); | |
a9177ff5 | 1522 | #if HAVE_MREMAP |
fa8d436c | 1523 | static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size); |
a9177ff5 | 1524 | #endif |
fa8d436c UD |
1525 | |
1526 | static Void_t* malloc_check(size_t sz, const Void_t *caller); | |
1527 | static void free_check(Void_t* mem, const Void_t *caller); | |
1528 | static Void_t* realloc_check(Void_t* oldmem, size_t bytes, | |
1529 | const Void_t *caller); | |
1530 | static Void_t* memalign_check(size_t alignment, size_t bytes, | |
1531 | const Void_t *caller); | |
1532 | #ifndef NO_THREADS | |
fde89ad0 RM |
1533 | # ifdef _LIBC |
1534 | # if USE___THREAD || (defined USE_TLS && !defined SHARED) | |
1535 | /* These routines are never needed in this configuration. */ | |
1536 | # define NO_STARTER | |
1537 | # endif | |
1538 | # endif | |
1539 | # ifdef NO_STARTER | |
1540 | # undef NO_STARTER | |
1541 | # else | |
fa8d436c | 1542 | static Void_t* malloc_starter(size_t sz, const Void_t *caller); |
fde89ad0 | 1543 | static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller); |
fa8d436c | 1544 | static void free_starter(Void_t* mem, const Void_t *caller); |
fde89ad0 | 1545 | # endif |
fa8d436c UD |
1546 | static Void_t* malloc_atfork(size_t sz, const Void_t *caller); |
1547 | static void free_atfork(Void_t* mem, const Void_t *caller); | |
1548 | #endif | |
f65fd747 | 1549 | |
fa8d436c | 1550 | #else |
f65fd747 | 1551 | |
fa8d436c UD |
1552 | Void_t* _int_malloc(); |
1553 | void _int_free(); | |
1554 | Void_t* _int_realloc(); | |
1555 | Void_t* _int_memalign(); | |
1556 | Void_t* _int_valloc(); | |
1557 | Void_t* _int_pvalloc(); | |
1558 | /*static Void_t* cALLOc();*/ | |
1559 | static Void_t** _int_icalloc(); | |
1560 | static Void_t** _int_icomalloc(); | |
1561 | static int mTRIm(); | |
1562 | static size_t mUSABLe(); | |
1563 | static void mSTATs(); | |
1564 | static int mALLOPt(); | |
1565 | static struct mallinfo mALLINFo(); | |
f65fd747 | 1566 | |
fa8d436c | 1567 | #endif |
f65fd747 | 1568 | |
f65fd747 | 1569 | |
f65fd747 | 1570 | |
f65fd747 | 1571 | |
fa8d436c | 1572 | /* ------------- Optional versions of memcopy ---------------- */ |
f65fd747 | 1573 | |
a1648746 | 1574 | |
fa8d436c | 1575 | #if USE_MEMCPY |
a1648746 | 1576 | |
a9177ff5 | 1577 | /* |
fa8d436c UD |
1578 | Note: memcpy is ONLY invoked with non-overlapping regions, |
1579 | so the (usually slower) memmove is not needed. | |
1580 | */ | |
a1648746 | 1581 | |
fa8d436c UD |
1582 | #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes) |
1583 | #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes) | |
f65fd747 | 1584 | |
fa8d436c | 1585 | #else /* !USE_MEMCPY */ |
f65fd747 | 1586 | |
fa8d436c | 1587 | /* Use Duff's device for good zeroing/copying performance. */ |
f65fd747 | 1588 | |
fa8d436c UD |
1589 | #define MALLOC_ZERO(charp, nbytes) \ |
1590 | do { \ | |
1591 | INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ | |
1592 | unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \ | |
1593 | long mcn; \ | |
1594 | if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ | |
1595 | switch (mctmp) { \ | |
1596 | case 0: for(;;) { *mzp++ = 0; \ | |
1597 | case 7: *mzp++ = 0; \ | |
1598 | case 6: *mzp++ = 0; \ | |
1599 | case 5: *mzp++ = 0; \ | |
1600 | case 4: *mzp++ = 0; \ | |
1601 | case 3: *mzp++ = 0; \ | |
1602 | case 2: *mzp++ = 0; \ | |
1603 | case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ | |
1604 | } \ | |
1605 | } while(0) | |
f65fd747 | 1606 | |
fa8d436c UD |
1607 | #define MALLOC_COPY(dest,src,nbytes) \ |
1608 | do { \ | |
1609 | INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ | |
1610 | INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ | |
1611 | unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \ | |
1612 | long mcn; \ | |
1613 | if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ | |
1614 | switch (mctmp) { \ | |
1615 | case 0: for(;;) { *mcdst++ = *mcsrc++; \ | |
1616 | case 7: *mcdst++ = *mcsrc++; \ | |
1617 | case 6: *mcdst++ = *mcsrc++; \ | |
1618 | case 5: *mcdst++ = *mcsrc++; \ | |
1619 | case 4: *mcdst++ = *mcsrc++; \ | |
1620 | case 3: *mcdst++ = *mcsrc++; \ | |
1621 | case 2: *mcdst++ = *mcsrc++; \ | |
1622 | case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ | |
1623 | } \ | |
1624 | } while(0) | |
f65fd747 | 1625 | |
f65fd747 UD |
1626 | #endif |
1627 | ||
fa8d436c | 1628 | /* ------------------ MMAP support ------------------ */ |
f65fd747 | 1629 | |
f65fd747 | 1630 | |
fa8d436c | 1631 | #if HAVE_MMAP |
f65fd747 | 1632 | |
fa8d436c UD |
1633 | #include <fcntl.h> |
1634 | #ifndef LACKS_SYS_MMAN_H | |
1635 | #include <sys/mman.h> | |
1636 | #endif | |
f65fd747 | 1637 | |
fa8d436c UD |
1638 | #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
1639 | # define MAP_ANONYMOUS MAP_ANON | |
1640 | #endif | |
1641 | #if !defined(MAP_FAILED) | |
1642 | # define MAP_FAILED ((char*)-1) | |
1643 | #endif | |
f65fd747 | 1644 | |
fa8d436c UD |
1645 | #ifndef MAP_NORESERVE |
1646 | # ifdef MAP_AUTORESRV | |
1647 | # define MAP_NORESERVE MAP_AUTORESRV | |
1648 | # else | |
1649 | # define MAP_NORESERVE 0 | |
1650 | # endif | |
f65fd747 UD |
1651 | #endif |
1652 | ||
a9177ff5 RM |
1653 | /* |
1654 | Nearly all versions of mmap support MAP_ANONYMOUS, | |
fa8d436c UD |
1655 | so the following is unlikely to be needed, but is |
1656 | supplied just in case. | |
1657 | */ | |
f65fd747 | 1658 | |
fa8d436c | 1659 | #ifndef MAP_ANONYMOUS |
f65fd747 | 1660 | |
fa8d436c | 1661 | static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ |
2f6d1f1b | 1662 | |
fa8d436c UD |
1663 | #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \ |
1664 | (dev_zero_fd = open("/dev/zero", O_RDWR), \ | |
1665 | mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \ | |
1666 | mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) | |
f65fd747 | 1667 | |
fa8d436c | 1668 | #else |
f65fd747 | 1669 | |
fa8d436c UD |
1670 | #define MMAP(addr, size, prot, flags) \ |
1671 | (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0)) | |
f65fd747 | 1672 | |
e9b3e3c5 | 1673 | #endif |
f65fd747 UD |
1674 | |
1675 | ||
fa8d436c UD |
1676 | #endif /* HAVE_MMAP */ |
1677 | ||
1678 | ||
f65fd747 | 1679 | /* |
fa8d436c | 1680 | ----------------------- Chunk representations ----------------------- |
f65fd747 UD |
1681 | */ |
1682 | ||
1683 | ||
fa8d436c UD |
1684 | /* |
1685 | This struct declaration is misleading (but accurate and necessary). | |
1686 | It declares a "view" into memory allowing access to necessary | |
1687 | fields at known offsets from a given base. See explanation below. | |
1688 | */ | |
1689 | ||
1690 | struct malloc_chunk { | |
1691 | ||
1692 | INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ | |
1693 | INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ | |
1694 | ||
1695 | struct malloc_chunk* fd; /* double links -- used only if free. */ | |
f65fd747 UD |
1696 | struct malloc_chunk* bk; |
1697 | }; | |
1698 | ||
f65fd747 UD |
1699 | |
1700 | /* | |
f65fd747 UD |
1701 | malloc_chunk details: |
1702 | ||
1703 | (The following includes lightly edited explanations by Colin Plumb.) | |
1704 | ||
1705 | Chunks of memory are maintained using a `boundary tag' method as | |
1706 | described in e.g., Knuth or Standish. (See the paper by Paul | |
1707 | Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a | |
1708 | survey of such techniques.) Sizes of free chunks are stored both | |
1709 | in the front of each chunk and at the end. This makes | |
1710 | consolidating fragmented chunks into bigger chunks very fast. The | |
1711 | size fields also hold bits representing whether chunks are free or | |
1712 | in use. | |
1713 | ||
1714 | An allocated chunk looks like this: | |
1715 | ||
1716 | ||
1717 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1718 | | Size of previous chunk, if allocated | | | |
1719 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
8088488d | 1720 | | Size of chunk, in bytes |M|P| |
f65fd747 UD |
1721 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1722 | | User data starts here... . | |
1723 | . . | |
1724 | . (malloc_usable_space() bytes) . | |
1725 | . | | |
1726 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1727 | | Size of chunk | | |
1728 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1729 | ||
1730 | ||
1731 | Where "chunk" is the front of the chunk for the purpose of most of | |
1732 | the malloc code, but "mem" is the pointer that is returned to the | |
1733 | user. "Nextchunk" is the beginning of the next contiguous chunk. | |
1734 | ||
fa8d436c | 1735 | Chunks always begin on even word boundaries, so the mem portion
f65fd747 | 1736 | (which is returned to the user) is also on an even word boundary, and |
fa8d436c | 1737 | thus at least double-word aligned. |
f65fd747 UD |
1738 | |
1739 | Free chunks are stored in circular doubly-linked lists, and look like this: | |
1740 | ||
1741 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1742 | | Size of previous chunk | | |
1743 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1744 | `head:' | Size of chunk, in bytes |P| | |
1745 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1746 | | Forward pointer to next chunk in list | | |
1747 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1748 | | Back pointer to previous chunk in list | | |
1749 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1750 | | Unused space (may be 0 bytes long) . | |
1751 | . . | |
1752 | . | | |
1753 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1754 | `foot:' | Size of chunk, in bytes | | |
1755 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1756 | ||
1757 | The P (PREV_INUSE) bit, stored in the unused low-order bit of the | |
1758 | chunk size (which is always a multiple of two words), is an in-use | |
1759 | bit for the *previous* chunk. If that bit is *clear*, then the | |
1760 | word before the current chunk size contains the previous chunk | |
1761 | size, and can be used to find the front of the previous chunk. | |
fa8d436c UD |
1762 | The very first chunk allocated always has this bit set, |
1763 | preventing access to non-existent (or non-owned) memory. If | |
1764 | prev_inuse is set for any given chunk, then you CANNOT determine | |
1765 | the size of the previous chunk, and might even get a memory | |
1766 | addressing fault when trying to do so. | |
f65fd747 UD |
1767 | |
1768 | Note that the `foot' of the current chunk is actually represented | |
fa8d436c UD |
1769 | as the prev_size of the NEXT chunk. This makes it easier to |
1770 | deal with alignments etc but can be very confusing when trying | |
1771 | to extend or adapt this code. | |
f65fd747 UD |
1772 | |
1773 | The two exceptions to all this are | |
1774 | ||
fa8d436c UD |
1775 | 1. The special chunk `top' doesn't bother using the |
1776 | trailing size field since there is no next contiguous chunk | |
1777 | that would have to index off it. After initialization, `top' | |
1778 | is forced to always exist. If it would become less than | |
1779 | MINSIZE bytes long, it is replenished. | |
f65fd747 UD |
1780 | |
1781 | 2. Chunks allocated via mmap, which have the second-lowest-order | |
8088488d | 1782 | bit M (IS_MMAPPED) set in their size fields. Because they are |
fa8d436c | 1783 | allocated one-by-one, each must contain its own trailing size field. |
f65fd747 UD |
1784 | |
1785 | */ | |
1786 | ||
1787 | /* | |
fa8d436c UD |
1788 | ---------- Size and alignment checks and conversions ---------- |
1789 | */ | |
f65fd747 | 1790 | |
fa8d436c | 1791 | /* conversion from malloc headers to user pointers, and back */ |
f65fd747 | 1792 | |
fa8d436c UD |
1793 | #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) |
1794 | #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) | |
f65fd747 | 1795 | |
fa8d436c UD |
1796 | /* The smallest possible chunk */ |
1797 | #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk)) | |
f65fd747 | 1798 | |
fa8d436c | 1799 | /* The smallest size we can malloc is an aligned minimal chunk */ |
f65fd747 | 1800 | |
fa8d436c UD |
1801 | #define MINSIZE \ |
1802 | (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)) | |
f65fd747 | 1803 | |
fa8d436c | 1804 | /* Check if m has acceptable alignment */ |
f65fd747 | 1805 | |
fa8d436c | 1806 | #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) |
f65fd747 | 1807 | |
f65fd747 | 1808 | |
a9177ff5 | 1809 | /* |
fa8d436c UD |
1810 | Check if a request is so large that it would wrap around zero when |
1811 | padded and aligned. To simplify some other code, the bound is made | |
1812 | low enough so that adding MINSIZE will also not wrap around zero. | |
1813 | */ | |
f65fd747 | 1814 | |
fa8d436c UD |
1815 | #define REQUEST_OUT_OF_RANGE(req) \ |
1816 | ((unsigned long)(req) >= \ | |
a9177ff5 | 1817 | (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE)) |
f65fd747 | 1818 | |
fa8d436c | 1819 | /* pad request bytes into a usable size -- internal version */ |
f65fd747 | 1820 | |
fa8d436c UD |
1821 | #define request2size(req) \ |
1822 | (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \ | |
1823 | MINSIZE : \ | |
1824 | ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK) | |
f65fd747 | 1825 | |
fa8d436c | 1826 | /* Same, except also perform argument check */ |
f65fd747 | 1827 | |
fa8d436c UD |
1828 | #define checked_request2size(req, sz) \ |
1829 | if (REQUEST_OUT_OF_RANGE(req)) { \ | |
1830 | MALLOC_FAILURE_ACTION; \ | |
1831 | return 0; \ | |
1832 | } \ | |
a9177ff5 | 1833 | (sz) = request2size(req); |
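/*
  Worked example of the padding arithmetic, assuming 4-byte SIZE_SZ
  and 8-byte alignment (so MALLOC_ALIGN_MASK == 7, MINSIZE == 16):

    request2size(1)  == 16    1+4+7 == 12 < MINSIZE, so MINSIZE
    request2size(12) == 16    (12+4+7) & ~7 == 23 & ~7
    request2size(13) == 24    (13+4+7) & ~7 == 24 & ~7

  Only one SIZE_SZ of overhead is charged because the prev_size field
  of the *next* chunk overlaps the tail of this chunk's usable space.
*/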
f65fd747 UD |
1834 | |
1835 | /* | |
fa8d436c | 1836 | --------------- Physical chunk operations --------------- |
f65fd747 UD |
1837 | */ |
1838 | ||
10dc2a90 | 1839 | |
fa8d436c UD |
1840 | /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
1841 | #define PREV_INUSE 0x1 | |
f65fd747 | 1842 | |
fa8d436c UD |
1843 | /* extract inuse bit of previous chunk */ |
1844 | #define prev_inuse(p) ((p)->size & PREV_INUSE) | |
f65fd747 | 1845 | |
f65fd747 | 1846 | |
fa8d436c UD |
1847 | /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
1848 | #define IS_MMAPPED 0x2 | |
f65fd747 | 1849 | |
fa8d436c UD |
1850 | /* check for mmap()'ed chunk */ |
1851 | #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) | |
f65fd747 | 1852 | |
f65fd747 | 1853 | |
fa8d436c UD |
1854 | /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained |
1855 | from a non-main arena. This is only set immediately before handing | |
1856 | the chunk to the user, if necessary. */ | |
1857 | #define NON_MAIN_ARENA 0x4 | |
f65fd747 | 1858 | |
fa8d436c UD |
1859 | /* check for chunk from non-main arena */ |
1860 | #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA) | |
f65fd747 UD |
1861 | |
1862 | ||
a9177ff5 RM |
1863 | /* |
1864 | Bits to mask off when extracting size | |
f65fd747 | 1865 | |
fa8d436c UD |
1866 | Note: IS_MMAPPED is intentionally not masked off from size field in |
1867 | macros for which mmapped chunks should never be seen. This should | |
1868 | cause helpful core dumps to occur if it is tried by accident by | |
1869 | people extending or adapting this malloc. | |
f65fd747 | 1870 | */ |
fa8d436c | 1871 | #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA) |
f65fd747 | 1872 | |
fa8d436c UD |
1873 | /* Get size, ignoring use bits */ |
1874 | #define chunksize(p) ((p)->size & ~(SIZE_BITS)) | |
f65fd747 | 1875 | |
f65fd747 | 1876 | |
fa8d436c UD |
1877 | /* Ptr to next physical malloc_chunk. */ |
1878 | #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) )) | |
f65fd747 | 1879 | |
fa8d436c UD |
1880 | /* Ptr to previous physical malloc_chunk */ |
1881 | #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) | |
f65fd747 | 1882 | |
fa8d436c UD |
1883 | /* Treat space at ptr + offset as a chunk */ |
1884 | #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) | |
1885 | ||
1886 | /* extract p's inuse bit */ | |
1887 | #define inuse(p)\ | |
1888 | ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE) | |
f65fd747 | 1889 | |
fa8d436c UD |
1890 | /* set/clear chunk as being inuse without otherwise disturbing */ |
1891 | #define set_inuse(p)\ | |
1892 | ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE | |
f65fd747 | 1893 | |
fa8d436c UD |
1894 | #define clear_inuse(p)\ |
1895 | ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE) | |
f65fd747 UD |
1896 | |
1897 | ||
fa8d436c UD |
1898 | /* check/set/clear inuse bits in known places */ |
1899 | #define inuse_bit_at_offset(p, s)\ | |
1900 | (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) | |
f65fd747 | 1901 | |
fa8d436c UD |
1902 | #define set_inuse_bit_at_offset(p, s)\ |
1903 | (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) | |
f65fd747 | 1904 | |
fa8d436c UD |
1905 | #define clear_inuse_bit_at_offset(p, s)\ |
1906 | (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) | |
f65fd747 | 1907 | |
f65fd747 | 1908 | |
fa8d436c UD |
1909 | /* Set size at head, without disturbing its use bit */ |
1910 | #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s))) | |
f65fd747 | 1911 | |
fa8d436c UD |
1912 | /* Set size/use field */ |
1913 | #define set_head(p, s) ((p)->size = (s)) | |
f65fd747 | 1914 | |
fa8d436c UD |
1915 | /* Set size at footer (only when chunk is not in use) */ |
1916 | #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) | |
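/*
  Sketch of how the operations above compose: walking forward from a
  chunk to the arena's top.  next_chunk masks the flag bits out of the
  size field, and a chunk's own inuse bit lives in the size field of
  the chunk that follows it.
*/
#if 0
static unsigned long count_chunks_to_top(mstate av, mchunkptr p)
{
  unsigned long n = 0;
  while (p != av->top) {
    p = next_chunk(p);    /* advance by chunksize(p) */
    ++n;
  }
  return n;
}
#endif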
f65fd747 UD |
1917 | |
1918 | ||
fa8d436c UD |
1919 | /* |
1920 | -------------------- Internal data structures -------------------- | |
1921 | ||
1922 | All internal state is held in an instance of malloc_state defined | |
1923 | below. There are no other static variables, except in two optional | |
a9177ff5 RM |
1924 | cases: |
1925 | * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. | |
fa8d436c UD |
1926 | * If HAVE_MMAP is true, but mmap doesn't support |
1927 | MAP_ANONYMOUS, a dummy file descriptor for mmap. | |
1928 | ||
1929 | Beware of lots of tricks that minimize the total bookkeeping space | |
1930 | requirements. The result is a little over 1K bytes (for 4-byte | |
1931 | pointers and size_t). | |
1932 | */ | |
f65fd747 UD |
1933 | |
1934 | /* | |
fa8d436c UD |
1935 | Bins |
1936 | ||
1937 | An array of bin headers for free chunks. Each bin is doubly | |
1938 | linked. The bins are approximately proportionally (log) spaced. | |
1939 | There are a lot of these bins (128). This may look excessive, but | |
1940 | works very well in practice. Most bins hold sizes that are | |
1941 | unusual as malloc request sizes, but are more usual for fragments | |
1942 | and consolidated sets of chunks, which is what these bins hold, so | |
1943 | they can be found quickly. All procedures maintain the invariant | |
1944 | that no consolidated chunk physically borders another one, so each | |
1945 | chunk in a list is known to be preceded and followed by either | |
1946 | inuse chunks or the ends of memory. | |
1947 | ||
1948 | Chunks in bins are kept in size order, with ties going to the | |
1949 | approximately least recently used chunk. Ordering isn't needed | |
1950 | for the small bins, which all contain the same-sized chunks, but | |
1951 | facilitates best-fit allocation for larger chunks. These lists | |
1952 | are just sequential. Keeping them in order almost never requires | |
1953 | enough traversal to warrant using fancier ordered data | |
a9177ff5 | 1954 | structures. |
fa8d436c UD |
1955 | |
1956 | Chunks of the same size are linked with the most | |
1957 | recently freed at the front, and allocations are taken from the | |
1958 | back. This results in LRU (FIFO) allocation order, which tends | |
1959 | to give each chunk an equal opportunity to be consolidated with | |
1960 | adjacent freed chunks, resulting in larger free chunks and less | |
1961 | fragmentation. | |
1962 | ||
1963 | To simplify use in double-linked lists, each bin header acts | |
1964 | as a malloc_chunk. This avoids special-casing for headers. | |
1965 | But to conserve space and improve locality, we allocate | |
1966 | only the fd/bk pointers of bins, and then use repositioning tricks | |
a9177ff5 | 1967 | to treat these as the fields of a malloc_chunk*. |
f65fd747 UD |
1968 | */ |
1969 | ||
fa8d436c | 1970 | typedef struct malloc_chunk* mbinptr; |
f65fd747 | 1971 | |
fa8d436c UD |
1972 | /* addressing -- note that bin_at(0) does not exist */ |
1973 | #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1))) | |
f65fd747 | 1974 | |
fa8d436c UD |
1975 | /* analog of ++bin */ |
1976 | #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1))) | |
f65fd747 | 1977 | |
fa8d436c UD |
1978 | /* Reminders about list directionality within bins */ |
1979 | #define first(b) ((b)->fd) | |
1980 | #define last(b) ((b)->bk) | |
f65fd747 | 1981 | |
fa8d436c UD |
1982 | /* Take a chunk off a bin list */ |
1983 | #define unlink(P, BK, FD) { \ | |
1984 | FD = P->fd; \ | |
1985 | BK = P->bk; \ | |
3e030bd5 | 1986 | if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \ |
6bf4302e UD |
1987 | malloc_printerr (check_action, "corrupted double-linked list", P); \ |
1988 | else { \ | |
1989 | FD->bk = BK; \ | |
1990 | BK->fd = FD; \ | |
1991 | } \ | |
fa8d436c | 1992 | } |
f65fd747 | 1993 | |
fa8d436c UD |
1994 | /* |
1995 | Indexing | |
1996 | ||
1997 | Bins for sizes < 512 bytes contain chunks of all the same size, spaced | |
1998 | 8 bytes apart. Larger bins are approximately logarithmically spaced: | |
f65fd747 | 1999 | |
fa8d436c UD |
2000 | 64 bins of size 8 |
2001 | 32 bins of size 64 | |
2002 | 16 bins of size 512 | |
2003 | 8 bins of size 4096 | |
2004 | 4 bins of size 32768 | |
2005 | 2 bins of size 262144 | |
2006 | 1 bin of size what's left | |
f65fd747 | 2007 | |
fa8d436c UD |
2008 | There is actually a little bit of slop in the numbers in bin_index |
2009 | for the sake of speed. This makes no difference elsewhere. | |
f65fd747 | 2010 | |
fa8d436c UD |
2011 | The bins top out around 1MB because we expect to service large |
2012 | requests via mmap. | |
2013 | */ | |
f65fd747 | 2014 | |
fa8d436c UD |
2015 | #define NBINS 128 |
2016 | #define NSMALLBINS 64 | |
2017 | #define SMALLBIN_WIDTH 8 | |
2018 | #define MIN_LARGE_SIZE 512 | |
f65fd747 | 2019 | |
fa8d436c UD |
2020 | #define in_smallbin_range(sz) \ |
2021 | ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE) | |
f65fd747 | 2022 | |
fa8d436c | 2023 | #define smallbin_index(sz) (((unsigned)(sz)) >> 3) |
f65fd747 | 2024 | |
fa8d436c UD |
2025 | #define largebin_index(sz) \ |
2026 | (((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \ | |
2027 | ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \ | |
2028 | ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ | |
2029 | ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ | |
2030 | ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \ | |
2031 | 126) | |
f65fd747 | 2032 | |
fa8d436c UD |
2033 | #define bin_index(sz) \ |
2034 | ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz)) | |
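/*
  Worked examples of the indexing above: a 40-byte chunk is in
  smallbin range, so bin_index(40) == 40>>3 == 5.  A 600-byte chunk is
  large: 600>>6 == 9 <= 32, so bin_index(600) == 56 + 9 == 65.  A
  40960-byte chunk falls through to the third clause: 40960>>12 == 10
  <= 10, giving 110 + 10 == 120.
*/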
f65fd747 | 2035 | |
f65fd747 UD |
2036 | |
2037 | /* | |
fa8d436c UD |
2038 | Unsorted chunks |
2039 | ||
2040 | All remainders from chunk splits, as well as all returned chunks, | |
2041 | are first placed in the "unsorted" bin. They are then placed | |
2042 | in regular bins after malloc gives them ONE chance to be used before | |
2043 | binning. So, basically, the unsorted_chunks list acts as a queue, | |
2044 | with chunks being placed on it in free (and malloc_consolidate), | |
2045 | and taken off (to be either used or placed in bins) in malloc. | |
2046 | ||
2047 | The NON_MAIN_ARENA flag is never set for unsorted chunks, so it | |
2048 | does not have to be taken into account in size comparisons. | |
f65fd747 UD |
2049 | */ |
2050 | ||
fa8d436c UD |
2051 | /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */ |
2052 | #define unsorted_chunks(M) (bin_at(M, 1)) | |
f65fd747 | 2053 | |
fa8d436c UD |
2054 | /* |
2055 | Top | |
2056 | ||
2057 | The top-most available chunk (i.e., the one bordering the end of | |
2058 | available memory) is treated specially. It is never included in | |
2059 | any bin, is used only if no other chunk is available, and is | |
2060 | released back to the system if it is very large (see | |
2061 | M_TRIM_THRESHOLD). Because top initially | |
2062 | points to its own bin with initial zero size, thus forcing | |
2063 | extension on the first malloc request, we avoid having any special | |
2064 | code in malloc to check whether it even exists yet. But we still | |
2065 | need to do so when getting memory from system, so we make | |
2066 | initial_top treat the bin as a legal but unusable chunk during the | |
2067 | interval between initialization and the first call to | |
2068 | sYSMALLOc. (This is somewhat delicate, since it relies on | |
2069 | the 2 preceding words to be zero during this interval as well.) | |
2070 | */ | |
f65fd747 | 2071 | |
fa8d436c UD |
2072 | /* Conveniently, the unsorted bin can be used as dummy top on first call */ |
2073 | #define initial_top(M) (unsorted_chunks(M)) | |
f65fd747 | 2074 | |
fa8d436c UD |
2075 | /* |
2076 | Binmap | |
f65fd747 | 2077 | |
fa8d436c UD |
2078 | To help compensate for the large number of bins, a one-level index |
2079 | structure is used for bin-by-bin searching. `binmap' is a | |
2080 | bitvector recording whether bins are definitely empty so they can | |
2081 | be skipped over during traversals. The bits are NOT always | |
2082 | cleared as soon as bins are empty, but instead only | |
2083 | when they are noticed to be empty during traversal in malloc. | |
2084 | */ | |
f65fd747 | 2085 | |
fa8d436c UD |
2086 | /* Conservatively use 32 bits per map word, even if on 64bit system */ |
2087 | #define BINMAPSHIFT 5 | |
2088 | #define BITSPERMAP (1U << BINMAPSHIFT) | |
2089 | #define BINMAPSIZE (NBINS / BITSPERMAP) | |
f65fd747 | 2090 | |
fa8d436c UD |
2091 | #define idx2block(i) ((i) >> BINMAPSHIFT) |
2092 | #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1)))) | |
f65fd747 | 2093 | |
fa8d436c UD |
2094 | #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i)) |
2095 | #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i))) | |
2096 | #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i)) | |
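/*
  Example: bin 70 lives in map word idx2block(70) == 70>>5 == 2, at
  bit idx2bit(70) == 1 << (70 & 31) == 1<<6.  mark_bin(m,70) sets that
  bit; get_binmap(m,70) tests it, letting a traversal skip 32 bins
  with a single word compare when a block is all zero.
*/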
f65fd747 | 2097 | |
fa8d436c UD |
2098 | /* |
2099 | Fastbins | |
2100 | ||
2101 | An array of lists holding recently freed small chunks. Fastbins | |
2102 | are not doubly linked. It is faster to single-link them, and | |
2103 | since chunks are never removed from the middles of these lists, | |
2104 | double linking is not necessary. Also, unlike regular bins, they | |
2105 | are not even processed in FIFO order (they use faster LIFO) since | |
2106 | ordering doesn't much matter in the transient contexts in which | |
2107 | fastbins are normally used. | |
2108 | ||
2109 | Chunks in fastbins keep their inuse bit set, so they cannot | |
2110 | be consolidated with other free chunks. malloc_consolidate | |
2111 | releases all chunks in fastbins and consolidates them with | |
a9177ff5 | 2112 | other free chunks. |
fa8d436c | 2113 | */ |
f65fd747 | 2114 | |
fa8d436c | 2115 | typedef struct malloc_chunk* mfastbinptr; |
f65fd747 | 2116 | |
fa8d436c UD |
2117 | /* offset 2 to use otherwise unindexable first 2 bins */ |
2118 | #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2) | |
f65fd747 | 2119 | |
fa8d436c UD |
2120 | /* The maximum fastbin request size we support */ |
2121 | #define MAX_FAST_SIZE 80 | |
f65fd747 | 2122 | |
fa8d436c | 2123 | #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1) |
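/*
  Worked example, assuming 4-byte SIZE_SZ and 8-byte alignment: the
  smallest chunk (16 bytes) maps to fastbin_index(16) == (16>>3) - 2
  == 0, which is why the index is offset by 2.  request2size(80) ==
  88, so NFASTBINS == fastbin_index(88) + 1 == 9 + 1 == 10.
*/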
f65fd747 UD |
2124 | |
2125 | /* | |
fa8d436c UD |
2126 | FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() |
2127 | that triggers automatic consolidation of possibly-surrounding | |
2128 | fastbin chunks. This is a heuristic, so the exact value should not | |
2129 | matter too much. It is defined at half the default trim threshold as a | |
2130 | compromise heuristic to only attempt consolidation if it is likely | |
2131 | to lead to trimming. However, it is not dynamically tunable, since | |
a9177ff5 | 2132 | consolidation reduces fragmentation surrounding large chunks even |
fa8d436c | 2133 | if trimming is not used. |
f65fd747 UD |
2134 | */ |
2135 | ||
fa8d436c | 2136 | #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL) |
f65fd747 UD |
2137 | |
2138 | /* | |
a9177ff5 | 2139 | Since the lowest 2 bits in max_fast don't matter in size comparisons, |
fa8d436c | 2140 | they are used as flags. |
f65fd747 UD |
2141 | */ |
2142 | ||
fa8d436c UD |
2143 | /* |
2144 | FASTCHUNKS_BIT held in max_fast indicates that there are probably | |
2145 | some fastbin chunks. It is set true on entering a chunk into any | |
2146 | fastbin, and cleared only in malloc_consolidate. | |
f65fd747 | 2147 | |
fa8d436c UD |
2148 | The truth value is inverted so that have_fastchunks will be true |
2149 | upon startup (since statics are zero-filled), simplifying | |
2150 | initialization checks. | |
2151 | */ | |
f65fd747 | 2152 | |
fa8d436c | 2153 | #define FASTCHUNKS_BIT (1U) |
f65fd747 | 2154 | |
fa8d436c UD |
2155 | #define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0) |
2156 | #define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT) | |
2157 | #define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT) | |
f65fd747 UD |
2158 | |
2159 | /* | |
fa8d436c UD |
2160 | NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous |
2161 | regions. Otherwise, contiguity is exploited in merging together, | |
2162 | when possible, results from consecutive MORECORE calls. | |
f65fd747 | 2163 | |
fa8d436c UD |
2164 | The initial value comes from MORECORE_CONTIGUOUS, but is |
2165 | changed dynamically if mmap is ever used as an sbrk substitute. | |
f65fd747 UD |
2166 | */ |
2167 | ||
fa8d436c | 2168 | #define NONCONTIGUOUS_BIT (2U) |
f65fd747 | 2169 | |
fa8d436c UD |
2170 | #define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0) |
2171 | #define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0) | |
2172 | #define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT) | |
2173 | #define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT) | |
f65fd747 | 2174 | |
a9177ff5 RM |
2175 | /* |
2176 | Set value of max_fast. | |
fa8d436c UD |
2177 | Use impossibly small value if 0. |
2178 | Precondition: there are no existing fastbin chunks. | |
2179 | Setting the value clears fastchunk bit but preserves noncontiguous bit. | |
f65fd747 UD |
2180 | */ |
2181 | ||
fa8d436c UD |
2182 | #define set_max_fast(M, s) \ |
2183 | (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \ | |
2184 | FASTCHUNKS_BIT | \ | |
2185 | ((M)->max_fast & NONCONTIGUOUS_BIT) | |
f65fd747 | 2186 | |
f65fd747 UD |
2187 | |
2188 | /* | |
fa8d436c | 2189 | ----------- Internal state representation and initialization ----------- |
f65fd747 UD |
2190 | */ |
2191 | ||
fa8d436c UD |
2192 | struct malloc_state { |
2193 | /* Serialize access. */ | |
2194 | mutex_t mutex; | |
4f27c496 | 2195 | /* Should we have padding to move the mutex to its own cache line? */
f65fd747 | 2196 | |
4f27c496 | 2197 | #if THREAD_STATS |
fa8d436c UD |
2198 | /* Statistics for locking. Only used if THREAD_STATS is defined. */ |
2199 | long stat_lock_direct, stat_lock_loop, stat_lock_wait; | |
4f27c496 | 2200 | #endif |
f65fd747 | 2201 | |
fa8d436c UD |
2202 | /* The maximum chunk size to be eligible for fastbin */ |
2203 | INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */ | |
f65fd747 | 2204 | |
fa8d436c UD |
2205 | /* Fastbins */ |
2206 | mfastbinptr fastbins[NFASTBINS]; | |
f65fd747 | 2207 | |
fa8d436c UD |
2208 | /* Base of the topmost chunk -- not otherwise kept in a bin */ |
2209 | mchunkptr top; | |
f65fd747 | 2210 | |
fa8d436c UD |
2211 | /* The remainder from the most recent split of a small request */ |
2212 | mchunkptr last_remainder; | |
f65fd747 | 2213 | |
fa8d436c UD |
2214 | /* Normal bins packed as described above */ |
2215 | mchunkptr bins[NBINS * 2]; | |
f65fd747 | 2216 | |
fa8d436c UD |
2217 | /* Bitmap of bins */ |
2218 | unsigned int binmap[BINMAPSIZE]; | |
f65fd747 | 2219 | |
fa8d436c UD |
2220 | /* Linked list */ |
2221 | struct malloc_state *next; | |
f65fd747 | 2222 | |
fa8d436c UD |
2223 | /* Memory allocated from the system in this arena. */ |
2224 | INTERNAL_SIZE_T system_mem; | |
2225 | INTERNAL_SIZE_T max_system_mem; | |
2226 | }; | |
f65fd747 | 2227 | |
fa8d436c UD |
2228 | struct malloc_par { |
2229 | /* Tunable parameters */ | |
2230 | unsigned long trim_threshold; | |
2231 | INTERNAL_SIZE_T top_pad; | |
2232 | INTERNAL_SIZE_T mmap_threshold; | |
2233 | ||
2234 | /* Memory map support */ | |
2235 | int n_mmaps; | |
2236 | int n_mmaps_max; | |
2237 | int max_n_mmaps; | |
2238 | ||
2239 | /* Cache malloc_getpagesize */ | |
a9177ff5 | 2240 | unsigned int pagesize; |
fa8d436c UD |
2241 | |
2242 | /* Statistics */ | |
2243 | INTERNAL_SIZE_T mmapped_mem; | |
2244 | /*INTERNAL_SIZE_T sbrked_mem;*/ | |
2245 | /*INTERNAL_SIZE_T max_sbrked_mem;*/ | |
2246 | INTERNAL_SIZE_T max_mmapped_mem; | |
2247 | INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */ | |
2248 | ||
2249 | /* First address handed out by MORECORE/sbrk. */ | |
2250 | char* sbrk_base; | |
2251 | }; | |
f65fd747 | 2252 | |
fa8d436c UD |
2253 | /* There are several instances of this struct ("arenas") in this |
2254 | malloc. If you are adapting this malloc in a way that does NOT use | |
2255 | a static or mmapped malloc_state, you MUST explicitly zero-fill it | |
2256 | before using. This malloc relies on the property that malloc_state | |
2257 | is initialized to all zeroes (as is true of C statics). */ | |
f65fd747 | 2258 | |
fa8d436c | 2259 | static struct malloc_state main_arena; |
f65fd747 | 2260 | |
fa8d436c | 2261 | /* There is only one instance of the malloc parameters. */ |
f65fd747 | 2262 | |
fa8d436c | 2263 | static struct malloc_par mp_; |
f65fd747 | 2264 | |
fa8d436c UD |
2265 | /* |
2266 | Initialize a malloc_state struct. | |
f65fd747 | 2267 | |
fa8d436c UD |
2268 | This is called only from within malloc_consolidate, which needs |
2269 | to be called in the same contexts anyway. It is never called directly | |
2270 | outside of malloc_consolidate because some optimizing compilers try | |
2271 | to inline it at all call points, which turns out not to be an | |
2272 | optimization at all. (Inlining it in malloc_consolidate is fine though.) | |
2273 | */ | |
f65fd747 | 2274 | |
fa8d436c UD |
2275 | #if __STD_C |
2276 | static void malloc_init_state(mstate av) | |
2277 | #else | |
2278 | static void malloc_init_state(av) mstate av; | |
2279 | #endif | |
2280 | { | |
2281 | int i; | |
2282 | mbinptr bin; | |
a9177ff5 | 2283 | |
fa8d436c | 2284 | /* Establish circular links for normal bins */ |
a9177ff5 | 2285 | for (i = 1; i < NBINS; ++i) { |
fa8d436c UD |
2286 | bin = bin_at(av,i); |
2287 | bin->fd = bin->bk = bin; | |
2288 | } | |
f65fd747 | 2289 | |
fa8d436c UD |
2290 | #if MORECORE_CONTIGUOUS |
2291 | if (av != &main_arena) | |
2292 | #endif | |
2293 | set_noncontiguous(av); | |
f65fd747 | 2294 | |
fa8d436c | 2295 | set_max_fast(av, DEFAULT_MXFAST); |
f65fd747 | 2296 | |
fa8d436c UD |
2297 | av->top = initial_top(av); |
2298 | } | |
e9b3e3c5 | 2299 | |
a9177ff5 | 2300 | /* |
fa8d436c UD |
2301 | Other internal utilities operating on mstates |
2302 | */ | |
f65fd747 | 2303 | |
fa8d436c UD |
2304 | #if __STD_C |
2305 | static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate); | |
2306 | static int sYSTRIm(size_t, mstate); | |
2307 | static void malloc_consolidate(mstate); | |
2308 | static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**); | |
831372e7 | 2309 | #else |
fa8d436c UD |
2310 | static Void_t* sYSMALLOc(); |
2311 | static int sYSTRIm(); | |
2312 | static void malloc_consolidate(); | |
2313 | static Void_t** iALLOc(); | |
831372e7 | 2314 | #endif |
7e3be507 | 2315 | |
404d4cef RM |
2316 | |
2317 | /* -------------- Early definitions for debugging hooks ---------------- */ | |
2318 | ||
2319 | /* Define and initialize the hook variables. These weak definitions must | |
2320 | appear before any use of the variables in a function (arena.c uses one). */ | |
2321 | #ifndef weak_variable | |
2322 | #ifndef _LIBC | |
2323 | #define weak_variable /**/ | |
2324 | #else | |
2325 | /* In GNU libc we want the hook variables to be weak definitions to | |
2326 | avoid a problem with Emacs. */ | |
2327 | #define weak_variable weak_function | |
2328 | #endif | |
2329 | #endif | |
2330 | ||
2331 | /* Forward declarations. */ | |
2332 | static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz, | |
2333 | const __malloc_ptr_t caller)); | |
2334 | static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz, | |
2335 | const __malloc_ptr_t caller)); | |
2336 | static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz, | |
2337 | const __malloc_ptr_t caller)); | |
2338 | ||
06d6611a UD |
2339 | void weak_variable (*__malloc_initialize_hook) (void) = NULL; |
2340 | void weak_variable (*__free_hook) (__malloc_ptr_t __ptr, | |
2341 | const __malloc_ptr_t) = NULL; | |
404d4cef | 2342 | __malloc_ptr_t weak_variable (*__malloc_hook) |
06d6611a | 2343 | (size_t __size, const __malloc_ptr_t) = malloc_hook_ini; |
404d4cef | 2344 | __malloc_ptr_t weak_variable (*__realloc_hook) |
06d6611a | 2345 | (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t) |
404d4cef RM |
2346 | = realloc_hook_ini; |
2347 | __malloc_ptr_t weak_variable (*__memalign_hook) | |
06d6611a | 2348 | (size_t __alignment, size_t __size, const __malloc_ptr_t) |
404d4cef | 2349 | = memalign_hook_ini; |
06d6611a | 2350 | void weak_variable (*__after_morecore_hook) (void) = NULL; |
404d4cef RM |
2351 | |
2352 | ||
3e030bd5 UD |
2353 | /* ---------------- Error behavior ------------------------------------ */ |
2354 | ||
2355 | #ifndef DEFAULT_CHECK_ACTION | |
2356 | #define DEFAULT_CHECK_ACTION 3 | |
2357 | #endif | |
2358 | ||
2359 | static int check_action = DEFAULT_CHECK_ACTION; | |
2360 | ||
2361 | ||
fa8d436c UD |
2362 | /* ------------------- Support for multiple arenas -------------------- */ |
2363 | #include "arena.c" | |
f65fd747 | 2364 | |
fa8d436c UD |
2365 | /* |
2366 | Debugging support | |
f65fd747 | 2367 | |
fa8d436c UD |
2368 | These routines make a number of assertions about the states |
2369 | of data structures that should be true at all times. If any | |
2370 | are not true, it's very likely that a user program has somehow | |
2371 | trashed memory. (It's also possible that there is a coding error | |
2372 | in malloc. In which case, please report it!) | |
2373 | */ | |
ee74a442 | 2374 | |
fa8d436c | 2375 | #if ! MALLOC_DEBUG |
d8f00d46 | 2376 | |
fa8d436c UD |
2377 | #define check_chunk(A,P) |
2378 | #define check_free_chunk(A,P) | |
2379 | #define check_inuse_chunk(A,P) | |
2380 | #define check_remalloced_chunk(A,P,N) | |
2381 | #define check_malloced_chunk(A,P,N) | |
2382 | #define check_malloc_state(A) | |
d8f00d46 | 2383 | |
fa8d436c | 2384 | #else |
ca34d7a7 | 2385 | |
fa8d436c UD |
2386 | #define check_chunk(A,P) do_check_chunk(A,P) |
2387 | #define check_free_chunk(A,P) do_check_free_chunk(A,P) | |
2388 | #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P) | |
2389 | #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N) | |
2390 | #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N) | |
2391 | #define check_malloc_state(A) do_check_malloc_state(A) | |
ca34d7a7 | 2392 | |
fa8d436c UD |
2393 | /* |
2394 | Properties of all chunks | |
2395 | */ | |
ca34d7a7 | 2396 | |
fa8d436c UD |
2397 | #if __STD_C |
2398 | static void do_check_chunk(mstate av, mchunkptr p) | |
2399 | #else | |
2400 | static void do_check_chunk(av, p) mstate av; mchunkptr p; | |
ca34d7a7 | 2401 | #endif |
ca34d7a7 | 2402 | { |
fa8d436c UD |
2403 | unsigned long sz = chunksize(p); |
2404 | /* min and max possible addresses assuming contiguous allocation */ | |
2405 | char* max_address = (char*)(av->top) + chunksize(av->top); | |
2406 | char* min_address = max_address - av->system_mem; | |
2407 | ||
2408 | if (!chunk_is_mmapped(p)) { | |
a9177ff5 | 2409 | |
fa8d436c UD |
2410 | /* Has legal address ... */ |
2411 | if (p != av->top) { | |
2412 | if (contiguous(av)) { | |
2413 | assert(((char*)p) >= min_address); | |
2414 | assert(((char*)p + sz) <= ((char*)(av->top))); | |
2415 | } | |
2416 | } | |
2417 | else { | |
2418 | /* top size is always at least MINSIZE */ | |
2419 | assert((unsigned long)(sz) >= MINSIZE); | |
2420 | /* top predecessor always marked inuse */ | |
2421 | assert(prev_inuse(p)); | |
2422 | } | |
a9177ff5 | 2423 | |
ca34d7a7 | 2424 | } |
fa8d436c UD |
2425 | else { |
2426 | #if HAVE_MMAP | |
2427 | /* address is outside main heap */ | |
2428 | if (contiguous(av) && av->top != initial_top(av)) { | |
2429 | assert(((char*)p) < min_address || ((char*)p) > max_address); | |
2430 | } | |
2431 | /* chunk is page-aligned */ | |
2432 | assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0); | |
2433 | /* mem is aligned */ | |
2434 | assert(aligned_OK(chunk2mem(p))); | |
2435 | #else | |
2436 | /* force an appropriate assert violation if debug set */ | |
2437 | assert(!chunk_is_mmapped(p)); | |
eb406346 | 2438 | #endif |
eb406346 | 2439 | } |
eb406346 UD |
2440 | } |
2441 | ||
fa8d436c UD |
2442 | /* |
2443 | Properties of free chunks | |
2444 | */ | |
ee74a442 | 2445 | |
fa8d436c UD |
2446 | #if __STD_C |
2447 | static void do_check_free_chunk(mstate av, mchunkptr p) | |
2448 | #else | |
2449 | static void do_check_free_chunk(av, p) mstate av; mchunkptr p; | |
10dc2a90 | 2450 | #endif |
67c94753 | 2451 | { |
fa8d436c UD |
2452 | INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); |
2453 | mchunkptr next = chunk_at_offset(p, sz); | |
67c94753 | 2454 | |
fa8d436c | 2455 | do_check_chunk(av, p); |
67c94753 | 2456 | |
fa8d436c UD |
2457 | /* Chunk must claim to be free ... */ |
2458 | assert(!inuse(p)); | |
2459 | assert (!chunk_is_mmapped(p)); | |
67c94753 | 2460 | |
fa8d436c UD |
2461 | /* Unless a special marker, must have OK fields */ |
2462 | if ((unsigned long)(sz) >= MINSIZE) | |
2463 | { | |
2464 | assert((sz & MALLOC_ALIGN_MASK) == 0); | |
2465 | assert(aligned_OK(chunk2mem(p))); | |
2466 | /* ... matching footer field */ | |
2467 | assert(next->prev_size == sz); | |
2468 | /* ... and is fully consolidated */ | |
2469 | assert(prev_inuse(p)); | |
2470 | assert (next == av->top || inuse(next)); | |
2471 | ||
2472 | /* ... and has minimally sane links */ | |
2473 | assert(p->fd->bk == p); | |
2474 | assert(p->bk->fd == p); | |
2475 | } | |
2476 | else /* markers are always of size SIZE_SZ */ | |
2477 | assert(sz == SIZE_SZ); | |
67c94753 | 2478 | } |
67c94753 | 2479 | |
fa8d436c UD |
2480 | /* |
2481 | Properties of inuse chunks | |
2482 | */ | |
2483 | ||
2484 | #if __STD_C | |
2485 | static void do_check_inuse_chunk(mstate av, mchunkptr p) | |
f65fd747 | 2486 | #else |
fa8d436c | 2487 | static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p; |
f65fd747 UD |
2488 | #endif |
2489 | { | |
fa8d436c | 2490 | mchunkptr next; |
f65fd747 | 2491 | |
fa8d436c | 2492 | do_check_chunk(av, p); |
f65fd747 | 2493 | |
fa8d436c UD |
2494 | if (chunk_is_mmapped(p)) |
2495 | return; /* mmapped chunks have no next/prev */ | |
ca34d7a7 | 2496 | |
fa8d436c UD |
2497 | /* Check whether it claims to be in use ... */ |
2498 | assert(inuse(p)); | |
10dc2a90 | 2499 | |
fa8d436c | 2500 | next = next_chunk(p); |
10dc2a90 | 2501 | |
fa8d436c UD |
2502 | /* ... and is surrounded by OK chunks. |
2503 | Since more things can be checked with free chunks than inuse ones, | |
2504 | if an inuse chunk borders them and debug is on, it's worth doing them. | |
2505 | */ | |
2506 | if (!prev_inuse(p)) { | |
2507 | /* Note that we cannot even look at prev unless it is not inuse */ | |
2508 | mchunkptr prv = prev_chunk(p); | |
2509 | assert(next_chunk(prv) == p); | |
2510 | do_check_free_chunk(av, prv); | |
2511 | } | |
2512 | ||
2513 | if (next == av->top) { | |
2514 | assert(prev_inuse(next)); | |
2515 | assert(chunksize(next) >= MINSIZE); | |
2516 | } | |
2517 | else if (!inuse(next)) | |
2518 | do_check_free_chunk(av, next); | |
10dc2a90 UD |
2519 | } |
2520 | ||
fa8d436c UD |
2521 | /* |
2522 | Properties of chunks recycled from fastbins | |
2523 | */ | |
2524 | ||
10dc2a90 | 2525 | #if __STD_C |
fa8d436c | 2526 | static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s) |
10dc2a90 | 2527 | #else |
fa8d436c UD |
2528 | static void do_check_remalloced_chunk(av, p, s) |
2529 | mstate av; mchunkptr p; INTERNAL_SIZE_T s; | |
a2b08ee5 | 2530 | #endif |
10dc2a90 | 2531 | { |
fa8d436c UD |
2532 | INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); |
2533 | ||
2534 | if (!chunk_is_mmapped(p)) { | |
2535 | assert(av == arena_for_chunk(p)); | |
2536 | if (chunk_non_main_arena(p)) | |
2537 | assert(av != &main_arena); | |
2538 | else | |
2539 | assert(av == &main_arena); | |
2540 | } | |
2541 | ||
2542 | do_check_inuse_chunk(av, p); | |
2543 | ||
2544 | /* Legal size ... */ | |
2545 | assert((sz & MALLOC_ALIGN_MASK) == 0); | |
2546 | assert((unsigned long)(sz) >= MINSIZE); | |
2547 | /* ... and alignment */ | |
2548 | assert(aligned_OK(chunk2mem(p))); | |
2549 | /* chunk is less than MINSIZE more than request */ | |
2550 | assert((long)(sz) - (long)(s) >= 0); | |
2551 | assert((long)(sz) - (long)(s + MINSIZE) < 0); | |
10dc2a90 UD |
2552 | } |
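/*
  Worked example of the two size asserts above (assuming SIZE_SZ == 4
  and hence MINSIZE == 16, as on a typical 32-bit build): for an
  internal request s == 24, a recycled chunk of sz == 24 or sz == 32
  passes, while sz == 40 would fail, since 40 - (24 + 16) == 0 is not
  negative.
*/
#if 0
static void example_remalloc_bound(void)
{
  INTERNAL_SIZE_T s = 24;
  assert((long)24 - (long)s >= 0 && (long)24 - (long)(s + 16) < 0);
  assert((long)32 - (long)s >= 0 && (long)32 - (long)(s + 16) < 0);
}
#endif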
2553 | ||
fa8d436c UD |
2554 | /* |
2555 | Properties of nonrecycled chunks at the point they are malloced | |
2556 | */ | |
2557 | ||
10dc2a90 | 2558 | #if __STD_C |
fa8d436c | 2559 | static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s) |
10dc2a90 | 2560 | #else |
fa8d436c UD |
2561 | static void do_check_malloced_chunk(av, p, s) |
2562 | mstate av; mchunkptr p; INTERNAL_SIZE_T s; | |
a2b08ee5 | 2563 | #endif |
10dc2a90 | 2564 | { |
fa8d436c UD |
2565 | /* same as recycled case ... */ |
2566 | do_check_remalloced_chunk(av, p, s); | |
10dc2a90 | 2567 | |
fa8d436c UD |
2568 | /* |
2569 | ... plus, must obey implementation invariant that prev_inuse is | |
2570 | always true of any allocated chunk; i.e., that each allocated | |
2571 | chunk borders either a previously allocated and still in-use | |
2572 | chunk, or the base of its memory arena. This is ensured | |
2573 | by making all allocations from the `lowest' part of any found | 
2574 | chunk. This does not necessarily hold however for chunks | |
2575 | recycled via fastbins. | |
2576 | */ | |
10dc2a90 | 2577 | |
fa8d436c UD |
2578 | assert(prev_inuse(p)); |
2579 | } | |
10dc2a90 | 2580 | |
f65fd747 | 2581 | |
fa8d436c UD |
2582 | /* |
2583 | Properties of malloc_state. | |
f65fd747 | 2584 | |
fa8d436c UD |
2585 | This may be useful for debugging malloc, as well as detecting | 
2586 | user-program errors that somehow write into malloc_state. | 
f65fd747 | 2587 | |
fa8d436c UD |
2588 | If you are extending or experimenting with this malloc, you can |
2589 | probably figure out how to hack this routine to print out or | |
2590 | display chunk addresses, sizes, bins, and other instrumentation. | |
2591 | */ | |
f65fd747 | 2592 | |
fa8d436c UD |
2593 | static void do_check_malloc_state(mstate av) |
2594 | { | |
2595 | int i; | |
2596 | mchunkptr p; | |
2597 | mchunkptr q; | |
2598 | mbinptr b; | |
2599 | unsigned int binbit; | |
2600 | int empty; | |
2601 | unsigned int idx; | |
2602 | INTERNAL_SIZE_T size; | |
2603 | unsigned long total = 0; | |
2604 | int max_fast_bin; | |
f65fd747 | 2605 | |
fa8d436c UD |
2606 | /* internal size_t must be no wider than pointer type */ |
2607 | assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*)); | |
f65fd747 | 2608 | |
fa8d436c UD |
2609 | /* alignment is a power of 2 */ |
2610 | assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0); | |
f65fd747 | 2611 | |
fa8d436c UD |
2612 | /* cannot run remaining checks until fully initialized */ |
2613 | if (av->top == 0 || av->top == initial_top(av)) | |
2614 | return; | |
f65fd747 | 2615 | |
fa8d436c UD |
2616 | /* pagesize is a power of 2 */ |
2617 | assert((mp_.pagesize & (mp_.pagesize-1)) == 0); | |
f65fd747 | 2618 | |
fa8d436c UD |
2619 | /* A contiguous main_arena is consistent with sbrk_base. */ |
2620 | if (av == &main_arena && contiguous(av)) | |
2621 | assert((char*)mp_.sbrk_base + av->system_mem == | |
2622 | (char*)av->top + chunksize(av->top)); | |
2623 | ||
2624 | /* properties of fastbins */ | |
2625 | ||
2626 | /* max_fast is in allowed range */ | |
2627 | assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE)); | |
2628 | ||
2629 | max_fast_bin = fastbin_index(av->max_fast); | |
2630 | ||
2631 | for (i = 0; i < NFASTBINS; ++i) { | |
2632 | p = av->fastbins[i]; | |
2633 | ||
2634 | /* all bins past max_fast are empty */ | |
2635 | if (i > max_fast_bin) | |
2636 | assert(p == 0); | |
2637 | ||
2638 | while (p != 0) { | |
2639 | /* each chunk claims to be inuse */ | |
2640 | do_check_inuse_chunk(av, p); | |
2641 | total += chunksize(p); | |
2642 | /* chunk belongs in this bin */ | |
2643 | assert(fastbin_index(chunksize(p)) == i); | |
2644 | p = p->fd; | |
2645 | } | |
2646 | } | |
2647 | ||
2648 | if (total != 0) | |
2649 | assert(have_fastchunks(av)); | |
2650 | else if (!have_fastchunks(av)) | |
2651 | assert(total == 0); | |
2652 | ||
2653 | /* check normal bins */ | |
2654 | for (i = 1; i < NBINS; ++i) { | |
2655 | b = bin_at(av,i); | |
2656 | ||
2657 | /* binmap is accurate (except for bin 1 == unsorted_chunks) */ | |
2658 | if (i >= 2) { | |
2659 | binbit = get_binmap(av,i); | |
2660 | empty = last(b) == b; | |
2661 | if (!binbit) | |
2662 | assert(empty); | |
2663 | else if (!empty) | |
2664 | assert(binbit); | |
2665 | } | |
2666 | ||
2667 | for (p = last(b); p != b; p = p->bk) { | |
2668 | /* each chunk claims to be free */ | |
2669 | do_check_free_chunk(av, p); | |
2670 | size = chunksize(p); | |
2671 | total += size; | |
2672 | if (i >= 2) { | |
2673 | /* chunk belongs in bin */ | |
2674 | idx = bin_index(size); | |
2675 | assert(idx == i); | |
2676 | /* lists are sorted */ | |
a9177ff5 | 2677 | assert(p->bk == b || |
fa8d436c UD |
2678 | (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p)); |
2679 | } | |
2680 | /* chunk is followed by a legal chain of inuse chunks */ | |
2681 | for (q = next_chunk(p); | |
a9177ff5 | 2682 | (q != av->top && inuse(q) && |
fa8d436c UD |
2683 | (unsigned long)(chunksize(q)) >= MINSIZE); |
2684 | q = next_chunk(q)) | |
2685 | do_check_inuse_chunk(av, q); | |
2686 | } | |
2687 | } | |
f65fd747 | 2688 | |
fa8d436c UD |
2689 | /* top chunk is OK */ |
2690 | check_chunk(av, av->top); | |
2691 | ||
2692 | /* sanity checks for statistics */ | |
2693 | ||
2694 | #ifdef NO_THREADS | |
2695 | assert(total <= (unsigned long)(mp_.max_total_mem)); | |
2696 | assert(mp_.n_mmaps >= 0); | |
f65fd747 | 2697 | #endif |
fa8d436c UD |
2698 | assert(mp_.n_mmaps <= mp_.n_mmaps_max); |
2699 | assert(mp_.n_mmaps <= mp_.max_n_mmaps); | |
2700 | ||
2701 | assert((unsigned long)(av->system_mem) <= | |
2702 | (unsigned long)(av->max_system_mem)); | |
f65fd747 | 2703 | |
fa8d436c UD |
2704 | assert((unsigned long)(mp_.mmapped_mem) <= |
2705 | (unsigned long)(mp_.max_mmapped_mem)); | |
2706 | ||
2707 | #ifdef NO_THREADS | |
2708 | assert((unsigned long)(mp_.max_total_mem) >= | |
2709 | (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem)); | |
dfd2257a | 2710 | #endif |
fa8d436c UD |
2711 | } |
2712 | #endif | |
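/*
  As the comment above do_check_malloc_state suggests, the same bin
  walk can be adapted into a crude instrumentation dump.  A minimal
  sketch along those lines -- the function name is hypothetical, it
  assumes <stdio.h> is available, and it uses only macros already
  defined in this file:
*/
#if 0
static void example_dump_bins(mstate av)
{
  int i;
  mchunkptr p;
  for (i = 1; i < NBINS; ++i) {
    mbinptr b = bin_at(av, i);
    /* walk from least to most recently used, as the checks above do */
    for (p = last(b); p != b; p = p->bk)
      fprintf(stderr, "bin %d: chunk %p size %lu\n",
              i, (void*)p, (unsigned long)chunksize(p));
  }
}
#endif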
2713 | ||
2714 | ||
2715 | /* ----------------- Support for debugging hooks -------------------- */ | |
2716 | #include "hooks.c" | |
2717 | ||
2718 | ||
2719 | /* ----------- Routines dealing with system allocation -------------- */ | |
2720 | ||
2721 | /* | |
2722 | sysmalloc handles malloc cases requiring more memory from the system. | |
2723 | On entry, it is assumed that av->top does not have enough | |
2724 | space to service a request for nb bytes, thus requiring that av->top | 
2725 | be extended or replaced. | |
2726 | */ | |
2727 | ||
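/*
  In outline, the strategies attempted below are, in order:
  1. if the request is at or above mp_.mmap_threshold (and the mmap
     count permits), map it directly with mmap;
  2. for a non-main arena, grow the current sub-heap or chain on a
     newly allocated one;
  3. for the main arena, extend top via MORECORE (sbrk), falling back
     to mmap when MORECORE fails or cannot be used.
*/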
f65fd747 | 2728 | #if __STD_C |
fa8d436c | 2729 | static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av) |
f65fd747 | 2730 | #else |
fa8d436c | 2731 | static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; |
f65fd747 UD |
2732 | #endif |
2733 | { | |
fa8d436c UD |
2734 | mchunkptr old_top; /* incoming value of av->top */ |
2735 | INTERNAL_SIZE_T old_size; /* its size */ | |
2736 | char* old_end; /* its end address */ | |
f65fd747 | 2737 | |
fa8d436c UD |
2738 | long size; /* arg to first MORECORE or mmap call */ |
2739 | char* brk; /* return value from MORECORE */ | |
f65fd747 | 2740 | |
fa8d436c UD |
2741 | long correction; /* arg to 2nd MORECORE call */ |
2742 | char* snd_brk; /* 2nd return val */ | |
f65fd747 | 2743 | |
fa8d436c UD |
2744 | INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ |
2745 | INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ | |
2746 | char* aligned_brk; /* aligned offset into brk */ | |
f65fd747 | 2747 | |
fa8d436c UD |
2748 | mchunkptr p; /* the allocated/returned chunk */ |
2749 | mchunkptr remainder; /* remainder from allocation */ | |
2750 | unsigned long remainder_size; /* its size */ | |
2751 | ||
2752 | unsigned long sum; /* for updating stats */ | |
2753 | ||
2754 | size_t pagemask = mp_.pagesize - 1; | |
2755 | ||
2756 | ||
2757 | #if HAVE_MMAP | |
2758 | ||
2759 | /* | |
2760 | If have mmap, and the request size meets the mmap threshold, and | |
2761 | the system supports mmap, and there are few enough currently | |
2762 | allocated mmapped regions, try to directly map this request | |
2763 | rather than expanding top. | |
2764 | */ | |
f65fd747 | 2765 | |
fa8d436c UD |
2766 | if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) && |
2767 | (mp_.n_mmaps < mp_.n_mmaps_max)) { | |
f65fd747 | 2768 | |
fa8d436c UD |
2769 | char* mm; /* return value from mmap call*/ |
2770 | ||
2771 | /* | |
2772 | Round up size to nearest page. For mmapped chunks, the overhead | |
2773 | is one SIZE_SZ unit larger than for normal chunks, because there | |
2774 | is no following chunk whose prev_size field could be used. | |
2775 | */ | |
2776 | size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask; | |
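    /* Worked example of the rounding above, assuming a 4096-byte page,
       SIZE_SZ == 4 and MALLOC_ALIGN_MASK == 7: nb == 5000 gives
       (5000 + 4 + 7 + 4095) & ~4095 == 8192, i.e. two whole pages. */
#if 0
    assert(((5000 + 4 + 7 + 4095) & ~4095) == 8192);
#endif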
2777 | ||
2778 | /* Don't try if size wraps around 0 */ | |
2779 | if ((unsigned long)(size) > (unsigned long)(nb)) { | |
2780 | ||
2781 | mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); | |
a9177ff5 | 2782 | |
fa8d436c | 2783 | if (mm != MAP_FAILED) { |
a9177ff5 | 2784 | |
fa8d436c UD |
2785 | /* |
2786 | The offset to the start of the mmapped region is stored | |
2787 | in the prev_size field of the chunk. This allows us to adjust | |
a9177ff5 | 2788 | returned start address to meet alignment requirements here |
fa8d436c UD |
2789 | and in memalign(), and still be able to compute proper |
2790 | address argument for later munmap in free() and realloc(). | |
2791 | */ | |
a9177ff5 | 2792 | |
fa8d436c UD |
2793 | front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK; |
2794 | if (front_misalign > 0) { | |
2795 | correction = MALLOC_ALIGNMENT - front_misalign; | |
2796 | p = (mchunkptr)(mm + correction); | |
2797 | p->prev_size = correction; | |
2798 | set_head(p, (size - correction) |IS_MMAPPED); | |
2799 | } | |
2800 | else { | |
2801 | p = (mchunkptr)mm; | |
2802 | set_head(p, size|IS_MMAPPED); | |
2803 | } | |
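      /* Sketch of how this encoding is undone at free time: whichever
         branch above was taken, munmap_chunk() later in this file can
         recover the original mapping as
             base   = (char*)p - p->prev_size;
             length = chunksize(p) + p->prev_size;
         so the alignment correction is never lost. */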
a9177ff5 | 2804 | |
fa8d436c | 2805 | /* update statistics */ |
a9177ff5 RM |
2806 | |
2807 | if (++mp_.n_mmaps > mp_.max_n_mmaps) | |
fa8d436c | 2808 | mp_.max_n_mmaps = mp_.n_mmaps; |
a9177ff5 | 2809 | |
fa8d436c | 2810 | sum = mp_.mmapped_mem += size; |
a9177ff5 | 2811 | if (sum > (unsigned long)(mp_.max_mmapped_mem)) |
fa8d436c | 2812 | mp_.max_mmapped_mem = sum; |
8a4b65b4 | 2813 | #ifdef NO_THREADS |
fa8d436c | 2814 | sum += av->system_mem; |
a9177ff5 | 2815 | if (sum > (unsigned long)(mp_.max_total_mem)) |
fa8d436c | 2816 | mp_.max_total_mem = sum; |
8a4b65b4 | 2817 | #endif |
fa8d436c UD |
2818 | |
2819 | check_chunk(av, p); | |
a9177ff5 | 2820 | |
fa8d436c UD |
2821 | return chunk2mem(p); |
2822 | } | |
2823 | } | |
2824 | } | |
2825 | #endif | |
2826 | ||
2827 | /* Record incoming configuration of top */ | |
2828 | ||
2829 | old_top = av->top; | |
2830 | old_size = chunksize(old_top); | |
2831 | old_end = (char*)(chunk_at_offset(old_top, old_size)); | |
2832 | ||
a9177ff5 | 2833 | brk = snd_brk = (char*)(MORECORE_FAILURE); |
fa8d436c | 2834 | |
a9177ff5 | 2835 | /* |
fa8d436c UD |
2836 | If not the first time through, we require old_size to be |
2837 | at least MINSIZE and to have prev_inuse set. | |
2838 | */ | |
2839 | ||
a9177ff5 | 2840 | assert((old_top == initial_top(av) && old_size == 0) || |
fa8d436c UD |
2841 | ((unsigned long) (old_size) >= MINSIZE && |
2842 | prev_inuse(old_top) && | |
2843 | ((unsigned long)old_end & pagemask) == 0)); | |
2844 | ||
2845 | /* Precondition: not enough current space to satisfy nb request */ | |
2846 | assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE)); | |
2847 | ||
2848 | /* Precondition: all fastbins are consolidated */ | |
2849 | assert(!have_fastchunks(av)); | |
2850 | ||
2851 | ||
2852 | if (av != &main_arena) { | |
2853 | ||
2854 | heap_info *old_heap, *heap; | |
2855 | size_t old_heap_size; | |
2856 | ||
2857 | /* First try to extend the current heap. */ | |
2858 | old_heap = heap_for_ptr(old_top); | |
2859 | old_heap_size = old_heap->size; | |
2860 | if (grow_heap(old_heap, MINSIZE + nb - old_size) == 0) { | |
2861 | av->system_mem += old_heap->size - old_heap_size; | |
2862 | arena_mem += old_heap->size - old_heap_size; | |
2863 | #if 0 | |
2864 | if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem) | |
2865 | max_total_mem = mmapped_mem + arena_mem + sbrked_mem; | |
2866 | #endif | |
2867 | set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top) | |
2868 | | PREV_INUSE); | |
e6ac0e78 UD |
2869 | } |
2870 | else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) { | |
2871 | /* Use a newly allocated heap. */ | |
2872 | heap->ar_ptr = av; | |
2873 | heap->prev = old_heap; | |
2874 | av->system_mem += heap->size; | |
2875 | arena_mem += heap->size; | |
fa8d436c | 2876 | #if 0 |
e6ac0e78 UD |
2877 | if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem) |
2878 | max_total_mem = mmapped_mem + arena_mem + sbrked_mem; | |
fa8d436c | 2879 | #endif |
fa8d436c UD |
2880 | /* Set up the new top. */ |
2881 | top(av) = chunk_at_offset(heap, sizeof(*heap)); | |
2882 | set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE); | |
2883 | ||
2884 | /* Setup fencepost and free the old top chunk. */ | |
2885 | /* The fencepost takes at least MINSIZE bytes, because it might | |
2886 | become the top chunk again later. Note that a footer is set | |
2887 | up, too, although the chunk is marked in use. */ | |
2888 | old_size -= MINSIZE; | |
2889 | set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE); | |
2890 | if (old_size >= MINSIZE) { | |
2891 | set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE); | |
2892 | set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)); | |
2893 | set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA); | |
2894 | _int_free(av, chunk2mem(old_top)); | |
2895 | } else { | |
2896 | set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE); | |
2897 | set_foot(old_top, (old_size + 2*SIZE_SZ)); | |
2898 | } | |
2899 | } | |
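    /*
      Layout sketch after the fencepost setup above, for the
      old_size >= MINSIZE case (old_size here already reduced by
      MINSIZE; addresses grow to the right):

        old_top              old_top+old_size      +2*SIZE_SZ
        |-- freed chunk ------|-- fencepost 1 ------|-- fencepost 2 --|
           old_size bytes        size 2*SIZE_SZ,       size 0,
           (given to _int_free)  PREV_INUSE set        PREV_INUSE set

      The two small marker chunks keep _int_free() from consolidating
      past the end of the old sub-heap.
    */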
2900 | ||
2901 | } else { /* av == main_arena */ | |
2902 | ||
2903 | ||
2904 | /* Request enough space for nb + pad + overhead */ | |
2905 | ||
2906 | size = nb + mp_.top_pad + MINSIZE; | |
2907 | ||
2908 | /* | |
2909 | If contiguous, we can subtract out existing space that we hope to | |
2910 | combine with new space. We add it back later only if | |
2911 | we don't actually get contiguous space. | |
2912 | */ | |
2913 | ||
2914 | if (contiguous(av)) | |
2915 | size -= old_size; | |
2916 | ||
2917 | /* | |
2918 | Round to a multiple of page size. | |
2919 | If MORECORE is not contiguous, this ensures that we only call it | |
2920 | with whole-page arguments. And if MORECORE is contiguous and | |
2921 | this is not the first time through, this preserves page-alignment of | 
2922 | previous calls. Otherwise, we correct to page-align below. | |
2923 | */ | |
2924 | ||
2925 | size = (size + pagemask) & ~pagemask; | |
2926 | ||
2927 | /* | |
2928 | Don't try to call MORECORE if argument is so big as to appear | |
2929 | negative. Note that since mmap takes size_t arg, it may succeed | |
2930 | below even if we cannot call MORECORE. | |
2931 | */ | |
2932 | ||
a9177ff5 | 2933 | if (size > 0) |
fa8d436c UD |
2934 | brk = (char*)(MORECORE(size)); |
2935 | ||
2936 | if (brk != (char*)(MORECORE_FAILURE)) { | |
2937 | /* Call the `morecore' hook if necessary. */ | |
2938 | if (__after_morecore_hook) | |
2939 | (*__after_morecore_hook) (); | |
2940 | } else { | |
2941 | /* | |
2942 | If have mmap, try using it as a backup when MORECORE fails or | |
2943 | cannot be used. This is worth doing on systems that have "holes" in | |
2944 | address space, so sbrk cannot extend to give contiguous space, but | |
2945 | space is available elsewhere. Note that we ignore mmap max count | |
2946 | and threshold limits, since the space will not be used as a | |
2947 | segregated mmap region. | |
2948 | */ | |
2949 | ||
2950 | #if HAVE_MMAP | |
2951 | /* Cannot merge with old top, so add its size back in */ | |
2952 | if (contiguous(av)) | |
2953 | size = (size + old_size + pagemask) & ~pagemask; | |
2954 | ||
2955 | /* If we are relying on mmap as backup, then use larger units */ | |
2956 | if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE)) | |
2957 | size = MMAP_AS_MORECORE_SIZE; | |
2958 | ||
2959 | /* Don't try if size wraps around 0 */ | |
2960 | if ((unsigned long)(size) > (unsigned long)(nb)) { | |
2961 | ||
75bfdfc7 | 2962 | char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); |
a9177ff5 | 2963 | |
75bfdfc7 | 2964 | if (mbrk != MAP_FAILED) { |
a9177ff5 | 2965 | |
fa8d436c | 2966 | /* We do not need, and cannot use, another sbrk call to find end */ |
75bfdfc7 | 2967 | brk = mbrk; |
fa8d436c | 2968 | snd_brk = brk + size; |
a9177ff5 RM |
2969 | |
2970 | /* | |
2971 | Record that we no longer have a contiguous sbrk region. | |
fa8d436c UD |
2972 | After the first time mmap is used as backup, we do not |
2973 | ever rely on contiguous space since this could incorrectly | |
2974 | bridge regions. | |
2975 | */ | |
2976 | set_noncontiguous(av); | |
2977 | } | |
2978 | } | |
2979 | #endif | |
2980 | } | |
2981 | ||
2982 | if (brk != (char*)(MORECORE_FAILURE)) { | |
2983 | if (mp_.sbrk_base == 0) | |
2984 | mp_.sbrk_base = brk; | |
2985 | av->system_mem += size; | |
2986 | ||
2987 | /* | |
2988 | If MORECORE extends previous space, we can likewise extend top size. | |
2989 | */ | |
a9177ff5 | 2990 | |
fa8d436c UD |
2991 | if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) |
2992 | set_head(old_top, (size + old_size) | PREV_INUSE); | |
2993 | ||
886d5973 | 2994 | else if (contiguous(av) && old_size && brk < old_end) { |
fa8d436c UD |
2995 | /* Oops! Someone else killed our space.. Can't touch anything. */ |
2996 | assert(0); | |
2997 | } | |
2998 | ||
2999 | /* | |
3000 | Otherwise, make adjustments: | |
a9177ff5 | 3001 | |
fa8d436c UD |
3002 | * If the first time through or noncontiguous, we need to call sbrk |
3003 | just to find out where the end of memory lies. | |
3004 | ||
3005 | * We need to ensure that all returned chunks from malloc will meet | |
3006 | MALLOC_ALIGNMENT | |
3007 | ||
3008 | * If there was an intervening foreign sbrk, we need to adjust sbrk | |
3009 | request size to account for fact that we will not be able to | |
3010 | combine new space with existing space in old_top. | |
3011 | ||
3012 | * Almost all systems internally allocate whole pages at a time, in | |
3013 | which case we might as well use the whole last page of request. | |
3014 | So we allocate enough more memory to hit a page boundary now, | |
3015 | which in turn causes future contiguous calls to page-align. | |
3016 | */ | |
a9177ff5 | 3017 | |
fa8d436c | 3018 | else { |
fa8d436c UD |
3019 | front_misalign = 0; |
3020 | end_misalign = 0; | |
3021 | correction = 0; | |
3022 | aligned_brk = brk; | |
a9177ff5 | 3023 | |
fa8d436c | 3024 | /* handle contiguous cases */ |
a9177ff5 RM |
3025 | if (contiguous(av)) { |
3026 | ||
0cb71e02 UD |
3027 | /* Count foreign sbrk as system_mem. */ |
3028 | if (old_size) | |
3029 | av->system_mem += brk - old_end; | |
3030 | ||
fa8d436c UD |
3031 | /* Guarantee alignment of first new chunk made from this space */ |
3032 | ||
3033 | front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK; | |
3034 | if (front_misalign > 0) { | |
3035 | ||
3036 | /* | |
3037 | Skip over some bytes to arrive at an aligned position. | |
3038 | We don't need to specially mark these wasted front bytes. | |
3039 | They will never be accessed anyway because | |
3040 | prev_inuse of av->top (and any chunk created from its start) | |
3041 | is always true after initialization. | |
3042 | */ | |
3043 | ||
3044 | correction = MALLOC_ALIGNMENT - front_misalign; | |
3045 | aligned_brk += correction; | |
3046 | } | |
a9177ff5 | 3047 | |
fa8d436c UD |
3048 | /* |
3049 | If this isn't adjacent to existing space, then we will not | |
3050 | be able to merge with old_top space, so must add to 2nd request. | |
3051 | */ | |
a9177ff5 | 3052 | |
fa8d436c | 3053 | correction += old_size; |
a9177ff5 | 3054 | |
fa8d436c UD |
3055 | /* Extend the end address to hit a page boundary */ |
3056 | end_misalign = (INTERNAL_SIZE_T)(brk + size + correction); | |
3057 | correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; | |
a9177ff5 | 3058 | |
fa8d436c UD |
3059 | assert(correction >= 0); |
3060 | snd_brk = (char*)(MORECORE(correction)); | |
a9177ff5 | 3061 | |
fa8d436c UD |
3062 | /* |
3063 | If we can't allocate the correction, try to at least find out the current | 
3064 | brk. It might be enough to proceed without failing. | |
a9177ff5 | 3065 | |
fa8d436c UD |
3066 | Note that if second sbrk did NOT fail, we assume that space |
3067 | is contiguous with first sbrk. This is a safe assumption unless | |
3068 | program is multithreaded but doesn't use locks and a foreign sbrk | |
3069 | occurred between our first and second calls. | |
3070 | */ | |
a9177ff5 | 3071 | |
fa8d436c UD |
3072 | if (snd_brk == (char*)(MORECORE_FAILURE)) { |
3073 | correction = 0; | |
3074 | snd_brk = (char*)(MORECORE(0)); | |
3075 | } else | |
3076 | /* Call the `morecore' hook if necessary. */ | |
3077 | if (__after_morecore_hook) | |
3078 | (*__after_morecore_hook) (); | |
3079 | } | |
a9177ff5 | 3080 | |
fa8d436c | 3081 | /* handle non-contiguous cases */ |
a9177ff5 | 3082 | else { |
fa8d436c UD |
3083 | /* MORECORE/mmap must correctly align */ |
3084 | assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0); | |
a9177ff5 | 3085 | |
fa8d436c UD |
3086 | /* Find out current end of memory */ |
3087 | if (snd_brk == (char*)(MORECORE_FAILURE)) { | |
3088 | snd_brk = (char*)(MORECORE(0)); | |
3089 | } | |
3090 | } | |
a9177ff5 | 3091 | |
fa8d436c UD |
3092 | /* Adjust top based on results of second sbrk */ |
3093 | if (snd_brk != (char*)(MORECORE_FAILURE)) { | |
3094 | av->top = (mchunkptr)aligned_brk; | |
3095 | set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); | |
3096 | av->system_mem += correction; | |
a9177ff5 | 3097 | |
fa8d436c UD |
3098 | /* |
3099 | If not the first time through, we either have a | |
3100 | gap due to foreign sbrk or a non-contiguous region. Insert a | |
3101 | double fencepost at old_top to prevent consolidation with space | |
3102 | we don't own. These fenceposts are artificial chunks that are | |
3103 | marked as inuse and are in any case too small to use. We need | |
3104 | two to make sizes and alignments work out. | |
3105 | */ | |
a9177ff5 | 3106 | |
fa8d436c | 3107 | if (old_size != 0) { |
a9177ff5 | 3108 | /* |
fa8d436c UD |
3109 | Shrink old_top to insert fenceposts, keeping size a |
3110 | multiple of MALLOC_ALIGNMENT. We know there is at least | |
3111 | enough space in old_top to do this. | |
3112 | */ | |
3113 | old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK; | |
3114 | set_head(old_top, old_size | PREV_INUSE); | |
a9177ff5 | 3115 | |
fa8d436c UD |
3116 | /* |
3117 | Note that the following assignments completely overwrite | |
3118 | old_top when old_size was previously MINSIZE. This is | |
3119 | intentional. We need the fencepost, even if old_top otherwise gets | |
3120 | lost. | |
3121 | */ | |
3122 | chunk_at_offset(old_top, old_size )->size = | |
3123 | (2*SIZE_SZ)|PREV_INUSE; | |
3124 | ||
3125 | chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size = | |
3126 | (2*SIZE_SZ)|PREV_INUSE; | |
3127 | ||
3128 | /* If possible, release the rest. */ | |
3129 | if (old_size >= MINSIZE) { | |
3130 | _int_free(av, chunk2mem(old_top)); | |
3131 | } | |
3132 | ||
3133 | } | |
3134 | } | |
3135 | } | |
a9177ff5 | 3136 | |
fa8d436c UD |
3137 | /* Update statistics */ |
3138 | #ifdef NO_THREADS | |
3139 | sum = av->system_mem + mp_.mmapped_mem; | |
3140 | if (sum > (unsigned long)(mp_.max_total_mem)) | |
3141 | mp_.max_total_mem = sum; | |
3142 | #endif | |
3143 | ||
3144 | } | |
3145 | ||
3146 | } /* if (av != &main_arena) */ | |
3147 | ||
3148 | if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem)) | |
3149 | av->max_system_mem = av->system_mem; | |
3150 | check_malloc_state(av); | |
a9177ff5 | 3151 | |
fa8d436c UD |
3152 | /* finally, do the allocation */ |
3153 | p = av->top; | |
3154 | size = chunksize(p); | |
3155 | ||
3156 | /* check that one of the above allocation paths succeeded */ | |
3157 | if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { | |
3158 | remainder_size = size - nb; | |
3159 | remainder = chunk_at_offset(p, nb); | |
3160 | av->top = remainder; | |
3161 | set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
3162 | set_head(remainder, remainder_size | PREV_INUSE); | |
3163 | check_malloced_chunk(av, p, nb); | |
3164 | return chunk2mem(p); | |
3165 | } | |
3166 | ||
3167 | /* catch all failure paths */ | |
3168 | MALLOC_FAILURE_ACTION; | |
3169 | return 0; | |
3170 | } | |
3171 | ||
3172 | ||
3173 | /* | |
3174 | sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back | |
3175 | to the system (via negative arguments to sbrk) if there is unused | |
3176 | memory at the `high' end of the malloc pool. It is called | |
3177 | automatically by free() when top space exceeds the trim | |
3178 | threshold. It is also called by the public malloc_trim routine. It | |
3179 | returns 1 if it actually released any memory, else 0. | |
3180 | */ | |
3181 | ||
3182 | #if __STD_C | |
3183 | static int sYSTRIm(size_t pad, mstate av) | |
3184 | #else | |
3185 | static int sYSTRIm(pad, av) size_t pad; mstate av; | |
3186 | #endif | |
3187 | { | |
3188 | long top_size; /* Amount of top-most memory */ | |
3189 | long extra; /* Amount to release */ | |
3190 | long released; /* Amount actually released */ | |
3191 | char* current_brk; /* address returned by pre-check sbrk call */ | |
3192 | char* new_brk; /* address returned by post-check sbrk call */ | |
3193 | size_t pagesz; | |
3194 | ||
3195 | pagesz = mp_.pagesize; | |
3196 | top_size = chunksize(av->top); | |
a9177ff5 | 3197 | |
fa8d436c UD |
3198 | /* Release in pagesize units, keeping at least one page */ |
3199 | extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; | |
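  /* Worked example (assuming a 4096-byte page, pad == 0 and
     MINSIZE == 16): top_size == 20000 gives
       extra = ((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096 == 16384,
     i.e. four whole pages are offered back to the system and the
     remainder stays in top. */
#if 0
  assert(((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096 == 16384);
#endif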
a9177ff5 | 3200 | |
fa8d436c | 3201 | if (extra > 0) { |
a9177ff5 | 3202 | |
fa8d436c UD |
3203 | /* |
3204 | Only proceed if end of memory is where we last set it. | |
3205 | This avoids problems if there were foreign sbrk calls. | |
3206 | */ | |
3207 | current_brk = (char*)(MORECORE(0)); | |
3208 | if (current_brk == (char*)(av->top) + top_size) { | |
a9177ff5 | 3209 | |
fa8d436c UD |
3210 | /* |
3211 | Attempt to release memory. We ignore MORECORE return value, | |
3212 | and instead call again to find out where new end of memory is. | |
3213 | This avoids problems if the first call releases less than we asked, | 
3214 | or if failure somehow altered the brk value. (We could still | 
3215 | encounter problems if it altered brk in some very bad way, | |
3216 | but the only thing we can do is adjust anyway, which will cause | |
3217 | some downstream failure.) | |
3218 | */ | |
a9177ff5 | 3219 | |
fa8d436c UD |
3220 | MORECORE(-extra); |
3221 | /* Call the `morecore' hook if necessary. */ | |
3222 | if (__after_morecore_hook) | |
3223 | (*__after_morecore_hook) (); | |
3224 | new_brk = (char*)(MORECORE(0)); | |
a9177ff5 | 3225 | |
fa8d436c UD |
3226 | if (new_brk != (char*)MORECORE_FAILURE) { |
3227 | released = (long)(current_brk - new_brk); | |
a9177ff5 | 3228 | |
fa8d436c UD |
3229 | if (released != 0) { |
3230 | /* Success. Adjust top. */ | |
3231 | av->system_mem -= released; | |
3232 | set_head(av->top, (top_size - released) | PREV_INUSE); | |
3233 | check_malloc_state(av); | |
3234 | return 1; | |
3235 | } | |
3236 | } | |
3237 | } | |
3238 | } | |
3239 | return 0; | |
f65fd747 UD |
3240 | } |
3241 | ||
fa8d436c UD |
3242 | #ifdef HAVE_MMAP |
3243 | ||
431c33c0 UD |
3244 | static void |
3245 | internal_function | |
f65fd747 | 3246 | #if __STD_C |
431c33c0 | 3247 | munmap_chunk(mchunkptr p) |
f65fd747 | 3248 | #else |
431c33c0 | 3249 | munmap_chunk(p) mchunkptr p; |
f65fd747 UD |
3250 | #endif |
3251 | { | |
3252 | INTERNAL_SIZE_T size = chunksize(p); | |
3253 | int ret; | |
3254 | ||
3255 | assert (chunk_is_mmapped(p)); | |
fa8d436c UD |
3256 | #if 0 |
3257 | assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem)); | |
3258 | assert((mp_.n_mmaps > 0)); | |
3259 | #endif | |
3260 | assert(((p->prev_size + size) & (mp_.pagesize-1)) == 0); | |
f65fd747 | 3261 | |
fa8d436c UD |
3262 | mp_.n_mmaps--; |
3263 | mp_.mmapped_mem -= (size + p->prev_size); | |
f65fd747 UD |
3264 | |
3265 | ret = munmap((char *)p - p->prev_size, size + p->prev_size); | |
3266 | ||
3267 | /* munmap returns non-zero on failure */ | |
3268 | assert(ret == 0); | |
3269 | } | |
3270 | ||
3271 | #if HAVE_MREMAP | |
3272 | ||
431c33c0 UD |
3273 | static mchunkptr |
3274 | internal_function | |
f65fd747 | 3275 | #if __STD_C |
431c33c0 | 3276 | mremap_chunk(mchunkptr p, size_t new_size) |
f65fd747 | 3277 | #else |
431c33c0 | 3278 | mremap_chunk(p, new_size) mchunkptr p; size_t new_size; |
f65fd747 UD |
3279 | #endif |
3280 | { | |
fa8d436c | 3281 | size_t page_mask = mp_.pagesize - 1; |
f65fd747 UD |
3282 | INTERNAL_SIZE_T offset = p->prev_size; |
3283 | INTERNAL_SIZE_T size = chunksize(p); | |
3284 | char *cp; | |
3285 | ||
3286 | assert (chunk_is_mmapped(p)); | |
fa8d436c UD |
3287 | #if 0 |
3288 | assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem)); | |
3289 | assert((mp_.n_mmaps > 0)); | |
3290 | #endif | |
3291 | assert(((size + offset) & (mp_.pagesize-1)) == 0); | |
f65fd747 UD |
3292 | |
3293 | /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ | |
3294 | new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask; | |
3295 | ||
3296 | cp = (char *)mremap((char *)p - offset, size + offset, new_size, | |
3297 | MREMAP_MAYMOVE); | |
3298 | ||
431c33c0 | 3299 | if (cp == MAP_FAILED) return 0; |
f65fd747 UD |
3300 | |
3301 | p = (mchunkptr)(cp + offset); | |
3302 | ||
3303 | assert(aligned_OK(chunk2mem(p))); | |
3304 | ||
3305 | assert((p->prev_size == offset)); | |
3306 | set_head(p, (new_size - offset)|IS_MMAPPED); | |
3307 | ||
fa8d436c UD |
3308 | mp_.mmapped_mem -= size + offset; |
3309 | mp_.mmapped_mem += new_size; | |
3310 | if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem) | |
3311 | mp_.max_mmapped_mem = mp_.mmapped_mem; | |
8a4b65b4 | 3312 | #ifdef NO_THREADS |
fa8d436c UD |
3313 | if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) > |
3314 | mp_.max_total_mem) | |
3315 | mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem; | |
8a4b65b4 | 3316 | #endif |
f65fd747 UD |
3317 | return p; |
3318 | } | |
3319 | ||
3320 | #endif /* HAVE_MREMAP */ | |
3321 | ||
3322 | #endif /* HAVE_MMAP */ | |
3323 | ||
fa8d436c | 3324 | /*------------------------ Public wrappers. --------------------------------*/ |
f65fd747 | 3325 | |
fa8d436c UD |
3326 | Void_t* |
3327 | public_mALLOc(size_t bytes) | |
3328 | { | |
3329 | mstate ar_ptr; | |
3330 | Void_t *victim; | |
f65fd747 | 3331 | |
06d6611a | 3332 | __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook; |
fa8d436c UD |
3333 | if (hook != NULL) |
3334 | return (*hook)(bytes, RETURN_ADDRESS (0)); | |
f65fd747 | 3335 | |
fa8d436c UD |
3336 | arena_get(ar_ptr, bytes); |
3337 | if(!ar_ptr) | |
f65fd747 | 3338 | return 0; |
fa8d436c UD |
3339 | victim = _int_malloc(ar_ptr, bytes); |
3340 | if(!victim) { | |
3341 | /* Maybe the failure is due to running out of mmapped areas. */ | |
3342 | if(ar_ptr != &main_arena) { | |
3343 | (void)mutex_unlock(&ar_ptr->mutex); | |
3344 | (void)mutex_lock(&main_arena.mutex); | |
3345 | victim = _int_malloc(&main_arena, bytes); | |
3346 | (void)mutex_unlock(&main_arena.mutex); | |
3347 | } else { | |
3348 | #if USE_ARENAS | |
3349 | /* ... or sbrk() has failed and there is still a chance to mmap() */ | |
3350 | ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes); | |
3351 | (void)mutex_unlock(&main_arena.mutex); | |
3352 | if(ar_ptr) { | |
3353 | victim = _int_malloc(ar_ptr, bytes); | |
3354 | (void)mutex_unlock(&ar_ptr->mutex); | |
3355 | } | |
3356 | #endif | |
60f0e64b | 3357 | } |
fa8d436c UD |
3358 | } else |
3359 | (void)mutex_unlock(&ar_ptr->mutex); | |
3360 | assert(!victim || chunk_is_mmapped(mem2chunk(victim)) || | |
3361 | ar_ptr == arena_for_chunk(mem2chunk(victim))); | |
3362 | return victim; | |
f65fd747 | 3363 | } |
aa420660 UD |
3364 | #ifdef libc_hidden_def |
3365 | libc_hidden_def(public_mALLOc) | |
3366 | #endif | |
f65fd747 | 3367 | |
fa8d436c UD |
3368 | void |
3369 | public_fREe(Void_t* mem) | |
f65fd747 | 3370 | { |
fa8d436c UD |
3371 | mstate ar_ptr; |
3372 | mchunkptr p; /* chunk corresponding to mem */ | |
3373 | ||
06d6611a | 3374 | void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook; |
fa8d436c UD |
3375 | if (hook != NULL) { |
3376 | (*hook)(mem, RETURN_ADDRESS (0)); | |
3377 | return; | |
f65fd747 | 3378 | } |
f65fd747 | 3379 | |
fa8d436c UD |
3380 | if (mem == 0) /* free(0) has no effect */ |
3381 | return; | |
f65fd747 | 3382 | |
fa8d436c | 3383 | p = mem2chunk(mem); |
f65fd747 | 3384 | |
fa8d436c UD |
3385 | #if HAVE_MMAP |
3386 | if (chunk_is_mmapped(p)) /* release mmapped memory. */ | |
3387 | { | |
3388 | munmap_chunk(p); | |
3389 | return; | |
8a4b65b4 | 3390 | } |
f65fd747 | 3391 | #endif |
f65fd747 | 3392 | |
fa8d436c UD |
3393 | ar_ptr = arena_for_chunk(p); |
3394 | #if THREAD_STATS | |
3395 | if(!mutex_trylock(&ar_ptr->mutex)) | |
3396 | ++(ar_ptr->stat_lock_direct); | |
3397 | else { | |
3398 | (void)mutex_lock(&ar_ptr->mutex); | |
3399 | ++(ar_ptr->stat_lock_wait); | |
f65fd747 | 3400 | } |
f65fd747 | 3401 | #else |
fa8d436c | 3402 | (void)mutex_lock(&ar_ptr->mutex); |
f65fd747 | 3403 | #endif |
fa8d436c UD |
3404 | _int_free(ar_ptr, mem); |
3405 | (void)mutex_unlock(&ar_ptr->mutex); | |
f65fd747 | 3406 | } |
aa420660 UD |
3407 | #ifdef libc_hidden_def |
3408 | libc_hidden_def (public_fREe) | |
3409 | #endif | |
f65fd747 | 3410 | |
fa8d436c UD |
3411 | Void_t* |
3412 | public_rEALLOc(Void_t* oldmem, size_t bytes) | |
f65fd747 | 3413 | { |
fa8d436c UD |
3414 | mstate ar_ptr; |
3415 | INTERNAL_SIZE_T nb; /* padded request size */ | |
f65fd747 | 3416 | |
fa8d436c UD |
3417 | mchunkptr oldp; /* chunk corresponding to oldmem */ |
3418 | INTERNAL_SIZE_T oldsize; /* its size */ | |
8a4b65b4 | 3419 | |
fa8d436c | 3420 | Void_t* newp; /* chunk to return */ |
f65fd747 | 3421 | |
06d6611a | 3422 | __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) = |
fa8d436c UD |
3423 | __realloc_hook; |
3424 | if (hook != NULL) | |
3425 | return (*hook)(oldmem, bytes, RETURN_ADDRESS (0)); | |
f65fd747 | 3426 | |
fa8d436c UD |
3427 | #if REALLOC_ZERO_BYTES_FREES |
3428 | if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; } | |
f65fd747 | 3429 | #endif |
f65fd747 | 3430 | |
fa8d436c UD |
3431 | /* realloc of null is supposed to be same as malloc */ |
3432 | if (oldmem == 0) return public_mALLOc(bytes); | |
f65fd747 | 3433 | |
fa8d436c UD |
3434 | oldp = mem2chunk(oldmem); |
3435 | oldsize = chunksize(oldp); | |
f65fd747 | 3436 | |
fa8d436c | 3437 | checked_request2size(bytes, nb); |
f65fd747 | 3438 | |
fa8d436c UD |
3439 | #if HAVE_MMAP |
3440 | if (chunk_is_mmapped(oldp)) | |
3441 | { | |
3442 | Void_t* newmem; | |
f65fd747 | 3443 | |
fa8d436c UD |
3444 | #if HAVE_MREMAP |
3445 | newp = mremap_chunk(oldp, nb); | |
3446 | if(newp) return chunk2mem(newp); | |
f65fd747 | 3447 | #endif |
fa8d436c UD |
3448 | /* Note the extra SIZE_SZ overhead. */ |
3449 | if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */ | |
3450 | /* Must alloc, copy, free. */ | |
3451 | newmem = public_mALLOc(bytes); | |
3452 | if (newmem == 0) return 0; /* propagate failure */ | |
3453 | MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); | |
3454 | munmap_chunk(oldp); | |
3455 | return newmem; | |
3456 | } | |
dfd2257a | 3457 | #endif |
fa8d436c UD |
3458 | |
3459 | ar_ptr = arena_for_chunk(oldp); | |
3460 | #if THREAD_STATS | |
3461 | if(!mutex_trylock(&ar_ptr->mutex)) | |
3462 | ++(ar_ptr->stat_lock_direct); | |
3463 | else { | |
3464 | (void)mutex_lock(&ar_ptr->mutex); | |
3465 | ++(ar_ptr->stat_lock_wait); | |
3466 | } | |
f65fd747 | 3467 | #else |
fa8d436c | 3468 | (void)mutex_lock(&ar_ptr->mutex); |
f65fd747 | 3469 | #endif |
f65fd747 | 3470 | |
fa8d436c UD |
3471 | #ifndef NO_THREADS |
3472 | /* As in malloc(), remember this arena for the next allocation. */ | |
3473 | tsd_setspecific(arena_key, (Void_t *)ar_ptr); | |
f65fd747 UD |
3474 | #endif |
3475 | ||
fa8d436c | 3476 | newp = _int_realloc(ar_ptr, oldmem, bytes); |
f65fd747 | 3477 | |
fa8d436c UD |
3478 | (void)mutex_unlock(&ar_ptr->mutex); |
3479 | assert(!newp || chunk_is_mmapped(mem2chunk(newp)) || | |
3480 | ar_ptr == arena_for_chunk(mem2chunk(newp))); | |
3481 | return newp; | |
3482 | } | |
aa420660 UD |
3483 | #ifdef libc_hidden_def |
3484 | libc_hidden_def (public_rEALLOc) | |
3485 | #endif | |
f65fd747 | 3486 | |
fa8d436c UD |
3487 | Void_t* |
3488 | public_mEMALIGn(size_t alignment, size_t bytes) | |
3489 | { | |
3490 | mstate ar_ptr; | |
3491 | Void_t *p; | |
f65fd747 | 3492 | |
fa8d436c UD |
3493 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t, |
3494 | __const __malloc_ptr_t)) = | |
3495 | __memalign_hook; | |
3496 | if (hook != NULL) | |
3497 | return (*hook)(alignment, bytes, RETURN_ADDRESS (0)); | |
f65fd747 | 3498 | |
fa8d436c UD |
3499 | /* If need less alignment than we give anyway, just relay to malloc */ |
3500 | if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes); | |
1228ed5c | 3501 | |
fa8d436c UD |
3502 | /* Otherwise, ensure that it is at least a minimum chunk size */ |
3503 | if (alignment < MINSIZE) alignment = MINSIZE; | |
f65fd747 | 3504 | |
fa8d436c UD |
3505 | arena_get(ar_ptr, bytes + alignment + MINSIZE); |
3506 | if(!ar_ptr) | |
3507 | return 0; | |
3508 | p = _int_memalign(ar_ptr, alignment, bytes); | |
3509 | (void)mutex_unlock(&ar_ptr->mutex); | |
3510 | if(!p) { | |
3511 | /* Maybe the failure is due to running out of mmapped areas. */ | |
3512 | if(ar_ptr != &main_arena) { | |
3513 | (void)mutex_lock(&main_arena.mutex); | |
3514 | p = _int_memalign(&main_arena, alignment, bytes); | |
3515 | (void)mutex_unlock(&main_arena.mutex); | |
f65fd747 | 3516 | } else { |
e9b3e3c5 | 3517 | #if USE_ARENAS |
fa8d436c UD |
3518 | /* ... or sbrk() has failed and there is still a chance to mmap() */ |
3519 | ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes); | |
3520 | if(ar_ptr) { | |
3521 | p = _int_memalign(ar_ptr, alignment, bytes); | |
3522 | (void)mutex_unlock(&ar_ptr->mutex); | |
3523 | } | |
e9b3e3c5 | 3524 | #endif |
f65fd747 UD |
3525 | } |
3526 | } | |
fa8d436c UD |
3527 | assert(!p || chunk_is_mmapped(mem2chunk(p)) || |
3528 | ar_ptr == arena_for_chunk(mem2chunk(p))); | |
3529 | return p; | |
f65fd747 | 3530 | } |
aa420660 UD |
3531 | #ifdef libc_hidden_def |
3532 | libc_hidden_def (public_mEMALIGn) | |
3533 | #endif | |
f65fd747 | 3534 | |
fa8d436c UD |
3535 | Void_t* |
3536 | public_vALLOc(size_t bytes) | |
3537 | { | |
3538 | mstate ar_ptr; | |
3539 | Void_t *p; | |
f65fd747 | 3540 | |
fa8d436c UD |
3541 | if(__malloc_initialized < 0) |
3542 | ptmalloc_init (); | |
8088488d UD |
3543 | |
3544 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t, | |
3545 | __const __malloc_ptr_t)) = | |
3546 | __memalign_hook; | |
3547 | if (hook != NULL) | |
3548 | return (*hook)(mp_.pagesize, bytes, RETURN_ADDRESS (0)); | |
3549 | ||
fa8d436c UD |
3550 | arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE); |
3551 | if(!ar_ptr) | |
3552 | return 0; | |
3553 | p = _int_valloc(ar_ptr, bytes); | |
3554 | (void)mutex_unlock(&ar_ptr->mutex); | |
3555 | return p; | |
3556 | } | |
f65fd747 | 3557 | |
fa8d436c UD |
3558 | Void_t* |
3559 | public_pVALLOc(size_t bytes) | |
3560 | { | |
3561 | mstate ar_ptr; | |
3562 | Void_t *p; | |
f65fd747 | 3563 | |
fa8d436c UD |
3564 | if(__malloc_initialized < 0) |
3565 | ptmalloc_init (); | |
8088488d UD |
3566 | |
3567 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t, | |
3568 | __const __malloc_ptr_t)) = | |
3569 | __memalign_hook; | |
3570 | if (hook != NULL) | |
3571 | return (*hook)(mp_.pagesize, | |
3572 | (bytes + mp_.pagesize - 1) & ~(mp_.pagesize - 1), | |
3573 | RETURN_ADDRESS (0)); | |
3574 | ||
fa8d436c UD |
3575 | arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE); |
3576 | p = _int_pvalloc(ar_ptr, bytes); | |
3577 | (void)mutex_unlock(&ar_ptr->mutex); | |
3578 | return p; | |
3579 | } | |
f65fd747 | 3580 | |
fa8d436c UD |
3581 | Void_t* |
3582 | public_cALLOc(size_t n, size_t elem_size) | |
f65fd747 | 3583 | { |
fa8d436c UD |
3584 | mstate av; |
3585 | mchunkptr oldtop, p; | |
0950889b | 3586 | INTERNAL_SIZE_T bytes, sz, csz, oldtopsize; |
fa8d436c UD |
3587 | Void_t* mem; |
3588 | unsigned long clearsize; | |
3589 | unsigned long nclears; | |
3590 | INTERNAL_SIZE_T* d; | |
6c6bb055 | 3591 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) = |
fa8d436c | 3592 | __malloc_hook; |
0950889b UD |
3593 | |
3594 | /* size_t is unsigned so the behavior on overflow is defined. */ | |
3595 | bytes = n * elem_size; | |
d9af917d UD |
3596 | #define HALF_INTERNAL_SIZE_T \ |
3597 | (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2)) | |
3598 | if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) { | |
0be405c2 | 3599 | if (elem_size != 0 && bytes / elem_size != n) { |
d9af917d UD |
3600 | MALLOC_FAILURE_ACTION; |
3601 | return 0; | |
3602 | } | |
0950889b UD |
3603 | } |
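  /* Why the guard above suffices: when both n and elem_size are below
     HALF_INTERNAL_SIZE_T, i.e. below 2^(bits/2), their product fits in
     INTERNAL_SIZE_T and cannot wrap, so the division test only runs in
     the rare large-operand case.  E.g. with 32-bit sizes,
     0xffff * 0xffff == 0xfffe0001 still fits. */
#if 0
  assert(0xffffUL * 0xffffUL == 0xfffe0001UL);
#endif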
3604 | ||
6c6bb055 | 3605 | if (hook != NULL) { |
0950889b | 3606 | sz = bytes; |
fa8d436c UD |
3607 | mem = (*hook)(sz, RETURN_ADDRESS (0)); |
3608 | if(mem == 0) | |
3609 | return 0; | |
3610 | #ifdef HAVE_MEMCPY | |
3611 | return memset(mem, 0, sz); | |
a2b08ee5 | 3612 | #else |
fa8d436c UD |
3613 | while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */ |
3614 | return mem; | |
a2b08ee5 | 3615 | #endif |
10dc2a90 | 3616 | } |
10dc2a90 | 3617 | |
0950889b | 3618 | sz = bytes; |
fa8d436c UD |
3619 | |
3620 | arena_get(av, sz); | |
3621 | if(!av) | |
f65fd747 | 3622 | return 0; |
fa8d436c UD |
3623 | |
3624 | /* Check if we hand out the top chunk, in which case there may be no | |
3625 | need to clear. */ | |
3626 | #if MORECORE_CLEARS | |
3627 | oldtop = top(av); | |
3628 | oldtopsize = chunksize(top(av)); | |
3629 | #if MORECORE_CLEARS < 2 | |
3630 | /* Only newly allocated memory is guaranteed to be cleared. */ | |
3631 | if (av == &main_arena && | |
3632 | oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop) | |
3633 | oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop); | |
3634 | #endif | |
3635 | #endif | |
3636 | mem = _int_malloc(av, sz); | |
3637 | ||
3638 | /* Only clearing follows, so we can unlock early. */ | |
3639 | (void)mutex_unlock(&av->mutex); | |
3640 | ||
3641 | assert(!mem || chunk_is_mmapped(mem2chunk(mem)) || | |
3642 | av == arena_for_chunk(mem2chunk(mem))); | |
3643 | ||
3644 | if (mem == 0) { | |
7799b7b3 | 3645 | /* Maybe the failure is due to running out of mmapped areas. */ |
fa8d436c | 3646 | if(av != &main_arena) { |
7799b7b3 | 3647 | (void)mutex_lock(&main_arena.mutex); |
fa8d436c | 3648 | mem = _int_malloc(&main_arena, sz); |
7799b7b3 | 3649 | (void)mutex_unlock(&main_arena.mutex); |
e9b3e3c5 UD |
3650 | } else { |
3651 | #if USE_ARENAS | |
3652 | /* ... or sbrk() has failed and there is still a chance to mmap() */ | |
fa8d436c UD |
3653 | (void)mutex_lock(&main_arena.mutex); |
3654 | av = arena_get2(av->next ? av : 0, sz); | |
e9b3e3c5 | 3655 | (void)mutex_unlock(&main_arena.mutex); |
fa8d436c UD |
3656 | if(av) { |
3657 | mem = _int_malloc(av, sz); | |
3658 | (void)mutex_unlock(&av->mutex); | |
e9b3e3c5 UD |
3659 | } |
3660 | #endif | |
7799b7b3 | 3661 | } |
fa8d436c UD |
3662 | if (mem == 0) return 0; |
3663 | } | |
3664 | p = mem2chunk(mem); | |
f65fd747 | 3665 | |
fa8d436c UD |
3666 | /* Two optional cases in which clearing not necessary */ |
3667 | #if HAVE_MMAP | |
3668 | if (chunk_is_mmapped(p)) | |
3669 | return mem; | |
f65fd747 | 3670 | #endif |
f65fd747 | 3671 | |
fa8d436c | 3672 | csz = chunksize(p); |
f65fd747 | 3673 | |
fa8d436c UD |
3674 | #if MORECORE_CLEARS |
3675 | if (p == oldtop && csz > oldtopsize) { | |
3676 | /* clear only the bytes from non-freshly-sbrked memory */ | |
3677 | csz = oldtopsize; | |
f65fd747 | 3678 | } |
fa8d436c | 3679 | #endif |
f65fd747 | 3680 | |
fa8d436c UD |
3681 | /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that |
3682 | contents have an odd number of INTERNAL_SIZE_T-sized words; | |
3683 | minimally 3. */ | |
3684 | d = (INTERNAL_SIZE_T*)mem; | |
3685 | clearsize = csz - SIZE_SZ; | |
3686 | nclears = clearsize / sizeof(INTERNAL_SIZE_T); | |
3687 | assert(nclears >= 3); | |
f65fd747 | 3688 | |
fa8d436c UD |
3689 | if (nclears > 9) |
3690 | MALLOC_ZERO(d, clearsize); | |
f65fd747 | 3691 | |
fa8d436c UD |
3692 | else { |
3693 | *(d+0) = 0; | |
3694 | *(d+1) = 0; | |
3695 | *(d+2) = 0; | |
3696 | if (nclears > 4) { | |
3697 | *(d+3) = 0; | |
3698 | *(d+4) = 0; | |
3699 | if (nclears > 6) { | |
3700 | *(d+5) = 0; | |
3701 | *(d+6) = 0; | |
3702 | if (nclears > 8) { | |
3703 | *(d+7) = 0; | |
3704 | *(d+8) = 0; | |
3705 | } | |
f65fd747 UD |
3706 | } |
3707 | } | |
f65fd747 UD |
3708 | } |
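  /* Why nclears is always odd and at least 3 (with the default
     MALLOC_ALIGNMENT == 2*SIZE_SZ): csz is a multiple of 2*SIZE_SZ, so
     clearsize == csz - SIZE_SZ is an odd multiple of SIZE_SZ.  E.g.
     assuming SIZE_SZ == 4, the smallest chunk csz == 16 gives
     clearsize == 12 and nclears == 3, matching the assert above. */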
3709 | ||
fa8d436c UD |
3710 | return mem; |
3711 | } | |
f65fd747 | 3712 | |
fa8d436c UD |
3713 | Void_t** |
3714 | public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) | |
3715 | { | |
3716 | mstate ar_ptr; | |
3717 | Void_t** m; | |
f65fd747 | 3718 | |
fa8d436c UD |
3719 | arena_get(ar_ptr, n*elem_size); |
3720 | if(!ar_ptr) | |
3721 | return 0; | |
f65fd747 | 3722 | |
fa8d436c UD |
3723 | m = _int_icalloc(ar_ptr, n, elem_size, chunks); |
3724 | (void)mutex_unlock(&ar_ptr->mutex); | |
3725 | return m; | |
3726 | } | |
f65fd747 | 3727 | |
fa8d436c UD |
3728 | Void_t** |
3729 | public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) | |
3730 | { | |
3731 | mstate ar_ptr; | |
3732 | Void_t** m; | |
f65fd747 | 3733 | |
fa8d436c UD |
3734 | arena_get(ar_ptr, 0); |
3735 | if(!ar_ptr) | |
3736 | return 0; | |
f65fd747 | 3737 | |
fa8d436c UD |
3738 | m = _int_icomalloc(ar_ptr, n, sizes, chunks); |
3739 | (void)mutex_unlock(&ar_ptr->mutex); | |
3740 | return m; | |
3741 | } | |
f65fd747 | 3742 | |
fa8d436c | 3743 | #ifndef _LIBC |
f65fd747 | 3744 | |
fa8d436c UD |
3745 | void |
3746 | public_cFREe(Void_t* m) | |
3747 | { | |
3748 | public_fREe(m); | |
3749 | } | |
f65fd747 | 3750 | |
fa8d436c | 3751 | #endif /* _LIBC */ |
f65fd747 | 3752 | |
fa8d436c UD |
3753 | int |
3754 | public_mTRIm(size_t s) | |
3755 | { | |
3756 | int result; | |
f65fd747 | 3757 | |
fa8d436c UD |
3758 | (void)mutex_lock(&main_arena.mutex); |
3759 | result = mTRIm(s); | |
3760 | (void)mutex_unlock(&main_arena.mutex); | |
3761 | return result; | |
3762 | } | |
f65fd747 | 3763 | |
fa8d436c UD |
3764 | size_t |
3765 | public_mUSABLe(Void_t* m) | |
3766 | { | |
3767 | size_t result; | |
f65fd747 | 3768 | |
fa8d436c UD |
3769 | result = mUSABLe(m); |
3770 | return result; | |
3771 | } | |
f65fd747 | 3772 | |
fa8d436c UD |
3773 | void |
3774 | public_mSTATs() | |
3775 | { | |
3776 | mSTATs(); | |
3777 | } | |
f65fd747 | 3778 | |
fa8d436c UD |
3779 | struct mallinfo public_mALLINFo() |
3780 | { | |
3781 | struct mallinfo m; | |
f65fd747 | 3782 | |
6a00759b UD |
3783 | if(__malloc_initialized < 0) |
3784 | ptmalloc_init (); | |
fa8d436c UD |
3785 | (void)mutex_lock(&main_arena.mutex); |
3786 | m = mALLINFo(&main_arena); | |
3787 | (void)mutex_unlock(&main_arena.mutex); | |
3788 | return m; | |
f65fd747 UD |
3789 | } |
3790 | ||
fa8d436c UD |
3791 | int |
3792 | public_mALLOPt(int p, int v) | |
3793 | { | |
3794 | int result; | |
3795 | result = mALLOPt(p, v); | |
3796 | return result; | |
3797 | } | |
f65fd747 UD |
3798 | |
3799 | /* | |
fa8d436c | 3800 | ------------------------------ malloc ------------------------------ |
f65fd747 UD |
3801 | */ |
3802 | ||
f1c5213d | 3803 | Void_t* |
fa8d436c | 3804 | _int_malloc(mstate av, size_t bytes) |
f65fd747 | 3805 | { |
fa8d436c UD |
3806 | INTERNAL_SIZE_T nb; /* normalized request size */ |
3807 | unsigned int idx; /* associated bin index */ | |
3808 | mbinptr bin; /* associated bin */ | |
3809 | mfastbinptr* fb; /* associated fastbin */ | |
f65fd747 | 3810 | |
fa8d436c UD |
3811 | mchunkptr victim; /* inspected/selected chunk */ |
3812 | INTERNAL_SIZE_T size; /* its size */ | |
3813 | int victim_index; /* its bin index */ | |
f65fd747 | 3814 | |
fa8d436c UD |
3815 | mchunkptr remainder; /* remainder from a split */ |
3816 | unsigned long remainder_size; /* its size */ | |
8a4b65b4 | 3817 | |
fa8d436c UD |
3818 | unsigned int block; /* bit map traverser */ |
3819 | unsigned int bit; /* bit map traverser */ | |
3820 | unsigned int map; /* current word of binmap */ | |
8a4b65b4 | 3821 | |
fa8d436c UD |
3822 | mchunkptr fwd; /* misc temp for linking */ |
3823 | mchunkptr bck; /* misc temp for linking */ | |
8a4b65b4 | 3824 | |
fa8d436c UD |
3825 | /* |
3826 | Convert request size to internal form by adding SIZE_SZ bytes | |
3827 | overhead plus possibly more to obtain necessary alignment and/or | |
3828 | to obtain a size of at least MINSIZE, the smallest allocatable | |
3829 | size. Also, checked_request2size traps (returning 0) request sizes | |
3830 | that are so large that they wrap around zero when padded and | |
3831 | aligned. | |
3832 | */ | |
f65fd747 | 3833 | |
fa8d436c | 3834 | checked_request2size(bytes, nb); |
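  /* Worked example (assuming SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and
     MINSIZE == 16): bytes == 20 pads to 20 + 4 == 24, which is already
     8-byte aligned, so nb == 24; bytes == 5 pads to 9 but is bumped up
     to MINSIZE, so nb == 16. */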
f65fd747 | 3835 | |
fa8d436c UD |
3836 | /* |
3837 | If the size qualifies as a fastbin, first check corresponding bin. | |
3838 | This code is safe to execute even if av is not yet initialized, so we | |
3839 | can try it without checking, which saves some time on this fast path. | |
3840 | */ | |
f65fd747 | 3841 | |
a9177ff5 | 3842 | if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) { |
6cce6540 UD |
3843 | long int idx = fastbin_index(nb); |
3844 | fb = &(av->fastbins[idx]); | |
fa8d436c | 3845 | if ( (victim = *fb) != 0) { |
6cce6540 UD |
3846 | if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0)) |
3847 | malloc_printerr (check_action, "malloc(): memory corruption (fast)", | |
3848 | chunk2mem (victim)); | |
fa8d436c UD |
3849 | *fb = victim->fd; |
3850 | check_remalloced_chunk(av, victim, nb); | |
3851 | return chunk2mem(victim); | |
3852 | } | |
f65fd747 UD |
3853 | } |
3854 | ||
fa8d436c UD |
3855 | /* |
3856 | If a small request, check regular bin. Since these "smallbins" | |
3857 | hold one size each, no searching within bins is necessary. | |
3858 | (For a large request, we need to wait until unsorted chunks are | |
3859 | processed to find best fit. But for small ones, fits are exact | |
3860 | anyway, so we can check now, which is faster.) | |
3861 | */ | |
f65fd747 | 3862 | |
fa8d436c UD |
3863 | if (in_smallbin_range(nb)) { |
3864 | idx = smallbin_index(nb); | |
3865 | bin = bin_at(av,idx); | |
7799b7b3 | 3866 | |
fa8d436c UD |
3867 | if ( (victim = last(bin)) != bin) { |
3868 | if (victim == 0) /* initialization check */ | |
3869 | malloc_consolidate(av); | |
3870 | else { | |
3871 | bck = victim->bk; | |
3872 | set_inuse_bit_at_offset(victim, nb); | |
3873 | bin->bk = bck; | |
3874 | bck->fd = bin; | |
3875 | ||
3876 | if (av != &main_arena) | |
3877 | victim->size |= NON_MAIN_ARENA; | |
3878 | check_malloced_chunk(av, victim, nb); | |
3879 | return chunk2mem(victim); | |
3880 | } | |
3881 | } | |
f65fd747 UD |
3882 | } |
3883 | ||
a9177ff5 | 3884 | /* |
fa8d436c UD |
3885 | If this is a large request, consolidate fastbins before continuing. |
3886 | While it might look excessive to kill all fastbins before | |
3887 | even seeing if there is space available, this avoids | |
3888 | fragmentation problems normally associated with fastbins. | |
3889 | Also, in practice, programs tend to have runs of either small or | |
a9177ff5 | 3890 | large requests, but less often mixtures, so consolidation is not |
fa8d436c UD |
3891 | invoked all that often in most programs. And the programs in which | 
3892 | it is called frequently tend otherwise to fragment. | 
3893 | */ | |
7799b7b3 | 3894 | |
fa8d436c UD |
3895 | else { |
3896 | idx = largebin_index(nb); | |
a9177ff5 | 3897 | if (have_fastchunks(av)) |
fa8d436c | 3898 | malloc_consolidate(av); |
7799b7b3 | 3899 | } |
f65fd747 | 3900 | |
fa8d436c UD |
3901 | /* |
3902 | Process recently freed or remaindered chunks, taking one only if | |
3903 | it is an exact fit, or, if this is a small request, the chunk is the remainder from | 
3904 | the most recent non-exact fit. Place other traversed chunks in | |
3905 | bins. Note that this step is the only place in any routine where | |
3906 | chunks are placed in bins. | |
3907 | ||
3908 | The outer loop here is needed because we might not realize until | |
3909 | near the end of malloc that we should have consolidated, so must | |
3910 | do so and retry. This happens at most once, and only when we would | |
3911 | otherwise need to expand memory to service a "small" request. | |
3912 | */ | |
a9177ff5 RM |
3913 | |
3914 | for(;;) { | |
3915 | ||
fa8d436c UD |
3916 | while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) { |
3917 | bck = victim->bk; | |
6cce6540 UD |
3918 | if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0) |
3919 | || __builtin_expect (victim->size > av->system_mem, 0)) | |
3920 | malloc_printerr (check_action, "malloc(): memory corruption", | |
3921 | chunk2mem (victim)); | |
fa8d436c UD |
3922 | size = chunksize(victim); |
3923 | ||
a9177ff5 | 3924 | /* |
fa8d436c UD |
3925 | If a small request, try to use last remainder if it is the |
3926 | only chunk in unsorted bin. This helps promote locality for | |
3927 | runs of consecutive small requests. This is the only | |
3928 | exception to best-fit, and applies only when there is | |
3929 | no exact fit for a small chunk. | |
3930 | */ | |
3931 | ||
a9177ff5 | 3932 | if (in_smallbin_range(nb) && |
fa8d436c UD |
3933 | bck == unsorted_chunks(av) && |
3934 | victim == av->last_remainder && | |
3935 | (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { | |
3936 | ||
3937 | /* split and reattach remainder */ | |
3938 | remainder_size = size - nb; | |
3939 | remainder = chunk_at_offset(victim, nb); | |
3940 | unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; | |
a9177ff5 | 3941 | av->last_remainder = remainder; |
fa8d436c | 3942 | remainder->bk = remainder->fd = unsorted_chunks(av); |
a9177ff5 | 3943 | |
fa8d436c UD |
3944 | set_head(victim, nb | PREV_INUSE | |
3945 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
3946 | set_head(remainder, remainder_size | PREV_INUSE); | |
3947 | set_foot(remainder, remainder_size); | |
a9177ff5 | 3948 | |
fa8d436c UD |
3949 | check_malloced_chunk(av, victim, nb); |
3950 | return chunk2mem(victim); | |
3951 | } | |
f65fd747 | 3952 | |
fa8d436c UD |
3953 | /* remove from unsorted list */ |
3954 | unsorted_chunks(av)->bk = bck; | |
3955 | bck->fd = unsorted_chunks(av); | |
a9177ff5 | 3956 | |
fa8d436c | 3957 | /* Take now instead of binning if exact fit */ |
a9177ff5 | 3958 | |
fa8d436c UD |
3959 | if (size == nb) { |
3960 | set_inuse_bit_at_offset(victim, size); | |
3961 | if (av != &main_arena) | |
3962 | victim->size |= NON_MAIN_ARENA; | |
3963 | check_malloced_chunk(av, victim, nb); | |
3964 | return chunk2mem(victim); | |
3965 | } | |
a9177ff5 | 3966 | |
fa8d436c | 3967 | /* place chunk in bin */ |
a9177ff5 | 3968 | |
fa8d436c UD |
3969 | if (in_smallbin_range(size)) { |
3970 | victim_index = smallbin_index(size); | |
3971 | bck = bin_at(av, victim_index); | |
3972 | fwd = bck->fd; | |
3973 | } | |
3974 | else { | |
3975 | victim_index = largebin_index(size); | |
3976 | bck = bin_at(av, victim_index); | |
3977 | fwd = bck->fd; | |
3978 | ||
3979 | /* maintain large bins in sorted order */ | |
3980 | if (fwd != bck) { | |
3981 | /* Or with inuse bit to speed comparisons */ | |
3982 | size |= PREV_INUSE; | |
3983 | /* if smaller than smallest, bypass loop below */ | |
3984 | assert((bck->bk->size & NON_MAIN_ARENA) == 0); | |
3985 | if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) { | |
3986 | fwd = bck; | |
3987 | bck = bck->bk; | |
3988 | } | |
3989 | else { | |
3990 | assert((fwd->size & NON_MAIN_ARENA) == 0); | |
3991 | while ((unsigned long)(size) < (unsigned long)(fwd->size)) { | |
3992 | fwd = fwd->fd; | |
3993 | assert((fwd->size & NON_MAIN_ARENA) == 0); | |
3994 | } | |
3995 | bck = fwd->bk; | |
3996 | } | |
3997 | } | |
3998 | } | |
a9177ff5 | 3999 | |
fa8d436c UD |
4000 | mark_bin(av, victim_index); |
4001 | victim->bk = bck; | |
4002 | victim->fd = fwd; | |
4003 | fwd->bk = victim; | |
4004 | bck->fd = victim; | |
4005 | } | |
a9177ff5 | 4006 | |
fa8d436c UD |
4007 | /* |
4008 | If a large request, scan through the chunks of current bin in | |
4009 | sorted order to find smallest that fits. This is the only step | |
4010 | where an unbounded number of chunks might be scanned without doing | |
4011 | anything useful with them. However the lists tend to be short. | |
4012 | */ | |
a9177ff5 | 4013 | |
fa8d436c UD |
4014 | if (!in_smallbin_range(nb)) { |
4015 | bin = bin_at(av, idx); | |
f65fd747 | 4016 | |
fa8d436c UD |
4017 | /* skip scan if empty or largest chunk is too small */ |
4018 | if ((victim = last(bin)) != bin && | |
4019 | (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) { | |
f65fd747 | 4020 | |
a9177ff5 | 4021 | while (((unsigned long)(size = chunksize(victim)) < |
fa8d436c UD |
4022 | (unsigned long)(nb))) |
4023 | victim = victim->bk; | |
f65fd747 | 4024 | |
fa8d436c UD |
4025 | remainder_size = size - nb; |
4026 | unlink(victim, bck, fwd); | |
a9177ff5 | 4027 | |
fa8d436c UD |
4028 | /* Exhaust */ |
4029 | if (remainder_size < MINSIZE) { | |
4030 | set_inuse_bit_at_offset(victim, size); | |
4031 | if (av != &main_arena) | |
4032 | victim->size |= NON_MAIN_ARENA; | |
4033 | check_malloced_chunk(av, victim, nb); | |
4034 | return chunk2mem(victim); | |
4035 | } | |
4036 | /* Split */ | |
4037 | else { | |
4038 | remainder = chunk_at_offset(victim, nb); | |
4039 | unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; | |
4040 | remainder->bk = remainder->fd = unsorted_chunks(av); | |
4041 | set_head(victim, nb | PREV_INUSE | | |
4042 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4043 | set_head(remainder, remainder_size | PREV_INUSE); | |
4044 | set_foot(remainder, remainder_size); | |
4045 | check_malloced_chunk(av, victim, nb); | |
4046 | return chunk2mem(victim); | |
a9177ff5 | 4047 | } |
fa8d436c | 4048 | } |
a9177ff5 | 4049 | } |
f65fd747 | 4050 | |
fa8d436c UD |
4051 | /* |
4052 | Search for a chunk by scanning bins, starting with next largest | |
4053 | bin. This search is strictly by best-fit; i.e., the smallest | |
4054 | (with ties going to approximately the least recently used) chunk | |
4055 | that fits is selected. | |
a9177ff5 | 4056 | |
fa8d436c UD |
4057 | The bitmap avoids needing to check that most blocks are nonempty. |
4058 | The particular case of skipping all bins during warm-up phases | |
4059 | when no chunks have been returned yet is faster than it might look. | |
4060 | */ | |
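    /*
      For illustration only: a standalone model of the bitmap-directed
      search below.  It assumes the conventional definitions from earlier
      in this file (BINMAPSHIFT == 5, so one 32-bit binmap word covers 32
      bins; idx2block and idx2bit reduce to the shift/mask forms shown):

        // index of the first marked bin at or after idx, or -1 if none
        static int first_marked(const unsigned map[], unsigned nwords,
                                unsigned idx)
        {
          unsigned block = idx >> 5;          // idx2block(idx)
          unsigned bit   = 1u << (idx & 31);  // idx2bit(idx)

          while (block < nwords) {
            if ((map[block] & ~(bit - 1)) == 0) {  // no set bits left here
              ++block;                             // skip the whole word
              bit = 1u;
              idx = block << 5;
              continue;
            }
            while ((map[block] & bit) == 0) {      // advance to a set bit
              bit <<= 1;
              ++idx;
            }
            return (int) idx;
          }
          return -1;                               // all remaining bins empty
        }
    */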
a9177ff5 | 4061 | |
fa8d436c UD |
4062 | ++idx; |
4063 | bin = bin_at(av,idx); | |
4064 | block = idx2block(idx); | |
4065 | map = av->binmap[block]; | |
4066 | bit = idx2bit(idx); | |
a9177ff5 | 4067 | |
fa8d436c UD |
4068 | for (;;) { |
4069 | ||
4070 | /* Skip rest of block if there are no more set bits in this block. */ | |
4071 | if (bit > map || bit == 0) { | |
4072 | do { | |
4073 | if (++block >= BINMAPSIZE) /* out of bins */ | |
4074 | goto use_top; | |
4075 | } while ( (map = av->binmap[block]) == 0); | |
4076 | ||
4077 | bin = bin_at(av, (block << BINMAPSHIFT)); | |
4078 | bit = 1; | |
4079 | } | |
a9177ff5 | 4080 | |
fa8d436c UD |
4081 | /* Advance to bin with set bit. There must be one. */ |
4082 | while ((bit & map) == 0) { | |
4083 | bin = next_bin(bin); | |
4084 | bit <<= 1; | |
4085 | assert(bit != 0); | |
4086 | } | |
a9177ff5 | 4087 | |
fa8d436c UD |
4088 | /* Inspect the bin. It is likely to be non-empty */ |
4089 | victim = last(bin); | |
a9177ff5 | 4090 | |
fa8d436c UD |
4091 | /* If a false alarm (empty bin), clear the bit. */ |
4092 | if (victim == bin) { | |
4093 | av->binmap[block] = map &= ~bit; /* Write through */ | |
4094 | bin = next_bin(bin); | |
4095 | bit <<= 1; | |
4096 | } | |
a9177ff5 | 4097 | |
fa8d436c UD |
4098 | else { |
4099 | size = chunksize(victim); | |
4100 | ||
4101 | /* We know the first chunk in this bin is big enough to use. */ | |
4102 | assert((unsigned long)(size) >= (unsigned long)(nb)); | |
4103 | ||
4104 | remainder_size = size - nb; | |
a9177ff5 | 4105 | |
fa8d436c UD |
4106 | /* unlink */ |
4107 | bck = victim->bk; | |
4108 | bin->bk = bck; | |
4109 | bck->fd = bin; | |
a9177ff5 | 4110 | |
fa8d436c UD |
4111 | /* Exhaust */ |
4112 | if (remainder_size < MINSIZE) { | |
4113 | set_inuse_bit_at_offset(victim, size); | |
4114 | if (av != &main_arena) | |
4115 | victim->size |= NON_MAIN_ARENA; | |
4116 | check_malloced_chunk(av, victim, nb); | |
4117 | return chunk2mem(victim); | |
4118 | } | |
a9177ff5 | 4119 | |
fa8d436c UD |
4120 | /* Split */ |
4121 | else { | |
4122 | remainder = chunk_at_offset(victim, nb); | |
a9177ff5 | 4123 | |
fa8d436c UD |
4124 | unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; |
4125 | remainder->bk = remainder->fd = unsorted_chunks(av); | |
4126 | /* advertise as last remainder */ | |
a9177ff5 RM |
4127 | if (in_smallbin_range(nb)) |
4128 | av->last_remainder = remainder; | |
4129 | ||
fa8d436c UD |
4130 | set_head(victim, nb | PREV_INUSE | |
4131 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4132 | set_head(remainder, remainder_size | PREV_INUSE); | |
4133 | set_foot(remainder, remainder_size); | |
4134 | check_malloced_chunk(av, victim, nb); | |
4135 | return chunk2mem(victim); | |
4136 | } | |
4137 | } | |
4138 | } | |
f65fd747 | 4139 | |
a9177ff5 | 4140 | use_top: |
fa8d436c UD |
4141 | /* |
4142 | If large enough, split off the chunk bordering the end of memory | |
4143 | (held in av->top). Note that this is in accord with the best-fit | |
4144 | search rule. In effect, av->top is treated as larger (and thus | |
4145 | less well fitting) than any other available chunk since it can | |
4146 | be extended to be as large as necessary (up to system | |
4147 | limitations). | |
4148 | ||
4149 | We require that av->top always exists (i.e., has size >= | |
4150 | MINSIZE) after initialization, so if it would otherwise be | |
4151 | exhausted by the current request, it is replenished. (The main | |
4152 | reason for ensuring it exists is that we may need MINSIZE space | |
4153 | to put in fenceposts in sysmalloc.) | |
4154 | */ | |
f65fd747 | 4155 | |
fa8d436c UD |
4156 | victim = av->top; |
4157 | size = chunksize(victim); | |
a9177ff5 | 4158 | |
fa8d436c UD |
4159 | if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { |
4160 | remainder_size = size - nb; | |
4161 | remainder = chunk_at_offset(victim, nb); | |
4162 | av->top = remainder; | |
4163 | set_head(victim, nb | PREV_INUSE | | |
4164 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4165 | set_head(remainder, remainder_size | PREV_INUSE); | |
f65fd747 | 4166 | |
fa8d436c UD |
4167 | check_malloced_chunk(av, victim, nb); |
4168 | return chunk2mem(victim); | |
4169 | } | |
f65fd747 | 4170 | |
fa8d436c UD |
4171 | /* |
4172 | If there is space available in fastbins, consolidate and retry, | |
4173 | to possibly avoid expanding memory. This can occur only if nb is | |
4174 | in smallbin range so we didn't consolidate upon entry. | |
4175 | */ | |
f65fd747 | 4176 | |
fa8d436c UD |
4177 | else if (have_fastchunks(av)) { |
4178 | assert(in_smallbin_range(nb)); | |
4179 | malloc_consolidate(av); | |
4180 | idx = smallbin_index(nb); /* restore original bin index */ | |
4181 | } | |
f65fd747 | 4182 | |
a9177ff5 RM |
4183 | /* |
4184 | Otherwise, relay to handle system-dependent cases | |
fa8d436c | 4185 | */ |
a9177ff5 RM |
4186 | else |
4187 | return sYSMALLOc(nb, av); | |
fa8d436c UD |
4188 | } |
4189 | } | |
f65fd747 | 4190 | |
fa8d436c UD |
4191 | /* |
4192 | ------------------------------ free ------------------------------ | |
f65fd747 UD |
4193 | */ |
4194 | ||
f1c5213d | 4195 | void |
fa8d436c | 4196 | _int_free(mstate av, Void_t* mem) |
f65fd747 | 4197 | { |
fa8d436c UD |
4198 | mchunkptr p; /* chunk corresponding to mem */ |
4199 | INTERNAL_SIZE_T size; /* its size */ | |
4200 | mfastbinptr* fb; /* associated fastbin */ | |
4201 | mchunkptr nextchunk; /* next contiguous chunk */ | |
4202 | INTERNAL_SIZE_T nextsize; /* its size */ | |
4203 | int nextinuse; /* true if nextchunk is used */ | |
4204 | INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */ | |
4205 | mchunkptr bck; /* misc temp for linking */ | |
4206 | mchunkptr fwd; /* misc temp for linking */ | |
4207 | ||
4208 | ||
37fa1953 | 4209 | const char *errstr = NULL; |
f65fd747 | 4210 | |
37fa1953 UD |
4211 | p = mem2chunk(mem); |
4212 | size = chunksize(p); | |
f65fd747 | 4213 | |
37fa1953 UD |
4214 | /* Little security check which won't hurt performance: the |
4215 | allocator never wraps around at the end of the address space. | |
4216 | Therefore we can exclude some size values which might appear | |
4217 | here by accident or by "design" from some intruder. */ | |
4218 | if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)) | |
4219 | { | |
4220 | errstr = "free(): invalid pointer"; | |
4221 | errout: | |
4222 | malloc_printerr (check_action, errstr, mem); | |
4223 | return; | |
fa8d436c | 4224 | } |
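  /*
    For illustration: in unsigned arithmetic, (uintptr_t) -size equals
    2^N - size, the largest base address whose chunk of that size still
    fits in an N-bit address space.  Hence p > -size means p + size would
    run past the end of memory, something no valid chunk can do.  The
    same test in standalone form:

      #include <assert.h>
      #include <stdint.h>

      // nonzero if base + size would pass the top of the address space
      static int would_wrap(uintptr_t base, uintptr_t size)
      {
        return base > (uintptr_t) -size;
      }

      int main(void)
      {
        assert( would_wrap(UINTPTR_MAX - 10, 100));  // corrupted size
        assert(!would_wrap(0x1000, 100));            // ordinary chunk
        return 0;
      }
  */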
f65fd747 | 4225 | |
37fa1953 | 4226 | check_inuse_chunk(av, p); |
f65fd747 | 4227 | |
37fa1953 UD |
4228 | /* |
4229 | If eligible, place chunk on a fastbin so it can be found | |
4230 | and used quickly in malloc. | |
4231 | */ | |
6bf4302e | 4232 | |
37fa1953 | 4233 | if ((unsigned long)(size) <= (unsigned long)(av->max_fast) |
6bf4302e | 4234 | |
37fa1953 UD |
4235 | #if TRIM_FASTBINS |
4236 | /* | |
4237 | If TRIM_FASTBINS set, don't place chunks | |
4238 | bordering top into fastbins | |
4239 | */ | |
4240 | && (chunk_at_offset(p, size) != av->top) | |
4241 | #endif | |
4242 | ) { | |
fa8d436c | 4243 | |
893e6098 UD |
4244 | if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0) |
4245 | || __builtin_expect (chunksize (chunk_at_offset (p, size)) | |
4246 | >= av->system_mem, 0)) | |
4247 | { | |
4248 | errstr = "invalid next size (fast)"; | |
4249 | goto errout; | |
4250 | } | |
4251 | ||
37fa1953 UD |
4252 | set_fastchunks(av); |
4253 | fb = &(av->fastbins[fastbin_index(size)]); | |
4254 | /* Another simple check: make sure the top of the bin is not the | |
4255 | record we are going to add (i.e., double free). */ | |
4256 | if (__builtin_expect (*fb == p, 0)) | |
4257 | { | |
4258 | errstr = "double free or corruption (fasttop)"; | |
4259 | goto errout; | |
fa8d436c | 4260 | } |
37fa1953 UD |
4261 | p->fd = *fb; |
4262 | *fb = p; | |
4263 | } | |
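  /*
    For illustration: each fastbin is a LIFO singly-linked list threaded
    through the fd field, so this free and the matching malloc are both
    O(1).  A standalone model, including the "fasttop" double-free test
    from above (the struct and names are illustrative, not the
    allocator's own):

      #include <stddef.h>

      struct fchunk { struct fchunk *fd; };

      static int fb_push(struct fchunk **fb, struct fchunk *p)
      {
        if (*fb == p)           // same test as above: re-freeing the
          return -1;            //   chunk on top is a double free
        p->fd = *fb;
        *fb = p;
        return 0;
      }

      static struct fchunk *fb_pop(struct fchunk **fb)
      {
        struct fchunk *p = *fb;
        if (p != NULL)
          *fb = p->fd;          // malloc side: take the most recent free
        return p;
      }
  */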
f65fd747 | 4264 | |
37fa1953 UD |
4265 | /* |
4266 | Consolidate other non-mmapped chunks as they arrive. | |
4267 | */ | |
fa8d436c | 4268 | |
37fa1953 UD |
4269 | else if (!chunk_is_mmapped(p)) { |
4270 | nextchunk = chunk_at_offset(p, size); | |
fa8d436c | 4271 | |
37fa1953 UD |
4272 | /* Lightweight tests: check whether the block is already the |
4273 | top block. */ | |
4274 | if (__builtin_expect (p == av->top, 0)) | |
4275 | { | |
4276 | errstr = "double free or corruption (top)"; | |
4277 | goto errout; | |
4278 | } | |
4279 | /* Or whether the next chunk is beyond the boundaries of the arena. */ | |
4280 | if (__builtin_expect (contiguous (av) | |
4281 | && (char *) nextchunk | |
4282 | >= ((char *) av->top + chunksize(av->top)), 0)) | |
4283 | { | |
4284 | errstr = "double free or corruption (out)"; | |
4285 | goto errout; | |
4286 | } | |
4287 | /* Or whether the block is actually not marked used. */ | |
4288 | if (__builtin_expect (!prev_inuse(nextchunk), 0)) | |
4289 | { | |
4290 | errstr = "double free or corruption (!prev)"; | |
4291 | goto errout; | |
4292 | } | |
fa8d436c | 4293 | |
37fa1953 | 4294 | nextsize = chunksize(nextchunk); |
893e6098 UD |
4295 | if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0) |
4296 | || __builtin_expect (nextsize >= av->system_mem, 0)) | |
4297 | { | |
4298 | errstr = "invalid next size (normal)"; | |
4299 | goto errout; | |
4300 | } | |
fa8d436c | 4301 | |
37fa1953 UD |
4302 | /* consolidate backward */ |
4303 | if (!prev_inuse(p)) { | |
4304 | prevsize = p->prev_size; | |
4305 | size += prevsize; | |
4306 | p = chunk_at_offset(p, -((long) prevsize)); | |
4307 | unlink(p, bck, fwd); | |
4308 | } | |
a9177ff5 | 4309 | |
37fa1953 UD |
4310 | if (nextchunk != av->top) { |
4311 | /* get and clear inuse bit */ | |
4312 | nextinuse = inuse_bit_at_offset(nextchunk, nextsize); | |
4313 | ||
4314 | /* consolidate forward */ | |
4315 | if (!nextinuse) { | |
4316 | unlink(nextchunk, bck, fwd); | |
4317 | size += nextsize; | |
4318 | } else | |
4319 | clear_inuse_bit_at_offset(nextchunk, 0); | |
10dc2a90 | 4320 | |
fa8d436c | 4321 | /* |
37fa1953 UD |
4322 | Place the chunk in unsorted chunk list. Chunks are |
4323 | not placed into regular bins until after they have | |
4324 | been given one chance to be used in malloc. | |
fa8d436c | 4325 | */ |
f65fd747 | 4326 | |
37fa1953 UD |
4327 | bck = unsorted_chunks(av); |
4328 | fwd = bck->fd; | |
4329 | p->bk = bck; | |
4330 | p->fd = fwd; | |
4331 | bck->fd = p; | |
4332 | fwd->bk = p; | |
8a4b65b4 | 4333 | |
37fa1953 UD |
4334 | set_head(p, size | PREV_INUSE); |
4335 | set_foot(p, size); | |
4336 | ||
4337 | check_free_chunk(av, p); | |
4338 | } | |
4339 | ||
4340 | /* | |
4341 | If the chunk borders the current high end of memory, | |
4342 | consolidate into top | |
4343 | */ | |
4344 | ||
4345 | else { | |
4346 | size += nextsize; | |
4347 | set_head(p, size | PREV_INUSE); | |
4348 | av->top = p; | |
4349 | check_chunk(av, p); | |
4350 | } | |
4351 | ||
4352 | /* | |
4353 | If freeing a large space, consolidate possibly-surrounding | |
4354 | chunks. Then, if the total unused topmost memory exceeds trim | |
4355 | threshold, ask malloc_trim to reduce top. | |
4356 | ||
4357 | Unless max_fast is 0, we don't know if there are fastbins | |
4358 | bordering top, so we cannot tell for sure whether threshold | |
4359 | has been reached unless fastbins are consolidated. But we | |
4360 | don't want to consolidate on each free. As a compromise, | |
4361 | consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD | |
4362 | is reached. | |
4363 | */ | |
fa8d436c | 4364 | |
37fa1953 UD |
4365 | if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { |
4366 | if (have_fastchunks(av)) | |
4367 | malloc_consolidate(av); | |
fa8d436c | 4368 | |
37fa1953 | 4369 | if (av == &main_arena) { |
a9177ff5 | 4370 | #ifndef MORECORE_CANNOT_TRIM |
37fa1953 UD |
4371 | if ((unsigned long)(chunksize(av->top)) >= |
4372 | (unsigned long)(mp_.trim_threshold)) | |
4373 | sYSTRIm(mp_.top_pad, av); | |
fa8d436c | 4374 | #endif |
37fa1953 UD |
4375 | } else { |
4376 | /* Always try heap_trim(), even if the top chunk is not | |
4377 | large, because the corresponding heap might go away. */ | |
4378 | heap_info *heap = heap_for_ptr(top(av)); | |
fa8d436c | 4379 | |
37fa1953 UD |
4380 | assert(heap->ar_ptr == av); |
4381 | heap_trim(heap, mp_.top_pad); | |
fa8d436c | 4382 | } |
fa8d436c | 4383 | } |
10dc2a90 | 4384 | |
37fa1953 UD |
4385 | } |
4386 | /* | |
4387 | If the chunk was allocated via mmap, release via munmap(). Note | |
4388 | that if HAVE_MMAP is false but chunk_is_mmapped is true, then | |
4389 | the user must have overwritten memory. There's nothing we can do to | |
4390 | catch this error unless MALLOC_DEBUG is set, in which case | |
4391 | check_inuse_chunk (above) will have triggered an error. | |
4392 | */ | |
4393 | ||
4394 | else { | |
fa8d436c | 4395 | #if HAVE_MMAP |
c120d94d | 4396 | munmap_chunk (p); |
fa8d436c | 4397 | #endif |
fa8d436c | 4398 | } |
10dc2a90 UD |
4399 | } |
4400 | ||
fa8d436c UD |
4401 | /* |
4402 | ------------------------- malloc_consolidate ------------------------- | |
4403 | ||
4404 | malloc_consolidate is a specialized version of free() that tears | |
4405 | down chunks held in fastbins. Free itself cannot be used for this | |
4406 | purpose since, among other things, it might place chunks back onto | |
4407 | fastbins. So, instead, we need to use a minor variant of the same | |
4408 | code. | |
a9177ff5 | 4409 | |
fa8d436c UD |
4410 | Also, because this routine needs to be called the first time through |
4411 | malloc anyway, it turns out to be the perfect place to trigger | |
4412 | initialization code. | |
4413 | */ | |
4414 | ||
10dc2a90 | 4415 | #if __STD_C |
fa8d436c | 4416 | static void malloc_consolidate(mstate av) |
10dc2a90 | 4417 | #else |
fa8d436c | 4418 | static void malloc_consolidate(av) mstate av; |
10dc2a90 UD |
4419 | #endif |
4420 | { | |
fa8d436c UD |
4421 | mfastbinptr* fb; /* current fastbin being consolidated */ |
4422 | mfastbinptr* maxfb; /* last fastbin (for loop control) */ | |
4423 | mchunkptr p; /* current chunk being consolidated */ | |
4424 | mchunkptr nextp; /* next chunk to consolidate */ | |
4425 | mchunkptr unsorted_bin; /* bin header */ | |
4426 | mchunkptr first_unsorted; /* chunk to link to */ | |
4427 | ||
4428 | /* These have same use as in free() */ | |
4429 | mchunkptr nextchunk; | |
4430 | INTERNAL_SIZE_T size; | |
4431 | INTERNAL_SIZE_T nextsize; | |
4432 | INTERNAL_SIZE_T prevsize; | |
4433 | int nextinuse; | |
4434 | mchunkptr bck; | |
4435 | mchunkptr fwd; | |
10dc2a90 | 4436 | |
fa8d436c UD |
4437 | /* |
4438 | If max_fast is 0, we know that av hasn't | |
4439 | yet been initialized, in which case do so below | |
4440 | */ | |
10dc2a90 | 4441 | |
fa8d436c UD |
4442 | if (av->max_fast != 0) { |
4443 | clear_fastchunks(av); | |
10dc2a90 | 4444 | |
fa8d436c | 4445 | unsorted_bin = unsorted_chunks(av); |
10dc2a90 | 4446 | |
fa8d436c UD |
4447 | /* |
4448 | Remove each chunk from fast bin and consolidate it, placing it | |
4449 | then in unsorted bin. Among other reasons for doing this, | |
4450 | placing in unsorted bin avoids needing to calculate actual bins | |
4451 | until malloc is sure that chunks aren't immediately going to be | |
4452 | reused anyway. | |
4453 | */ | |
a9177ff5 | 4454 | |
fa8d436c UD |
4455 | maxfb = &(av->fastbins[fastbin_index(av->max_fast)]); |
4456 | fb = &(av->fastbins[0]); | |
4457 | do { | |
4458 | if ( (p = *fb) != 0) { | |
4459 | *fb = 0; | |
a9177ff5 | 4460 | |
fa8d436c UD |
4461 | do { |
4462 | check_inuse_chunk(av, p); | |
4463 | nextp = p->fd; | |
a9177ff5 | 4464 | |
fa8d436c UD |
4465 | /* Slightly streamlined version of consolidation code in free() */ |
4466 | size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); | |
4467 | nextchunk = chunk_at_offset(p, size); | |
4468 | nextsize = chunksize(nextchunk); | |
a9177ff5 | 4469 | |
fa8d436c UD |
4470 | if (!prev_inuse(p)) { |
4471 | prevsize = p->prev_size; | |
4472 | size += prevsize; | |
4473 | p = chunk_at_offset(p, -((long) prevsize)); | |
4474 | unlink(p, bck, fwd); | |
4475 | } | |
a9177ff5 | 4476 | |
fa8d436c UD |
4477 | if (nextchunk != av->top) { |
4478 | nextinuse = inuse_bit_at_offset(nextchunk, nextsize); | |
a9177ff5 | 4479 | |
fa8d436c UD |
4480 | if (!nextinuse) { |
4481 | size += nextsize; | |
4482 | unlink(nextchunk, bck, fwd); | |
4483 | } else | |
4484 | clear_inuse_bit_at_offset(nextchunk, 0); | |
a9177ff5 | 4485 | |
fa8d436c UD |
4486 | first_unsorted = unsorted_bin->fd; |
4487 | unsorted_bin->fd = p; | |
4488 | first_unsorted->bk = p; | |
a9177ff5 | 4489 | |
fa8d436c UD |
4490 | set_head(p, size | PREV_INUSE); |
4491 | p->bk = unsorted_bin; | |
4492 | p->fd = first_unsorted; | |
4493 | set_foot(p, size); | |
4494 | } | |
a9177ff5 | 4495 | |
fa8d436c UD |
4496 | else { |
4497 | size += nextsize; | |
4498 | set_head(p, size | PREV_INUSE); | |
4499 | av->top = p; | |
4500 | } | |
a9177ff5 | 4501 | |
fa8d436c | 4502 | } while ( (p = nextp) != 0); |
a9177ff5 | 4503 | |
fa8d436c UD |
4504 | } |
4505 | } while (fb++ != maxfb); | |
4506 | } | |
4507 | else { | |
4508 | malloc_init_state(av); | |
4509 | check_malloc_state(av); | |
4510 | } | |
4511 | } | |
10dc2a90 | 4512 | |
fa8d436c UD |
4513 | /* |
4514 | ------------------------------ realloc ------------------------------ | |
4515 | */ | |
f65fd747 | 4516 | |
f1c5213d | 4517 | Void_t* |
fa8d436c UD |
4518 | _int_realloc(mstate av, Void_t* oldmem, size_t bytes) |
4519 | { | |
4520 | INTERNAL_SIZE_T nb; /* padded request size */ | |
f65fd747 | 4521 | |
fa8d436c UD |
4522 | mchunkptr oldp; /* chunk corresponding to oldmem */ |
4523 | INTERNAL_SIZE_T oldsize; /* its size */ | |
f65fd747 | 4524 | |
fa8d436c UD |
4525 | mchunkptr newp; /* chunk to return */ |
4526 | INTERNAL_SIZE_T newsize; /* its size */ | |
4527 | Void_t* newmem; /* corresponding user mem */ | |
f65fd747 | 4528 | |
fa8d436c | 4529 | mchunkptr next; /* next contiguous chunk after oldp */ |
f65fd747 | 4530 | |
fa8d436c UD |
4531 | mchunkptr remainder; /* extra space at end of newp */ |
4532 | unsigned long remainder_size; /* its size */ | |
f65fd747 | 4533 | |
fa8d436c UD |
4534 | mchunkptr bck; /* misc temp for linking */ |
4535 | mchunkptr fwd; /* misc temp for linking */ | |
2ed5fd9a | 4536 | |
fa8d436c UD |
4537 | unsigned long copysize; /* bytes to copy */ |
4538 | unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */ | |
a9177ff5 | 4539 | INTERNAL_SIZE_T* s; /* copy source */ |
fa8d436c | 4540 | INTERNAL_SIZE_T* d; /* copy destination */ |
f65fd747 | 4541 | |
f65fd747 | 4542 | |
fa8d436c UD |
4543 | #if REALLOC_ZERO_BYTES_FREES |
4544 | if (bytes == 0) { | |
37fa1953 UD |
4545 | if (oldmem != 0) |
4546 | _int_free(av, oldmem); | |
fa8d436c UD |
4547 | return 0; |
4548 | } | |
4549 | #endif | |
f65fd747 | 4550 | |
fa8d436c UD |
4551 | /* realloc of null is supposed to be the same as malloc */ | |
4552 | if (oldmem == 0) return _int_malloc(av, bytes); | |
f65fd747 | 4553 | |
fa8d436c | 4554 | checked_request2size(bytes, nb); |
f65fd747 | 4555 | |
fa8d436c UD |
4556 | oldp = mem2chunk(oldmem); |
4557 | oldsize = chunksize(oldp); | |
f65fd747 | 4558 | |
fa8d436c | 4559 | check_inuse_chunk(av, oldp); |
f65fd747 | 4560 | |
fa8d436c | 4561 | if (!chunk_is_mmapped(oldp)) { |
f65fd747 | 4562 | |
fa8d436c UD |
4563 | if ((unsigned long)(oldsize) >= (unsigned long)(nb)) { |
4564 | /* already big enough; split below */ | |
4565 | newp = oldp; | |
4566 | newsize = oldsize; | |
7799b7b3 | 4567 | } |
f65fd747 | 4568 | |
fa8d436c UD |
4569 | else { |
4570 | next = chunk_at_offset(oldp, oldsize); | |
4571 | ||
4572 | /* Try to expand forward into top */ | |
4573 | if (next == av->top && | |
4574 | (unsigned long)(newsize = oldsize + chunksize(next)) >= | |
4575 | (unsigned long)(nb + MINSIZE)) { | |
4576 | set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4577 | av->top = chunk_at_offset(oldp, nb); | |
4578 | set_head(av->top, (newsize - nb) | PREV_INUSE); | |
4579 | check_inuse_chunk(av, oldp); | |
4580 | return chunk2mem(oldp); | |
4581 | } | |
a9177ff5 | 4582 | |
fa8d436c | 4583 | /* Try to expand forward into next chunk; split off remainder below */ |
a9177ff5 | 4584 | else if (next != av->top && |
fa8d436c UD |
4585 | !inuse(next) && |
4586 | (unsigned long)(newsize = oldsize + chunksize(next)) >= | |
4587 | (unsigned long)(nb)) { | |
4588 | newp = oldp; | |
4589 | unlink(next, bck, fwd); | |
4590 | } | |
f65fd747 | 4591 | |
fa8d436c UD |
4592 | /* allocate, copy, free */ |
4593 | else { | |
4594 | newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK); | |
4595 | if (newmem == 0) | |
4596 | return 0; /* propagate failure */ | |
a9177ff5 | 4597 | |
fa8d436c UD |
4598 | newp = mem2chunk(newmem); |
4599 | newsize = chunksize(newp); | |
a9177ff5 | 4600 | |
fa8d436c UD |
4601 | /* |
4602 | Avoid copy if newp is next chunk after oldp. | |
4603 | */ | |
4604 | if (newp == next) { | |
4605 | newsize += oldsize; | |
4606 | newp = oldp; | |
4607 | } | |
4608 | else { | |
4609 | /* | |
4610 | Unroll copy of <= 36 bytes (72 if 8-byte sizes). Chunk sizes | |
4611 | are multiples of 2*SIZE_SZ and copysize is oldsize - SIZE_SZ, so | |
4612 | the contents span an odd number of INTERNAL_SIZE_T words; minimally 3. | |
4613 | */ | |
a9177ff5 | 4614 | |
fa8d436c UD |
4615 | copysize = oldsize - SIZE_SZ; |
4616 | s = (INTERNAL_SIZE_T*)(oldmem); | |
4617 | d = (INTERNAL_SIZE_T*)(newmem); | |
4618 | ncopies = copysize / sizeof(INTERNAL_SIZE_T); | |
4619 | assert(ncopies >= 3); | |
a9177ff5 | 4620 | |
fa8d436c UD |
4621 | if (ncopies > 9) |
4622 | MALLOC_COPY(d, s, copysize); | |
a9177ff5 | 4623 | |
fa8d436c UD |
4624 | else { |
4625 | *(d+0) = *(s+0); | |
4626 | *(d+1) = *(s+1); | |
4627 | *(d+2) = *(s+2); | |
4628 | if (ncopies > 4) { | |
4629 | *(d+3) = *(s+3); | |
4630 | *(d+4) = *(s+4); | |
4631 | if (ncopies > 6) { | |
4632 | *(d+5) = *(s+5); | |
4633 | *(d+6) = *(s+6); | |
4634 | if (ncopies > 8) { | |
4635 | *(d+7) = *(s+7); | |
4636 | *(d+8) = *(s+8); | |
4637 | } | |
4638 | } | |
4639 | } | |
4640 | } | |
a9177ff5 | 4641 | |
fa8d436c UD |
4642 | _int_free(av, oldmem); |
4643 | check_inuse_chunk(av, newp); | |
4644 | return chunk2mem(newp); | |
4645 | } | |
4646 | } | |
f65fd747 UD |
4647 | } |
4648 | ||
fa8d436c | 4649 | /* If possible, free extra space in old or extended chunk */ |
f65fd747 | 4650 | |
fa8d436c | 4651 | assert((unsigned long)(newsize) >= (unsigned long)(nb)); |
f65fd747 | 4652 | |
f65fd747 | 4653 | remainder_size = newsize - nb; |
f65fd747 | 4654 | |
fa8d436c UD |
4655 | if (remainder_size < MINSIZE) { /* not enough extra to split off */ |
4656 | set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4657 | set_inuse_bit_at_offset(newp, newsize); | |
4658 | } | |
4659 | else { /* split remainder */ | |
4660 | remainder = chunk_at_offset(newp, nb); | |
4661 | set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4662 | set_head(remainder, remainder_size | PREV_INUSE | | |
4663 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4664 | /* Mark remainder as inuse so free() won't complain */ | |
4665 | set_inuse_bit_at_offset(remainder, remainder_size); | |
a9177ff5 | 4666 | _int_free(av, chunk2mem(remainder)); |
fa8d436c | 4667 | } |
f65fd747 | 4668 | |
fa8d436c UD |
4669 | check_inuse_chunk(av, newp); |
4670 | return chunk2mem(newp); | |
4671 | } | |
f65fd747 | 4672 | |
fa8d436c UD |
4673 | /* |
4674 | Handle mmap cases | |
4675 | */ | |
f65fd747 | 4676 | |
fa8d436c UD |
4677 | else { |
4678 | #if HAVE_MMAP | |
f65fd747 | 4679 | |
fa8d436c UD |
4680 | #if HAVE_MREMAP |
4681 | INTERNAL_SIZE_T offset = oldp->prev_size; | |
4682 | size_t pagemask = mp_.pagesize - 1; | |
4683 | char *cp; | |
4684 | unsigned long sum; | |
a9177ff5 | 4685 | |
fa8d436c UD |
4686 | /* Note the extra SIZE_SZ overhead */ |
4687 | newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask; | |
4688 | ||
4689 | /* don't need to remap if still within same page */ | |
a9177ff5 | 4690 | if (oldsize == newsize - offset) |
fa8d436c UD |
4691 | return oldmem; |
4692 | ||
4693 | cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1); | |
a9177ff5 | 4694 | |
fa8d436c UD |
4695 | if (cp != MAP_FAILED) { |
4696 | ||
4697 | newp = (mchunkptr)(cp + offset); | |
4698 | set_head(newp, (newsize - offset)|IS_MMAPPED); | |
a9177ff5 | 4699 | |
fa8d436c UD |
4700 | assert(aligned_OK(chunk2mem(newp))); |
4701 | assert((newp->prev_size == offset)); | |
a9177ff5 | 4702 | |
fa8d436c UD |
4703 | /* update statistics */ |
4704 | sum = mp_.mmapped_mem += newsize - oldsize; | |
a9177ff5 | 4705 | if (sum > (unsigned long)(mp_.max_mmapped_mem)) |
fa8d436c UD |
4706 | mp_.max_mmapped_mem = sum; |
4707 | #ifdef NO_THREADS | |
4708 | sum += main_arena.system_mem; | |
a9177ff5 | 4709 | if (sum > (unsigned long)(mp_.max_total_mem)) |
fa8d436c UD |
4710 | mp_.max_total_mem = sum; |
4711 | #endif | |
a9177ff5 | 4712 | |
fa8d436c UD |
4713 | return chunk2mem(newp); |
4714 | } | |
f65fd747 | 4715 | #endif |
10dc2a90 | 4716 | |
fa8d436c | 4717 | /* Note the extra SIZE_SZ overhead. */ |
a9177ff5 | 4718 | if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ)) |
fa8d436c UD |
4719 | newmem = oldmem; /* do nothing */ |
4720 | else { | |
4721 | /* Must alloc, copy, free. */ | |
4722 | newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK); | |
4723 | if (newmem != 0) { | |
4724 | MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); | |
4725 | _int_free(av, oldmem); | |
4726 | } | |
4727 | } | |
4728 | return newmem; | |
10dc2a90 | 4729 | |
a9177ff5 | 4730 | #else |
fa8d436c UD |
4731 | /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */ |
4732 | check_malloc_state(av); | |
4733 | MALLOC_FAILURE_ACTION; | |
4734 | return 0; | |
a2b08ee5 | 4735 | #endif |
10dc2a90 | 4736 | } |
fa8d436c UD |
4737 | } |
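/*
  For illustration: the strategy above -- reuse in place, grow into top
  or a free neighbor, and only then allocate-copy-free -- means that
  growing the most recently allocated block often keeps its address.
  A user-level probe; the outcome is implementation-dependent and must
  never be relied upon:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      char *p, *q;
      uintptr_t before;

      p = malloc(100);
      if (p == NULL) return 1;
      before = (uintptr_t) p;

      q = realloc(p, 200);                  // frequently extends into top
      if (q == NULL) { free(p); return 1; }
      printf((uintptr_t) q == before ? "grew in place\n" : "moved\n");
      free(q);
      return 0;
    }
*/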
4738 | ||
4739 | /* | |
4740 | ------------------------------ memalign ------------------------------ | |
4741 | */ | |
4742 | ||
f1c5213d | 4743 | Void_t* |
fa8d436c UD |
4744 | _int_memalign(mstate av, size_t alignment, size_t bytes) |
4745 | { | |
4746 | INTERNAL_SIZE_T nb; /* padded request size */ | |
4747 | char* m; /* memory returned by malloc call */ | |
4748 | mchunkptr p; /* corresponding chunk */ | |
4749 | char* brk; /* alignment point within p */ | |
4750 | mchunkptr newp; /* chunk to return */ | |
4751 | INTERNAL_SIZE_T newsize; /* its size */ | |
4752 | INTERNAL_SIZE_T leadsize; /* leading space before alignment point */ | |
4753 | mchunkptr remainder; /* spare room at end to split off */ | |
4754 | unsigned long remainder_size; /* its size */ | |
4755 | INTERNAL_SIZE_T size; | |
f65fd747 UD |
4756 | |
4757 | /* If need less alignment than we give anyway, just relay to malloc */ | |
4758 | ||
fa8d436c | 4759 | if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes); |
f65fd747 UD |
4760 | |
4761 | /* Otherwise, ensure that it is at least a minimum chunk size */ | |
4762 | ||
4763 | if (alignment < MINSIZE) alignment = MINSIZE; | |
4764 | ||
fa8d436c UD |
4765 | /* Make sure alignment is power of 2 (in case MINSIZE is not). */ |
4766 | if ((alignment & (alignment - 1)) != 0) { | |
4767 | size_t a = MALLOC_ALIGNMENT * 2; | |
4768 | while ((unsigned long)a < (unsigned long)alignment) a <<= 1; | |
4769 | alignment = a; | |
7799b7b3 | 4770 | } |
f65fd747 | 4771 | |
fa8d436c UD |
4772 | checked_request2size(bytes, nb); |
4773 | ||
4774 | /* | |
4775 | Strategy: find a spot within that chunk that meets the alignment | |
4776 | request, and then possibly free the leading and trailing space. | |
4777 | */ | |
4778 | ||
4779 | ||
4780 | /* Call malloc with worst case padding to hit alignment. */ | |
4781 | ||
4782 | m = (char*)(_int_malloc(av, nb + alignment + MINSIZE)); | |
4783 | ||
4784 | if (m == 0) return 0; /* propagate failure */ | |
4785 | ||
4786 | p = mem2chunk(m); | |
4787 | ||
4788 | if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */ | |
4789 | ||
f65fd747 | 4790 | /* |
fa8d436c UD |
4791 | Find an aligned spot inside chunk. Since we need to give back |
4792 | leading space in a chunk of at least MINSIZE, if the first | |
4793 | calculation places us at a spot with less than MINSIZE leader, | |
4794 | we can move to the next aligned spot -- we've allocated enough | |
4795 | total room so that this is always possible. | |
f65fd747 UD |
4796 | */ |
4797 | ||
fa8d436c UD |
4798 | brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & |
4799 | -((signed long) alignment)); | |
4800 | if ((unsigned long)(brk - (char*)(p)) < MINSIZE) | |
4801 | brk += alignment; | |
f65fd747 | 4802 | |
fa8d436c | 4803 | newp = (mchunkptr)brk; |
f65fd747 UD |
4804 | leadsize = brk - (char*)(p); |
4805 | newsize = chunksize(p) - leadsize; | |
4806 | ||
fa8d436c UD |
4807 | /* For mmapped chunks, just adjust offset */ |
4808 | if (chunk_is_mmapped(p)) { | |
f65fd747 UD |
4809 | newp->prev_size = p->prev_size + leadsize; |
4810 | set_head(newp, newsize|IS_MMAPPED); | |
fa8d436c | 4811 | return chunk2mem(newp); |
f65fd747 | 4812 | } |
f65fd747 | 4813 | |
fa8d436c UD |
4814 | /* Otherwise, give back leader, use the rest */ |
4815 | set_head(newp, newsize | PREV_INUSE | | |
4816 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
f65fd747 | 4817 | set_inuse_bit_at_offset(newp, newsize); |
fa8d436c UD |
4818 | set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
4819 | _int_free(av, chunk2mem(p)); | |
f65fd747 UD |
4820 | p = newp; |
4821 | ||
fa8d436c UD |
4822 | assert (newsize >= nb && |
4823 | (((unsigned long)(chunk2mem(p))) % alignment) == 0); | |
f65fd747 UD |
4824 | } |
4825 | ||
4826 | /* Also give back spare room at the end */ | |
fa8d436c UD |
4827 | if (!chunk_is_mmapped(p)) { |
4828 | size = chunksize(p); | |
4829 | if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { | |
4830 | remainder_size = size - nb; | |
4831 | remainder = chunk_at_offset(p, nb); | |
4832 | set_head(remainder, remainder_size | PREV_INUSE | | |
4833 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4834 | set_head_size(p, nb); | |
4835 | _int_free(av, chunk2mem(remainder)); | |
4836 | } | |
f65fd747 UD |
4837 | } |
4838 | ||
fa8d436c UD |
4839 | check_inuse_chunk(av, p); |
4840 | return chunk2mem(p); | |
f65fd747 UD |
4841 | } |
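/*
  A worked example of the strategy above, on a 64-bit layout where
  MALLOC_ALIGNMENT is 16 and MINSIZE is 32 (values illustrative): with
  alignment = 64 and a malloc result m = 0x1030, the first aligned spot is

    (0x1030 + 64 - 1) & -64  ==  0x1040

  The gap between m and that spot is only 16 bytes, less than the MINSIZE
  leader needed to give the space back as a chunk, so the spot is bumped
  one more alignment unit and 0x1080 is used instead.  Requesting
  nb + alignment + MINSIZE up front is what guarantees this bump always
  lands inside the allocated chunk.
*/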
4842 | ||
fa8d436c UD |
4843 | #if 0 |
4844 | /* | |
4845 | ------------------------------ calloc ------------------------------ | |
4846 | */ | |
4847 | ||
4848 | #if __STD_C | |
4849 | Void_t* cALLOc(size_t n_elements, size_t elem_size) | |
4850 | #else | |
4851 | Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size; | |
4852 | #endif | |
4853 | { | |
4854 | mchunkptr p; | |
4855 | unsigned long clearsize; | |
4856 | unsigned long nclears; | |
4857 | INTERNAL_SIZE_T* d; | |
4858 | ||
4859 | Void_t* mem = mALLOc(n_elements * elem_size); | |
4860 | ||
4861 | if (mem != 0) { | |
4862 | p = mem2chunk(mem); | |
4863 | ||
4864 | #if MMAP_CLEARS | |
4865 | if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */ | |
4866 | #endif | |
a9177ff5 | 4867 | { |
fa8d436c UD |
4868 | /* |
4869 | Unroll clear of <= 36 bytes (72 if 8-byte sizes). | |
4870 | We know that contents have an odd number of | |
4871 | INTERNAL_SIZE_T-sized words; minimally 3. | |
4872 | */ | |
4873 | ||
4874 | d = (INTERNAL_SIZE_T*)mem; | |
4875 | clearsize = chunksize(p) - SIZE_SZ; | |
4876 | nclears = clearsize / sizeof(INTERNAL_SIZE_T); | |
4877 | assert(nclears >= 3); | |
f65fd747 | 4878 | |
fa8d436c UD |
4879 | if (nclears > 9) |
4880 | MALLOC_ZERO(d, clearsize); | |
4881 | ||
4882 | else { | |
4883 | *(d+0) = 0; | |
4884 | *(d+1) = 0; | |
4885 | *(d+2) = 0; | |
4886 | if (nclears > 4) { | |
4887 | *(d+3) = 0; | |
4888 | *(d+4) = 0; | |
4889 | if (nclears > 6) { | |
4890 | *(d+5) = 0; | |
4891 | *(d+6) = 0; | |
4892 | if (nclears > 8) { | |
4893 | *(d+7) = 0; | |
4894 | *(d+8) = 0; | |
4895 | } | |
4896 | } | |
4897 | } | |
4898 | } | |
4899 | } | |
4900 | } | |
4901 | return mem; | |
4902 | } | |
4903 | #endif /* 0 */ | |
f65fd747 UD |
4904 | |
4905 | /* | |
fa8d436c | 4906 | ------------------------- independent_calloc ------------------------- |
f65fd747 UD |
4907 | */ |
4908 | ||
f1c5213d | 4909 | Void_t** |
f65fd747 | 4910 | #if __STD_C |
fa8d436c | 4911 | _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[]) |
f65fd747 | 4912 | #else |
fa8d436c UD |
4913 | _int_icalloc(av, n_elements, elem_size, chunks) |
4914 | mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[]; | |
f65fd747 UD |
4915 | #endif |
4916 | { | |
fa8d436c UD |
4917 | size_t sz = elem_size; /* serves as 1-element array */ |
4918 | /* opts arg of 3 means all elements are same size, and should be cleared */ | |
4919 | return iALLOc(av, n_elements, &sz, 3, chunks); | |
f65fd747 UD |
4920 | } |
4921 | ||
4922 | /* | |
fa8d436c | 4923 | ------------------------- independent_comalloc ------------------------- |
f65fd747 UD |
4924 | */ |
4925 | ||
f1c5213d | 4926 | Void_t** |
f65fd747 | 4927 | #if __STD_C |
fa8d436c | 4928 | _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[]) |
f65fd747 | 4929 | #else |
fa8d436c UD |
4930 | _int_icomalloc(av, n_elements, sizes, chunks) |
4931 | mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[]; | |
f65fd747 UD |
4932 | #endif |
4933 | { | |
fa8d436c | 4934 | return iALLOc(av, n_elements, sizes, 0, chunks); |
f65fd747 UD |
4935 | } |
4936 | ||
f65fd747 | 4937 | |
fa8d436c UD |
4938 | /* |
4939 | ------------------------------ ialloc ------------------------------ | |
4940 | ialloc provides common support for independent_X routines, handling all of | |
4941 | the combinations that can result. | |
f65fd747 | 4942 | |
fa8d436c UD |
4943 | The opts arg has: |
4944 | bit 0 set if all elements are same size (using sizes[0]) | |
4945 | bit 1 set if elements should be zeroed | |
f65fd747 UD |
4946 | */ |
4947 | ||
fa8d436c UD |
4948 | |
4949 | static Void_t** | |
f65fd747 | 4950 | #if __STD_C |
fa8d436c | 4951 | iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[]) |
f65fd747 | 4952 | #else |
fa8d436c UD |
4953 | iALLOc(av, n_elements, sizes, opts, chunks) |
4954 | mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[]; | |
f65fd747 UD |
4955 | #endif |
4956 | { | |
fa8d436c UD |
4957 | INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */ |
4958 | INTERNAL_SIZE_T contents_size; /* total size of elements */ | |
4959 | INTERNAL_SIZE_T array_size; /* request size of pointer array */ | |
4960 | Void_t* mem; /* malloced aggregate space */ | |
4961 | mchunkptr p; /* corresponding chunk */ | |
4962 | INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */ | |
4963 | Void_t** marray; /* either "chunks" or malloced ptr array */ | |
4964 | mchunkptr array_chunk; /* chunk for malloced ptr array */ | |
4965 | int mmx; /* to disable mmap */ | |
a9177ff5 | 4966 | INTERNAL_SIZE_T size; |
fa8d436c UD |
4967 | INTERNAL_SIZE_T size_flags; |
4968 | size_t i; | |
4969 | ||
4970 | /* Ensure initialization/consolidation */ | |
4971 | if (have_fastchunks(av)) malloc_consolidate(av); | |
4972 | ||
4973 | /* compute array length, if needed */ | |
4974 | if (chunks != 0) { | |
4975 | if (n_elements == 0) | |
4976 | return chunks; /* nothing to do */ | |
4977 | marray = chunks; | |
4978 | array_size = 0; | |
4979 | } | |
4980 | else { | |
4981 | /* if empty req, must still return chunk representing empty array */ | |
a9177ff5 | 4982 | if (n_elements == 0) |
fa8d436c UD |
4983 | return (Void_t**) _int_malloc(av, 0); |
4984 | marray = 0; | |
4985 | array_size = request2size(n_elements * (sizeof(Void_t*))); | |
4986 | } | |
f65fd747 | 4987 | |
fa8d436c UD |
4988 | /* compute total element size */ |
4989 | if (opts & 0x1) { /* all-same-size */ | |
4990 | element_size = request2size(*sizes); | |
4991 | contents_size = n_elements * element_size; | |
4992 | } | |
4993 | else { /* add up all the sizes */ | |
4994 | element_size = 0; | |
4995 | contents_size = 0; | |
a9177ff5 RM |
4996 | for (i = 0; i != n_elements; ++i) |
4997 | contents_size += request2size(sizes[i]); | |
10dc2a90 | 4998 | } |
f65fd747 | 4999 | |
fa8d436c UD |
5000 | /* subtract out alignment bytes from total to minimize overallocation */ |
5001 | size = contents_size + array_size - MALLOC_ALIGN_MASK; | |
a9177ff5 RM |
5002 | |
5003 | /* | |
fa8d436c UD |
5004 | Allocate the aggregate chunk. |
5005 | But first disable mmap so malloc won't use it, since | |
5006 | we would not be able to later free/realloc space internal | |
5007 | to a segregated mmap region. | |
5008 | */ | |
5009 | mmx = mp_.n_mmaps_max; /* disable mmap */ | |
5010 | mp_.n_mmaps_max = 0; | |
5011 | mem = _int_malloc(av, size); | |
5012 | mp_.n_mmaps_max = mmx; /* reset mmap */ | |
a9177ff5 | 5013 | if (mem == 0) |
f65fd747 UD |
5014 | return 0; |
5015 | ||
fa8d436c | 5016 | p = mem2chunk(mem); |
a9177ff5 | 5017 | assert(!chunk_is_mmapped(p)); |
fa8d436c | 5018 | remainder_size = chunksize(p); |
f65fd747 | 5019 | |
fa8d436c UD |
5020 | if (opts & 0x2) { /* optionally clear the elements */ |
5021 | MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size); | |
7799b7b3 | 5022 | } |
f65fd747 | 5023 | |
fa8d436c | 5024 | size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0); |
f65fd747 | 5025 | |
fa8d436c UD |
5026 | /* If not provided, allocate the pointer array as final part of chunk */ |
5027 | if (marray == 0) { | |
5028 | array_chunk = chunk_at_offset(p, contents_size); | |
5029 | marray = (Void_t**) (chunk2mem(array_chunk)); | |
5030 | set_head(array_chunk, (remainder_size - contents_size) | size_flags); | |
5031 | remainder_size = contents_size; | |
5032 | } | |
f65fd747 | 5033 | |
fa8d436c UD |
5034 | /* split out elements */ |
5035 | for (i = 0; ; ++i) { | |
5036 | marray[i] = chunk2mem(p); | |
5037 | if (i != n_elements-1) { | |
a9177ff5 | 5038 | if (element_size != 0) |
fa8d436c UD |
5039 | size = element_size; |
5040 | else | |
a9177ff5 | 5041 | size = request2size(sizes[i]); |
fa8d436c UD |
5042 | remainder_size -= size; |
5043 | set_head(p, size | size_flags); | |
5044 | p = chunk_at_offset(p, size); | |
5045 | } | |
5046 | else { /* the final element absorbs any overallocation slop */ | |
5047 | set_head(p, remainder_size | size_flags); | |
5048 | break; | |
5049 | } | |
5050 | } | |
f65fd747 | 5051 | |
fa8d436c UD |
5052 | #if MALLOC_DEBUG |
5053 | if (marray != chunks) { | |
5054 | /* final element must have exactly exhausted chunk */ | |
a9177ff5 | 5055 | if (element_size != 0) |
fa8d436c UD |
5056 | assert(remainder_size == element_size); |
5057 | else | |
5058 | assert(remainder_size == request2size(sizes[i])); | |
5059 | check_inuse_chunk(av, mem2chunk(marray)); | |
7799b7b3 | 5060 | } |
fa8d436c UD |
5061 | |
5062 | for (i = 0; i != n_elements; ++i) | |
5063 | check_inuse_chunk(av, mem2chunk(marray[i])); | |
f65fd747 UD |
5064 | #endif |
5065 | ||
fa8d436c | 5066 | return marray; |
f65fd747 UD |
5067 | } |
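/*
  A usage sketch for the independent_* entry points served by iALLOc
  (the public wrapper names below follow the ptmalloc2/dlmalloc
  convention; not every build exports them):

    #include <stdlib.h>

    struct header { int refcount; };

    int make_node(void)
    {
      size_t sizes[2] = { sizeof(struct header), 128 };
      void *parts[2];
      struct header *h;
      char *payload;

      // one aggregate allocation split into two pieces; opts bit 1
      // (zeroing) is not set on the comalloc path
      if (independent_comalloc(2, sizes, parts) == 0)
        return -1;                 // allocation failed

      h = parts[0];
      payload = parts[1];
      h->refcount = 1;
      payload[0] = '\0';

      free(payload);               // each piece is independently freeable
      free(h);
      return 0;
    }
*/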
5068 | ||
f65fd747 | 5069 | |
fa8d436c UD |
5070 | /* |
5071 | ------------------------------ valloc ------------------------------ | |
f65fd747 UD |
5072 | */ |
5073 | ||
f1c5213d | 5074 | Void_t* |
f65fd747 | 5075 | #if __STD_C |
fa8d436c | 5076 | _int_valloc(mstate av, size_t bytes) |
f65fd747 | 5077 | #else |
fa8d436c | 5078 | _int_valloc(av, bytes) mstate av; size_t bytes; |
f65fd747 UD |
5079 | #endif |
5080 | { | |
fa8d436c UD |
5081 | /* Ensure initialization/consolidation */ |
5082 | if (have_fastchunks(av)) malloc_consolidate(av); | |
5083 | return _int_memalign(av, mp_.pagesize, bytes); | |
f65fd747 | 5084 | } |
f65fd747 UD |
5085 | |
5086 | /* | |
fa8d436c | 5087 | ------------------------------ pvalloc ------------------------------ |
f65fd747 UD |
5088 | */ |
5089 | ||
fa8d436c | 5090 | |
f1c5213d | 5091 | Void_t* |
f65fd747 | 5092 | #if __STD_C |
fa8d436c | 5093 | _int_pvalloc(mstate av, size_t bytes) |
f65fd747 | 5094 | #else |
fa8d436c | 5095 | _int_pvalloc(av, bytes) mstate av; size_t bytes; | |
f65fd747 UD |
5096 | #endif |
5097 | { | |
fa8d436c | 5098 | size_t pagesz; |
f65fd747 | 5099 | |
fa8d436c UD |
5100 | /* Ensure initialization/consolidation */ |
5101 | if (have_fastchunks(av)) malloc_consolidate(av); | |
5102 | pagesz = mp_.pagesize; | |
5103 | return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1)); | |
f65fd747 | 5104 | } |
a9177ff5 | 5105 | |
f65fd747 | 5106 | |
fa8d436c UD |
5107 | /* |
5108 | ------------------------------ malloc_trim ------------------------------ | |
5109 | */ | |
8a4b65b4 | 5110 | |
f65fd747 | 5111 | #if __STD_C |
fa8d436c | 5112 | int mTRIm(size_t pad) |
f65fd747 | 5113 | #else |
fa8d436c | 5114 | int mTRIm(pad) size_t pad; |
f65fd747 UD |
5115 | #endif |
5116 | { | |
fa8d436c | 5117 | mstate av = &main_arena; /* already locked */ |
f65fd747 | 5118 | |
fa8d436c UD |
5119 | /* Ensure initialization/consolidation */ |
5120 | malloc_consolidate(av); | |
8a4b65b4 | 5121 | |
a9177ff5 | 5122 | #ifndef MORECORE_CANNOT_TRIM |
fa8d436c | 5123 | return sYSTRIm(pad, av); |
8a4b65b4 | 5124 | #else |
fa8d436c | 5125 | return 0; |
f65fd747 | 5126 | #endif |
f65fd747 UD |
5127 | } |
5128 | ||
f65fd747 UD |
5129 | |
5130 | /* | |
fa8d436c | 5131 | ------------------------- malloc_usable_size ------------------------- |
f65fd747 UD |
5132 | */ |
5133 | ||
5134 | #if __STD_C | |
fa8d436c | 5135 | size_t mUSABLe(Void_t* mem) |
f65fd747 | 5136 | #else |
fa8d436c | 5137 | size_t mUSABLe(mem) Void_t* mem; |
f65fd747 UD |
5138 | #endif |
5139 | { | |
5140 | mchunkptr p; | |
fa8d436c | 5141 | if (mem != 0) { |
f65fd747 | 5142 | p = mem2chunk(mem); |
fa8d436c UD |
5143 | if (chunk_is_mmapped(p)) |
5144 | return chunksize(p) - 2*SIZE_SZ; | |
5145 | else if (inuse(p)) | |
f65fd747 | 5146 | return chunksize(p) - SIZE_SZ; |
f65fd747 | 5147 | } |
fa8d436c | 5148 | return 0; |
f65fd747 UD |
5149 | } |
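/*
  For illustration: as the code above shows, the usable size is the chunk
  size minus the allocator's bookkeeping overhead, so it can exceed the
  request.  User-level sketch (malloc_usable_size is the GNU-exported
  name for this entry point):

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      void *p = malloc(10);
      // always >= 10; the slack may be used without reallocating
      printf("usable: %lu\n", (unsigned long) malloc_usable_size(p));
      free(p);
      return 0;
    }
*/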
5150 | ||
fa8d436c UD |
5151 | /* |
5152 | ------------------------------ mallinfo ------------------------------ | |
5153 | */ | |
f65fd747 | 5154 | |
fa8d436c | 5155 | struct mallinfo mALLINFo(mstate av) |
f65fd747 | 5156 | { |
fa8d436c | 5157 | struct mallinfo mi; |
6dd67bd5 | 5158 | size_t i; |
f65fd747 UD |
5159 | mbinptr b; |
5160 | mchunkptr p; | |
f65fd747 | 5161 | INTERNAL_SIZE_T avail; |
fa8d436c UD |
5162 | INTERNAL_SIZE_T fastavail; |
5163 | int nblocks; | |
5164 | int nfastblocks; | |
f65fd747 | 5165 | |
fa8d436c UD |
5166 | /* Ensure initialization */ |
5167 | if (av->top == 0) malloc_consolidate(av); | |
8a4b65b4 | 5168 | |
fa8d436c | 5169 | check_malloc_state(av); |
8a4b65b4 | 5170 | |
fa8d436c UD |
5171 | /* Account for top */ |
5172 | avail = chunksize(av->top); | |
5173 | nblocks = 1; /* top always exists */ | |
f65fd747 | 5174 | |
fa8d436c UD |
5175 | /* traverse fastbins */ |
5176 | nfastblocks = 0; | |
5177 | fastavail = 0; | |
5178 | ||
5179 | for (i = 0; i < NFASTBINS; ++i) { | |
5180 | for (p = av->fastbins[i]; p != 0; p = p->fd) { | |
5181 | ++nfastblocks; | |
5182 | fastavail += chunksize(p); | |
5183 | } | |
5184 | } | |
5185 | ||
5186 | avail += fastavail; | |
f65fd747 | 5187 | |
fa8d436c UD |
5188 | /* traverse regular bins */ |
5189 | for (i = 1; i < NBINS; ++i) { | |
5190 | b = bin_at(av, i); | |
5191 | for (p = last(b); p != b; p = p->bk) { | |
5192 | ++nblocks; | |
5193 | avail += chunksize(p); | |
5194 | } | |
5195 | } | |
f65fd747 | 5196 | |
fa8d436c UD |
5197 | mi.smblks = nfastblocks; |
5198 | mi.ordblks = nblocks; | |
5199 | mi.fordblks = avail; | |
5200 | mi.uordblks = av->system_mem - avail; | |
5201 | mi.arena = av->system_mem; | |
5202 | mi.hblks = mp_.n_mmaps; | |
5203 | mi.hblkhd = mp_.mmapped_mem; | |
5204 | mi.fsmblks = fastavail; | |
5205 | mi.keepcost = chunksize(av->top); | |
5206 | mi.usmblks = mp_.max_total_mem; | |
5207 | return mi; | |
5208 | } | |
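/*
  A usage sketch for the statistics gathered above (struct mallinfo is
  declared in <malloc.h>; the public glibc wrapper takes no argument and
  reports on the main arena):

    #include <malloc.h>
    #include <stdio.h>

    static void report(void)
    {
      struct mallinfo mi = mallinfo();
      printf("system bytes (arena)      = %d\n", mi.arena);
      printf("in use (uordblks)         = %d\n", mi.uordblks);
      printf("free (fordblks)           = %d\n", mi.fordblks);
      printf("mmapped (hblkhd)          = %d\n", mi.hblkhd);
      printf("top releasable (keepcost) = %d\n", mi.keepcost);
    }
*/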
f65fd747 | 5209 | |
fa8d436c UD |
5210 | /* |
5211 | ------------------------------ malloc_stats ------------------------------ | |
f65fd747 UD |
5212 | */ |
5213 | ||
fa8d436c | 5214 | void mSTATs() |
f65fd747 | 5215 | { |
8a4b65b4 | 5216 | int i; |
fa8d436c | 5217 | mstate ar_ptr; |
8a4b65b4 | 5218 | struct mallinfo mi; |
fa8d436c | 5219 | unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b; |
8a4b65b4 UD |
5220 | #if THREAD_STATS |
5221 | long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0; | |
5222 | #endif | |
5223 | ||
a234e27d UD |
5224 | if(__malloc_initialized < 0) |
5225 | ptmalloc_init (); | |
8dab36a1 UD |
5226 | #ifdef _LIBC |
5227 | _IO_flockfile (stderr); | |
5228 | int old_flags2 = ((_IO_FILE *) stderr)->_flags2; | |
5229 | ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL; | |
5230 | #endif | |
fa8d436c UD |
5231 | for (i=0, ar_ptr = &main_arena;; i++) { |
5232 | (void)mutex_lock(&ar_ptr->mutex); | |
5233 | mi = mALLINFo(ar_ptr); | |
8a4b65b4 UD |
5234 | fprintf(stderr, "Arena %d:\n", i); |
5235 | fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena); | |
5236 | fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks); | |
fa8d436c UD |
5237 | #if MALLOC_DEBUG > 1 |
5238 | if (i > 0) | |
5239 | dump_heap(heap_for_ptr(top(ar_ptr))); | |
5240 | #endif | |
8a4b65b4 UD |
5241 | system_b += mi.arena; |
5242 | in_use_b += mi.uordblks; | |
5243 | #if THREAD_STATS | |
5244 | stat_lock_direct += ar_ptr->stat_lock_direct; | |
5245 | stat_lock_loop += ar_ptr->stat_lock_loop; | |
5246 | stat_lock_wait += ar_ptr->stat_lock_wait; | |
5247 | #endif | |
fa8d436c | 5248 | (void)mutex_unlock(&ar_ptr->mutex); |
7e3be507 UD |
5249 | ar_ptr = ar_ptr->next; |
5250 | if(ar_ptr == &main_arena) break; | |
8a4b65b4 | 5251 | } |
7799b7b3 | 5252 | #if HAVE_MMAP |
8a4b65b4 | 5253 | fprintf(stderr, "Total (incl. mmap):\n"); |
7799b7b3 UD |
5254 | #else |
5255 | fprintf(stderr, "Total:\n"); | |
5256 | #endif | |
8a4b65b4 UD |
5257 | fprintf(stderr, "system bytes = %10u\n", system_b); |
5258 | fprintf(stderr, "in use bytes = %10u\n", in_use_b); | |
5259 | #ifdef NO_THREADS | |
fa8d436c | 5260 | fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem); |
8a4b65b4 | 5261 | #endif |
f65fd747 | 5262 | #if HAVE_MMAP |
fa8d436c UD |
5263 | fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps); |
5264 | fprintf(stderr, "max mmap bytes = %10lu\n", | |
5265 | (unsigned long)mp_.max_mmapped_mem); | |
f65fd747 UD |
5266 | #endif |
5267 | #if THREAD_STATS | |
8a4b65b4 | 5268 | fprintf(stderr, "heaps created = %10d\n", stat_n_heaps); |
f65fd747 UD |
5269 | fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct); |
5270 | fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop); | |
8a4b65b4 UD |
5271 | fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait); |
5272 | fprintf(stderr, "locked total = %10ld\n", | |
5273 | stat_lock_direct + stat_lock_loop + stat_lock_wait); | |
f65fd747 | 5274 | #endif |
8dab36a1 UD |
5275 | #ifdef _LIBC |
5276 | ((_IO_FILE *) stderr)->_flags2 |= old_flags2; | |
5277 | _IO_funlockfile (stderr); | |
5278 | #endif | |
f65fd747 UD |
5279 | } |
5280 | ||
f65fd747 UD |
5281 | |
5282 | /* | |
fa8d436c | 5283 | ------------------------------ mallopt ------------------------------ |
f65fd747 UD |
5284 | */ |
5285 | ||
5286 | #if __STD_C | |
5287 | int mALLOPt(int param_number, int value) | |
5288 | #else | |
5289 | int mALLOPt(param_number, value) int param_number; int value; | |
5290 | #endif | |
5291 | { | |
fa8d436c UD |
5292 | mstate av = &main_arena; |
5293 | int res = 1; | |
f65fd747 | 5294 | |
0cb71e02 UD |
5295 | if(__malloc_initialized < 0) |
5296 | ptmalloc_init (); | |
fa8d436c UD |
5297 | (void)mutex_lock(&av->mutex); |
5298 | /* Ensure initialization/consolidation */ | |
5299 | malloc_consolidate(av); | |
2f6d1f1b | 5300 | |
fa8d436c UD |
5301 | switch(param_number) { |
5302 | case M_MXFAST: | |
5303 | if (value >= 0 && value <= MAX_FAST_SIZE) { | |
5304 | set_max_fast(av, value); | |
5305 | } | |
5306 | else | |
5307 | res = 0; | |
5308 | break; | |
2f6d1f1b | 5309 | |
fa8d436c UD |
5310 | case M_TRIM_THRESHOLD: |
5311 | mp_.trim_threshold = value; | |
5312 | break; | |
2f6d1f1b | 5313 | |
fa8d436c UD |
5314 | case M_TOP_PAD: |
5315 | mp_.top_pad = value; | |
5316 | break; | |
2f6d1f1b | 5317 | |
fa8d436c UD |
5318 | case M_MMAP_THRESHOLD: |
5319 | #if USE_ARENAS | |
5320 | /* Forbid setting the threshold too high. */ | |
5321 | if((unsigned long)value > HEAP_MAX_SIZE/2) | |
5322 | res = 0; | |
5323 | else | |
2f6d1f1b | 5324 | #endif |
fa8d436c UD |
5325 | mp_.mmap_threshold = value; |
5326 | break; | |
2f6d1f1b | 5327 | |
fa8d436c UD |
5328 | case M_MMAP_MAX: |
5329 | #if !HAVE_MMAP | |
5330 | if (value != 0) | |
5331 | res = 0; | |
5332 | else | |
9a51759b | 5333 | #endif |
fa8d436c UD |
5334 | mp_.n_mmaps_max = value; |
5335 | break; | |
10dc2a90 | 5336 | |
fa8d436c UD |
5337 | case M_CHECK_ACTION: |
5338 | check_action = value; | |
5339 | break; | |
b22fc5f5 | 5340 | } |
fa8d436c UD |
5341 | (void)mutex_unlock(&av->mutex); |
5342 | return res; | |
b22fc5f5 UD |
5343 | } |
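/*
  A usage sketch for the tunables handled above (the M_* constants come
  from <malloc.h>; the values shown are illustrative):

    #include <malloc.h>

    void tune(void)
    {
      mallopt(M_MXFAST, 0);               // max_fast = 0: no fastbins
      mallopt(M_TRIM_THRESHOLD, 1 << 20); // trim top past 1 MB of slack
      mallopt(M_MMAP_THRESHOLD, 1 << 18); // mmap requests of 256 KB+
      mallopt(M_MMAP_MAX, 1024);          // at most 1024 mmapped regions
    }
*/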
5344 | ||
10dc2a90 | 5345 | |
a9177ff5 | 5346 | /* |
fa8d436c UD |
5347 | -------------------- Alternative MORECORE functions -------------------- |
5348 | */ | |
10dc2a90 | 5349 | |
b22fc5f5 | 5350 | |
fa8d436c UD |
5351 | /* |
5352 | General Requirements for MORECORE. | |
b22fc5f5 | 5353 | |
fa8d436c | 5354 | The MORECORE function must have the following properties: |
b22fc5f5 | 5355 | |
fa8d436c | 5356 | If MORECORE_CONTIGUOUS is false: |
10dc2a90 | 5357 | |
fa8d436c UD |
5358 | * MORECORE must allocate in multiples of pagesize. It will |
5359 | only be called with arguments that are multiples of pagesize. | |
10dc2a90 | 5360 | |
a9177ff5 | 5361 | * MORECORE(0) must return an address that is at least |
fa8d436c | 5362 | MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.) |
10dc2a90 | 5363 | |
fa8d436c | 5364 | else (i.e. If MORECORE_CONTIGUOUS is true): |
10dc2a90 | 5365 | |
fa8d436c UD |
5366 | * Consecutive calls to MORECORE with positive arguments |
5367 | return increasing addresses, indicating that space has been | |
5368 | contiguously extended. | |
10dc2a90 | 5369 | |
fa8d436c UD |
5370 | * MORECORE need not allocate in multiples of pagesize. |
5371 | Its arguments, likewise, need not be multiples of pagesize. | |
10dc2a90 | 5372 | |
fa8d436c | 5373 | * MORECORE need not page-align. |
10dc2a90 | 5374 | |
fa8d436c | 5375 | In either case: |
10dc2a90 | 5376 | |
fa8d436c UD |
5377 | * MORECORE may allocate more memory than requested. (Or even less, |
5378 | but this will generally result in a malloc failure.) | |
10dc2a90 | 5379 | |
fa8d436c UD |
5380 | * MORECORE must not allocate memory when given argument zero, but |
5381 | instead return one past the end address of memory from previous | |
5382 | nonzero call. This malloc does NOT call MORECORE(0) | |
5383 | until at least one call with positive arguments is made, so | |
5384 | the initial value returned is not important. | |
10dc2a90 | 5385 | |
fa8d436c UD |
5386 | * Even though consecutive calls to MORECORE need not return contiguous |
5387 | addresses, it must be OK for malloc'ed chunks to span multiple | |
5388 | regions in those cases where they do happen to be contiguous. | |
10dc2a90 | 5389 | |
fa8d436c UD |
5390 | * MORECORE need not handle negative arguments -- it may instead |
5391 | just return MORECORE_FAILURE when given negative arguments. | |
5392 | Negative arguments are always multiples of pagesize. MORECORE | |
5393 | must not misinterpret negative args as large positive unsigned | |
5394 | args. You can suppress all such calls from even occurring by defining | |
5395 | MORECORE_CANNOT_TRIM. | |
10dc2a90 | 5396 | |
fa8d436c UD |
5397 | There is some variation across systems about the type of the |
5398 | argument to sbrk/MORECORE. If size_t is unsigned, then it cannot | |
5399 | actually be size_t, because sbrk supports negative args, so it is | |
5400 | normally the signed type of the same width as size_t (sometimes | |
5401 | declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much | |
5402 | matter though. Internally, we use "long" as arguments, which should | |
5403 | work across all reasonable possibilities. | |
ee74a442 | 5404 | |
fa8d436c UD |
5405 | Additionally, if MORECORE ever returns failure for a positive |
5406 | request, and HAVE_MMAP is true, then mmap is used as a noncontiguous | |
5407 | system allocator. This is a useful backup strategy for systems with | |
5408 | holes in address spaces -- in this case sbrk cannot contiguously | |
5409 | expand the heap, but mmap may be able to map noncontiguous space. | |
7e3be507 | 5410 | |
fa8d436c UD |
5411 | If you'd like mmap to ALWAYS be used, you can define MORECORE to be |
5412 | a function that always returns MORECORE_FAILURE. | |
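  For instance (a sketch; the argument follows the "long"-based calling
  convention discussed above, and the function name is illustrative):

  #define MORECORE fail_morecore
  #define MORECORE_CONTIGUOUS 0

  void *fail_morecore(long increment)
  {
    (void) increment;                 // never extend; force mmap paths
    return (void *) MORECORE_FAILURE;
  }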
2e65ca2b | 5413 | |
fa8d436c UD |
5414 | If you are using this malloc with something other than sbrk (or its |
5415 | emulation) to supply memory regions, you probably want to set | |
5416 | MORECORE_CONTIGUOUS as false. As an example, here is a custom | |
5417 | allocator kindly contributed for pre-OSX macOS. It uses virtually | |
5418 | but not necessarily physically contiguous non-paged memory (locked | |
5419 | in, present and won't get swapped out). You can use it by | |
5420 | uncommenting this section, adding some #includes, and setting up the | |
5421 | appropriate defines above: | |
7e3be507 | 5422 | |
fa8d436c UD |
5423 | #define MORECORE osMoreCore |
5424 | #define MORECORE_CONTIGUOUS 0 | |
7e3be507 | 5425 | |
fa8d436c UD |
5426 | There is also a shutdown routine that should somehow be called for |
5427 | cleanup upon program exit. | |
7e3be507 | 5428 | |
fa8d436c UD |
5429 | #define MAX_POOL_ENTRIES 100 |
5430 | #define MINIMUM_MORECORE_SIZE (64 * 1024) | |
5431 | static int next_os_pool; | |
5432 | void *our_os_pools[MAX_POOL_ENTRIES]; | |
7e3be507 | 5433 | |
  void *osMoreCore(int size)
  {
    void *ptr = 0;
    static void *sbrk_top = 0;

    if (size > 0)
    {
      if (size < MINIMUM_MORECORE_SIZE)
        size = MINIMUM_MORECORE_SIZE;
      if (next_os_pool >= MAX_POOL_ENTRIES)
        return (void *) MORECORE_FAILURE;  // pool table is full
      if (CurrentExecutionLevel() == kTaskLevel)
        ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
      if (ptr == 0)
      {
        return (void *) MORECORE_FAILURE;
      }
      // save ptrs so they can be freed during cleanup
      our_os_pools[next_os_pool] = ptr;
      next_os_pool++;
      // round up to a page boundary; the extra RM_PAGE_SIZE bytes
      // requested above leave room for this adjustment
      ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
      sbrk_top = (char *) ptr + size;
      return ptr;
    }
    else if (size < 0)
    {
      // we don't currently support shrink behavior
      return (void *) MORECORE_FAILURE;
    }
    else
    {
      // MORECORE(0): one past the end of the most recent region
      return sbrk_top;
    }
  }

  // clean up any allocated memory pools
  // called as the last thing before shutting down the driver

  void osCleanupMem(void)
  {
    void **ptr;

    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
      if (*ptr)
      {
        PoolDeallocate(*ptr);
        *ptr = 0;
      }
  }
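
  In a hosted environment, one hypothetical way to arrange that
  cleanup call at program exit would be:

    atexit(osCleanupMem);   // e.g. early in main(); requires <stdlib.h>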

*/

/* Helper code.  */

/* Print a diagnostic and/or abort, depending on the bits set in ACTION:
   bit 0 (1): print STR, with PTR rendered in hex unless bit 2 is set;
   bit 1 (2): abort -- via __libc_message when a message is printed,
              or directly otherwise;
   bit 2 (4): print STR alone instead of the "*** glibc detected ***"
              form that includes PTR.  */
static void
malloc_printerr (int action, const char *str, void *ptr)
{
  if (action & 1)
    {
      char buf[2 * sizeof (uintptr_t) + 1];

      /* Render PTR as a zero-padded hexadecimal string in BUF.  */
      buf[sizeof (buf) - 1] = '\0';
      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
      while (cp > buf)
        *--cp = '0';

      __libc_message (action & 2,
                      action & 4
                      ? "%s\n" : "*** glibc detected *** %s: 0x%s ***\n",
                      str, cp);
    }
  else if (action & 2)
    abort ();
}

#ifdef _LIBC
# include <sys/param.h>

/* We need a wrapper function for one of the additions of POSIX.  */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;
  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                        __const __malloc_ptr_t)) =
    __memalign_hook;

  /* Test whether the ALIGNMENT argument is valid.  It must be a power
     of two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !powerof2 (alignment / sizeof (void *))
      || alignment == 0)
    return EINVAL;

  /* Call the hook here, so that the caller is posix_memalign's caller
     and not posix_memalign itself.  */
  if (hook != NULL)
    mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
  else
    mem = public_mEMALIGn (alignment, size);

  if (mem != NULL)
    {
      *memptr = mem;
      return 0;
    }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)
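
/* Caller's view, as an illustrative sketch (standard C and POSIX
   names only; not part of this file):

     void *p;
     int err = posix_memalign (&p, 64, 1024);
     if (err == 0)
       free (p);

   posix_memalign returns 0 on success or an error code (EINVAL,
   ENOMEM) directly; it does not set errno.  */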

strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
weak_alias (__libc_memalign, memalign)
strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
strong_alias (__libc_mallinfo, __mallinfo)
weak_alias (__libc_mallinfo, mallinfo)
strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
weak_alias (__malloc_get_state, malloc_get_state)
weak_alias (__malloc_set_state, malloc_set_state)

#endif /* _LIBC */

/* ------------------------------------------------------------
History:

[see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]

*/
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */