/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 1996-2009, 2010, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>
   and Doug Lea <dl@cs.oswego.edu>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/*
  This is a version (aka ptmalloc2) of malloc/free/realloc written by
  Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.

  There have been substantial changes made after the integration into
  glibc in all parts of the code.  Do not look for much commonality
  with the ptmalloc2 version.

* Version ptmalloc2-20011215
  based on:
  VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)

* Quickstart

  In order to compile this implementation, a Makefile is provided with
  the ptmalloc2 distribution, which has pre-defined targets for some
  popular systems (e.g. "make posix" for Posix threads).  All that is
  typically required with regard to compiler flags is the selection of
  the thread package via defining one out of USE_PTHREADS, USE_THR or
  USE_SPROC.  Check the thread-m.h file for what effects this has.
  Many/most systems will additionally require USE_TSD_DATA_HACK to be
  defined, so this is the default for "make posix".

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written.  However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator for malloc-intensive programs.

  The main properties of the algorithms are:
  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
    with ties normally decided via FIFO (i.e. least recently used).
  * For small (<= 64 bytes by default) requests, it is a caching
    allocator that maintains pools of quickly recycled chunks.
  * In between, and for combinations of large and small requests, it does
    the best it can trying to meet both goals at once.
  * For very large requests (>= 128KB by default), it relies on system
    memory mapping facilities, if supported.

  For a longer but slightly out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

  You may already by default be using a C library containing a malloc
  that is based on some version of this malloc (for example in
  linux).  You might still want to use the one in this file in order to
  customize settings or to avoid overheads associated with library
  versions.

* Contents, described in more detail in "description of public routines" below.

  Standard (ANSI/SVID/...) functions:
    malloc(size_t n);
    calloc(size_t n_elements, size_t element_size);
    free(void* p);
    realloc(void* p, size_t n);
    memalign(size_t alignment, size_t n);
    valloc(size_t n);
    mallinfo()
    mallopt(int parameter_number, int parameter_value)

  Additional functions:
    independent_calloc(size_t n_elements, size_t size, void* chunks[]);
    independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
    pvalloc(size_t n);
    cfree(void* p);
    malloc_trim(size_t pad);
    malloc_usable_size(void* p);
    malloc_stats();

* Vital statistics:

  Supported pointer representation:       4 or 8 bytes
  Supported size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.
       You can adjust this by defining INTERNAL_SIZE_T.

  Alignment:                              2 * sizeof(size_t) (default)
       (i.e., 8-byte alignment with 4-byte size_t).  This suffices for
       nearly all current machines and C compilers.  However, you can
       define MALLOC_ALIGNMENT to be wider than this if necessary.

  Minimum overhead per allocated chunk:   4 or 8 bytes
       Each malloced chunk has a hidden word of overhead holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
       ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field and 8 (16) bytes for
       free list pointers.  Thus, the minimum allocatable size is
       16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

       The maximum overhead wastage (i.e., number of extra bytes
       allocated than were requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is 2 *
       sizeof(size_t) bytes plus the remainder from a system page (the
       minimal mmap unit); typically 4096 or 8192 bytes.
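
       (As an illustrative worked example, under the assumptions
       above: with 4-byte size_t, a request for 13 bytes is satisfied
       from a 24-byte chunk -- 13 bytes requested plus 4 bytes of
       size/status overhead, rounded up to a multiple of the 8-byte
       alignment -- leaving 20 usable bytes and 7 bytes of wastage.)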

  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
                           8-byte size_t: 2^64 minus about two pages

       It is assumed that (possibly signed) size_t values suffice to
       represent chunk sizes.  `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type.  The ISO C standard says that it must be
       unsigned, but a few systems are known not to adhere to this.
       Additionally, even when size_t is unsigned, sbrk (which is by
       default used to obtain memory from system) accepts signed
       arguments, and may not be able to handle size_t-wide arguments
       with negative sign bit.  Generally, values that would appear
       as negative after accounting for overhead and alignment are
       supported only via mmap(), which does not have this
       limitation.

       Requests for sizes outside the allowed range will perform an
       optional failure action and then return null.  (Requests may
       also fail because a system is out of memory.)

  Thread-safety: thread-safe

  Compliance: I believe it is compliant with the 1997 Single Unix
       Specification.  Also SVID/XPG, ANSI C, and probably others as well.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below.  It has been tested most extensively on Solaris and Linux.
    People also report using it in stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  It is not
    at all modular.  (Sorry!)  It uses a lot of macros.  To be at all
    usable, this code should be compiled using an optimizing compiler
    (for example gcc -O3) that can simplify expressions and control
    paths.  (FAQ: some macros import variables as arguments rather than
    declare locals because people reported that some debuggers
    otherwise get confused.)

    OPTION                     DEFAULT VALUE

    Compilation Environment options:

    HAVE_MREMAP                0 unless linux defined

    Changing default word sizes:

    INTERNAL_SIZE_T            size_t
    MALLOC_ALIGNMENT           MAX (2 * sizeof(INTERNAL_SIZE_T),
                                    __alignof__ (long double))

    Configuration and functionality options:

    USE_DL_PREFIX              NOT defined
    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
    USE_MALLOC_LOCK            NOT defined
    MALLOC_DEBUG               NOT defined
    REALLOC_ZERO_BYTES_FREES   1
    TRIM_FASTBINS              0

    Options for customizing MORECORE:

    MORECORE                   sbrk
    MORECORE_FAILURE           -1
    MORECORE_CONTIGUOUS        1
    MORECORE_CANNOT_TRIM       NOT defined
    MORECORE_CLEARS            1
    MMAP_AS_MORECORE_SIZE      (1024 * 1024)

    Tuning options that are also dynamically changeable via mallopt:

    DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
    DEFAULT_TRIM_THRESHOLD     128 * 1024
    DEFAULT_TOP_PAD            0
    DEFAULT_MMAP_THRESHOLD     128 * 1024
    DEFAULT_MMAP_MAX           65536

    There are several other #defined constants and macros that you
    probably don't want to touch unless you are extending or adapting malloc.  */

/*
  void* is the pointer type that malloc should say it returns
*/

#ifndef void
#define void      void
#endif /*void*/

#include <stddef.h>   /* for size_t */
#include <stdlib.h>   /* for getenv(), abort() */

#include <malloc-machine.h>

#include <atomic.h>
#include <stdio-common/_itoa.h>
#include <bits/wordsize.h>
#include <sys/sysinfo.h>

#include <ldsodefs.h>

#ifdef __cplusplus
extern "C" {
#endif

#include <unistd.h>
#include <stdio.h>    /* needed for malloc_stats */
#include <errno.h>

/* For uintptr_t.  */
#include <stdint.h>

/* For va_arg, va_start, va_end.  */
#include <stdarg.h>

/* For writev and struct iovec.  */
#include <sys/uio.h>
/* For syslog.  */
#include <sys/syslog.h>

/* For various dynamic linking things.  */
#include <dlfcn.h>

/*
  Debugging:

  Because freed chunks may be overwritten with bookkeeping fields, this
  malloc will often die when freed memory is overwritten by user
  programs.  This can be very effective (albeit in an annoying way)
  in helping track down dangling pointers.

  If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  enabled that will catch more memory errors.  You probably won't be
  able to make much sense of the actual assertion errors, but they
  should help you locate incorrectly overwritten memory.  The checking
  is fairly extensive, and will slow down execution
  noticeably.  Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries.  (By nature, mmapped regions
  cannot be checked very much automatically.)

  Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  this code.  The assertions in the check routines spell out in more
  detail the assumptions and invariants underlying the algorithms.

  Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  checking that all accesses to malloced memory stay within their
  bounds.  However, there are several add-ons and adaptations of this
  or other mallocs available that do this.
*/

#ifdef NDEBUG
# define assert(expr) ((void) 0)
#else
# define assert(expr) \
  ((expr)							\
   ? ((void) 0)							\
   : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))

extern const char *__progname;

static void
__malloc_assert (const char *assertion, const char *file, unsigned int line,
                 const char *function)
{
  (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
                     __progname, __progname[0] ? ": " : "",
                     file, line,
                     function ? function : "", function ? ": " : "",
                     assertion);
  fflush (stderr);
  abort ();
}
#endif


/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes.

  The default version is the same as size_t.

  While not strictly necessary, it is best to define this as an
  unsigned type, even if size_t is a signed type.  This may avoid some
  artificial size limitations on some systems.

  On a 64-bit machine, you may be able to reduce malloc overhead by
  defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
  expense of not being able to handle more than 2^32 of malloced
  space.  If this limitation is acceptable, you are encouraged to set
  this unless you are on a platform requiring 16-byte alignments.  In
  this case the alignment requirements turn out to negate any
  potential advantages of decreasing size_t word size.

  Implementors: Beware of the possible combinations of:
     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
       and might be the same width as int or as long
     - size_t might have different width and signedness than INTERNAL_SIZE_T
     - int and long might be 32 or 64 bits, and might be the same width
  To deal with this, most comparisons and difference computations
  among INTERNAL_SIZE_Ts should cast them to unsigned long, being
  aware of the fact that casting an unsigned int to a wider long does
  not sign-extend.  (This also makes checking for negative numbers
  awkward.)  Some of these casts result in harmless compiler warnings
  on some systems.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/* The corresponding word size */
#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))


/*
  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
  It must be a power of two at least 2 * SIZE_SZ, even on machines
  for which smaller alignments would suffice.  It may be defined as
  larger than this though.  Note however that code and data structures
  are optimized for the case of 8-byte alignment.
*/


#ifndef MALLOC_ALIGNMENT
/* XXX This is the correct definition.  It differs from 2*SIZE_SZ only on
   powerpc32.  For the time being, changing this is causing more
   compatibility problems due to malloc_get_state/malloc_set_state than
   will returning blocks not adequately aligned for long double objects
   under -mlong-double-128.

#define MALLOC_ALIGNMENT       (2 * SIZE_SZ < __alignof__ (long double) \
                                ? __alignof__ (long double) : 2 * SIZE_SZ)
*/
#define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
#endif

/* The corresponding bit mask value */
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
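
/*
  As an illustration only (not itself part of this file's API): the
  mask is the usual tool for rounding a size up to a multiple of
  MALLOC_ALIGNMENT,

    aligned = (sz + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;

  which is essentially how the request-to-chunk-size conversion
  further below in this file pads request sizes.
*/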



/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  This is required by the C standard.  Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/

#ifndef REALLOC_ZERO_BYTES_FREES
#define REALLOC_ZERO_BYTES_FREES 1
#endif

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming.  Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks.  You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option, that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/

#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif


/*
  USE_DL_PREFIX will prefix all public routines with the string 'dl'.
  This is necessary when you only want to use this malloc in one part
  of a program, using your regular system malloc elsewhere.
*/

/* #define USE_DL_PREFIX */


/*
   Two-phase name translation.
   All of the actual routines are given mangled names.
   When wrappers are used, they become the public callable versions.
   When DL_PREFIX is used, the callable names are prefixed.
*/

#ifdef USE_DL_PREFIX
#define public_cALLOc    dlcalloc
#define public_fREe      dlfree
#define public_cFREe     dlcfree
#define public_mALLOc    dlmalloc
#define public_mEMALIGn  dlmemalign
#define public_rEALLOc   dlrealloc
#define public_vALLOc    dlvalloc
#define public_pVALLOc   dlpvalloc
#define public_mALLINFo  dlmallinfo
#define public_mALLOPt   dlmallopt
#define public_mTRIm     dlmalloc_trim
#define public_mSTATs    dlmalloc_stats
#define public_mUSABLe   dlmalloc_usable_size
#define public_iCALLOc   dlindependent_calloc
#define public_iCOMALLOc dlindependent_comalloc
#define public_gET_STATe dlget_state
#define public_sET_STATe dlset_state
#else /* USE_DL_PREFIX */

/* Special defines for the GNU C library.  */
#define public_cALLOc    __libc_calloc
#define public_fREe      __libc_free
#define public_cFREe     __libc_cfree
#define public_mALLOc    __libc_malloc
#define public_mEMALIGn  __libc_memalign
#define public_rEALLOc   __libc_realloc
#define public_vALLOc    __libc_valloc
#define public_pVALLOc   __libc_pvalloc
#define public_mALLINFo  __libc_mallinfo
#define public_mALLOPt   __libc_mallopt
#define public_mTRIm     __malloc_trim
#define public_mSTATs    __malloc_stats
#define public_mUSABLe   __malloc_usable_size
#define public_iCALLOc   __libc_independent_calloc
#define public_iCOMALLOc __libc_independent_comalloc
#define public_gET_STATe __malloc_get_state
#define public_sET_STATe __malloc_set_state
#define open             __open
#define mmap             __mmap
#define munmap           __munmap
#define mremap           __mremap
#define mprotect         __mprotect
#define MORECORE         (*__morecore)
#define MORECORE_FAILURE 0

void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;

#endif /* USE_DL_PREFIX */


#include <string.h>


/* Force a value to be in a register and stop the compiler referring
   to the source (mostly memory location) again.  */
#define force_reg(val) \
  ({ __typeof (val) _v; asm ("" : "=r" (_v) : "0" (val)); _v; })


/*
  MORECORE-related declarations.  By default, rely on sbrk
*/


/*
  MORECORE is the name of the routine to call to obtain more memory
  from the system.  See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/

#ifndef MORECORE
#define MORECORE sbrk
#endif

/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap.  Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif

/*
  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses.  This is true of unix sbrk.  Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls.  But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/

#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  Define MORECORE_CANNOT_TRIM if your version of MORECORE
  cannot release space back to the system when given negative
  arguments.  This is generally necessary only if you are using
  a hand-crafted MORECORE function that cannot handle negative arguments.
*/

/* #define MORECORE_CANNOT_TRIM */

/*  MORECORE_CLEARS           (default 1)
     The degree to which the routine mapped to MORECORE zeroes out
     memory: never (0), only for newly allocated space (1) or always
     (2).  The distinction between (1) and (2) is necessary because on
     some systems, if the application first decrements and then
     increments the break value, the contents of the reallocated space
     are unspecified.
*/

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif


/*
   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
   sbrk fails, and mmap is used as a backup.  The value must be a
   multiple of page size.  This backup strategy generally applies only
   when systems have "holes" in address space, so sbrk cannot perform
   contiguous expansion, but there is still space available on system.
   On systems for which this is known to be useful (i.e. most linux
   kernels), this occurs only when programs allocate huge amounts of
   memory.  Between this, and the fact that mmap regions tend to be
   limited, the size should be large, to avoid too many mmap calls and
   thus avoid running out of kernel resources.  */

#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif

#endif /* HAVE_MREMAP */


/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics.  It should work on any SVID/XPG compliant system that has
  a /usr/include/malloc.h defining struct mallinfo.  (If you'd like to
  install such a thing yourself, cut out the preliminary declarations
  as described above and below and save them in a malloc.h file.  But
  there's no compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc.  These fields are instead filled by mallinfo() with
  other numbers that might be of interest.
*/


/* ---------- description of public routines ------------ */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  if no space is available.  Additionally, on failure, errno is
  set to ENOMEM on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk.  (The minimum
  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  systems.)  On most systems, size_t is an unsigned type, so calls
  with negative arguments are interpreted as requests for huge amounts
  of space, which will often fail.  The maximum supported value of n
  differs across systems, but is in all cases less than the maximum
  representable value of a size_t.
*/
void*  public_mALLOc(size_t);
libc_hidden_proto (public_mALLOc)

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null.  It can have arbitrary (i.e., bad!)
  effects if p has already been freed.

  Unless disabled (using mallopt), freeing very large spaces will,
  when possible, automatically trigger operations that give
  back unused memory to the system, thus reducing program footprint.
*/
void     public_fREe(void*);
libc_hidden_proto (public_fREe)

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void*  public_cALLOc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p.  The algorithm
  prefers extending p when possible, otherwise it employs the
  equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  Unless the #define
  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  zero (re)allocates a minimum-sized chunk.

  Large chunks that were internally obtained via mmap will always
  be reallocated using malloc-copy-free sequences unless
  the system supports MREMAP (currently only linux).

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void*  public_rEALLOc(void*, size_t);
libc_hidden_proto (public_rEALLOc)
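
/*
  Because p is NOT freed when realloc fails, callers should not
  overwrite their only copy of p with the result.  An illustrative
  idiom (handle_out_of_memory is a hypothetical placeholder):

    void *tmp = realloc(p, n);
    if (tmp != NULL)
      p = tmp;
    else
      handle_out_of_memory();    p is still valid and usable here
*/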

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two.  If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void*  public_mEMALIGn(size_t, size_t);
libc_hidden_proto (public_mEMALIGn)
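
/*
  For example (illustrative only): a 100-byte buffer starting on a
  4096-byte page boundary,

    char *buf = memalign(4096, 100);

  The result, like any malloc'ed chunk, is released with ordinary
  free().
*/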

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system.  If the pagesize is unknown, 4096 is used.
*/
void*  public_vALLOc(size_t);



/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  in this malloc.  The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  so setting them has no effect.  But this malloc also supports four
  other options in mallopt.  See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #   default    allowed param values
  M_MXFAST          1         64         0-80  (0 disables fastbins)
  M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
  M_TOP_PAD        -2         0          any
  M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
*/
int      public_mALLOPt(int, int);
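
/*
  For example (illustrative only), a long-lived program might raise
  the trim threshold and disable mmap-based servicing entirely:

    mallopt(M_TRIM_THRESHOLD, 256 * 1024);
    mallopt(M_MMAP_MAX, 0);

  Each call returns 1 on success, and 0 if the parameter number is
  unrecognized or the value cannot be applied.
*/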


/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    the number of fastbin blocks (i.e., small chunks that
               have been freed but not reused or consolidated)
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space.  This will be greater
               than current total if trimming has occurred.
  fsmblks:   total bytes held in fastbin blocks
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim.  ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
struct mallinfo public_mALLINFo(void);
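
/*
  For example (illustrative only), a quick report of heap usage built
  from the fields described above:

    struct mallinfo mi = mallinfo();
    printf("in use: %d, free: %d, mmapped: %d bytes\n",
           mi.uordblks, mi.fordblks, mi.hblkhd);

  (printf comes from <stdio.h>, already included above.)
*/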


/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
void*  public_pVALLOc(size_t);

/*
  cfree(void* p);
  Equivalent to free(p).

  cfree is needed/defined on some systems that pair it with calloc,
  for odd historical reasons (such as: cfree is used in example
  code in the first edition of K&R).
*/
void     public_cFREe(void*);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool.  You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program.  However, it cannot guarantee to reduce memory.  Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed.  If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less).  Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int      public_mTRIm(size_t);
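
/*
  For example (illustrative only): after releasing a large working
  set, a program can return memory to the system while keeping 64K of
  slack for upcoming allocations,

    free(big_buffer);
    if (malloc_trim(64 * 1024))
      ... some memory was actually returned to the system ...
*/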

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects.  This is not a particularly great
  programming practice.  malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);

*/
size_t   public_mUSABLe(void*);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed.  Note that this is the number of bytes allocated, not the
  number requested.  It will be larger than the number requested
  because of alignment and bookkeeping overhead.  Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

*/
void     public_mSTATs(void);

/*
  malloc_get_state(void);

  Returns the state of all malloc variables in an opaque data
  structure.
*/
void*  public_gET_STATe(void);

/*
  malloc_set_state(void* state);

  Restore the state of all malloc variables from data obtained with
  malloc_get_state().
*/
int      public_sET_STATe(void*);

/*
  posix_memalign(void **memptr, size_t alignment, size_t size);

  POSIX wrapper like memalign(), checking for validity of size.
*/
int      __posix_memalign(void **, size_t, size_t);
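
/*
  For example (illustrative only): unlike memalign, posix_memalign
  reports failure through its return value, and POSIX requires the
  alignment to be a power of two multiple of sizeof(void *):

    void *p;
    if (posix_memalign(&p, 64, 1000) == 0)
      ... p points to 1000 bytes on a 64-byte boundary ...
*/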

/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces.  This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 80.  You wouldn't want it any higher than this anyway.  Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 8 4-byte fields, or small strings representing words, tokens,
  etc.  Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units.  It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
#endif


/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing this much memory.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another.  Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system.  Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum.  For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values.  As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system.  Releasing this much memory
  would allow such a process to run in memory.  Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all.  And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory.  Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The trim value must be greater than page size to have any useful
  effect.  To disable trimming completely, you can set to
  (unsigned long)(-1)

  Trim settings interact with fastbin (MXFAST) settings: Unless
  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  freeing a chunk with size less than or equal to MXFAST.  Trimming is
  instead delayed until subsequent freeing of larger chunks.  However,
  you can still force an attempted trim by calling malloc_trim.

  Also, trimming is not generally possible in cases where
  the main arena is obtained via mmap.

  Note that the trick some people use of mallocing a huge space and
  then freeing it at program startup, in an attempt to reserve system
  memory, doesn't have the intended effect under automatic trimming,
  since that memory will immediately be returned to the system.
*/

#define M_TRIM_THRESHOLD       -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called.  It is used in two ways internally:

  * When sbrk is called to extend the top of the arena to satisfy
    a new malloc request, this much padding is added to the sbrk
    request.

  * When malloc_trim is called automatically from free(),
    it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often.  Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0.  However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  the program needs.
*/

#define M_TOP_PAD              -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
  MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
  adjusted MMAP_THRESHOLD.
*/

#ifndef DEFAULT_MMAP_THRESHOLD_MIN
#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
#endif

#ifndef DEFAULT_MMAP_THRESHOLD_MAX
/* For 32-bit platforms we cannot increase the maximum mmap
   threshold much because it is also the minimum value for the
   maximum heap size and its alignment.  Going above 512k (i.e., 1M
   for new heaps) wastes too much address space.  */
# if __WORDSIZE == 32
#  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
# else
#  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
# endif
#endif

/*
  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request.  Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system.  A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefits that:

   1. Mmapped space can ALWAYS be individually released back
      to the system, which helps keep the system level memory
      demands of a long-lived program low.
   2. Mapped memory can never become `locked' between
      other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.
   3. On some systems with "holes" in address spaces, mmap can obtain
      memory that sbrk cannot.

  However, it has the disadvantages that:

   1. The space cannot be reclaimed, consolidated, and then
      used to service later requests, as happens with normal chunks.
   2. It can lead to more wastage because of mmap page alignment
      requirements
   3. It causes malloc performance to be more dependent on host
      system memory management support routines which may vary in
      implementation quality and may impose arbitrary
      limitations.  Generally, servicing a request via normal
      malloc steps is faster than going through a system's mmap.

  The advantages of mmap nearly always outweigh disadvantages for
  "large" chunks, but the value of "large" varies across systems.  The
  default is an empirically derived value that works well in most
  systems.


  Update in 2006:
  The above was written in 2001.  Since then the world has changed a lot.
  Memory got bigger.  Applications got bigger.  The virtual address space
  layout in 32 bit linux changed.

  In the new situation, brk() and mmap space is shared and there are no
  artificial limits on brk size imposed by the kernel.  What is more,
  applications have started using transient allocations larger than the
  128Kb as was imagined in 2001.

  The price for mmap is also high now; each time glibc mmaps from the
  kernel, the kernel is forced to zero out the memory it gives to the
  application.  Zeroing memory is expensive and eats a lot of cache and
  memory bandwidth.  This has nothing to do with the efficiency of the
  virtual memory system, by doing mmap the kernel just has no choice but
  to zero.

  In 2001, the kernel had a maximum size for brk() which was about 800
  megabytes on 32 bit x86, at that point brk() would hit the first
  mmapped shared libraries and couldn't expand anymore.  With current 2.6
  kernels, the VA space layout is different and brk() and mmap
  both can span the entire heap at will.

  Rather than using a static threshold for the brk/mmap tradeoff,
  we are now using a simple dynamic one.  The goal is still to avoid
  fragmentation.  The old goals we kept are
  1) try to get the long lived large allocations to use mmap()
  2) really large allocations should always use mmap()
  and we're adding now:
  3) transient allocations should use brk() to avoid forcing the kernel
     having to zero memory over and over again

  The implementation works with a sliding threshold, which is by default
  limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
  out at 128Kb as per the 2001 default.

  This allows us to satisfy requirement 1) under the assumption that long
  lived allocations are made early in the process' lifespan, before it has
  started doing dynamic allocations of the same size (which will
  increase the threshold).

  The upper bound on the threshold satisfies requirement 2)

  The threshold goes up in value when the application frees memory that was
  allocated with the mmap allocator.  The idea is that once the application
  starts freeing memory of a certain size, it's highly probable that this is
  a size the application uses for transient allocations.  This estimator
  is there to satisfy the new third requirement.

*/

#define M_MMAP_THRESHOLD      -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap.  This parameter exists because
  some systems have a limited number of internal tables for
  use by mmap, and using more than a few of them may degrade
  performance.

  The default is set to a value that serves only as a safeguard.
  Setting to 0 disables use of mmap for servicing large requests.
*/

#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#include <malloc.h>

#ifndef BOUNDED_N
#define BOUNDED_N(ptr, sz) (ptr)
#endif
#ifndef RETURN_ADDRESS
#define RETURN_ADDRESS(X_) (NULL)
#endif

/* On some platforms we can compile internal, not exported functions better.
   Let the environment provide a macro and define it to be empty if it
   is not available.  */
#ifndef internal_function
# define internal_function
#endif

/* Forward declarations.  */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;

/* Internal routines.  */

static void*  _int_malloc(mstate, size_t);
static void     _int_free(mstate, mchunkptr, int);
static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
                           INTERNAL_SIZE_T);
static void*  _int_memalign(mstate, size_t, size_t);
static void*  _int_valloc(mstate, size_t);
static void*  _int_pvalloc(mstate, size_t);
/*static void*  cALLOc(size_t, size_t);*/
static int      mTRIm(mstate, size_t);
static size_t   mUSABLe(void*);
static void     mSTATs(void);
static int      mALLOPt(int, int);
static struct mallinfo mALLINFo(mstate);
static void malloc_printerr(int action, const char *str, void *ptr);

static void* internal_function mem2mem_check(void *p, size_t sz);
static int internal_function top_check(void);
static void internal_function munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
#endif

static void*   malloc_check(size_t sz, const void *caller);
static void      free_check(void* mem, const void *caller);
static void*   realloc_check(void* oldmem, size_t bytes,
                             const void *caller);
static void*   memalign_check(size_t alignment, size_t bytes,
                              const void *caller);
/* These routines are never needed in this configuration.  */
static void*   malloc_atfork(size_t sz, const void *caller);
static void      free_atfork(void* mem, const void *caller);


/* ------------- Optional versions of memcopy ---------------- */


/*
  Note: memcpy is ONLY invoked with non-overlapping regions,
  so the (usually slower) memmove is not needed.
*/

#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)


/* ------------------ MMAP support ------------------ */


#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif
#if !defined(MAP_FAILED)
# define MAP_FAILED ((char*)-1)
#endif

#ifndef MAP_NORESERVE
# ifdef MAP_AUTORESRV
#  define MAP_NORESERVE MAP_AUTORESRV
# else
#  define MAP_NORESERVE 0
# endif
#endif

/*
  Nearly all versions of mmap support MAP_ANONYMOUS,
  so the following is unlikely to be needed, but is
  supplied just in case.
*/

#ifndef MAP_ANONYMOUS

static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero.  */

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif
f65fd747 UD |
1228 | |
1229 | ||
1230 | /* | |
fa8d436c | 1231 | ----------------------- Chunk representations ----------------------- |
f65fd747 UD |
1232 | */ |
1233 | ||
1234 | ||
fa8d436c UD |
1235 | /* |
1236 | This struct declaration is misleading (but accurate and necessary). | |
1237 | It declares a "view" into memory allowing access to necessary | |
1238 | fields at known offsets from a given base. See explanation below. | |
1239 | */ | |
1240 | ||
1241 | struct malloc_chunk { | |
1242 | ||
1243 | INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ | |
1244 | INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ | |
1245 | ||
1246 | struct malloc_chunk* fd; /* double links -- used only if free. */ | |
f65fd747 | 1247 | struct malloc_chunk* bk; |
7ecfbd38 UD |
1248 | |
1249 | /* Only used for large blocks: pointer to next larger size. */ | |
1250 | struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */ | |
1251 | struct malloc_chunk* bk_nextsize; | |
f65fd747 UD |
1252 | }; |
1253 | ||
f65fd747 UD |
1254 | |
1255 | /* | |
f65fd747 UD |
1256 | malloc_chunk details: |
1257 | ||
1258 | (The following includes lightly edited explanations by Colin Plumb.) | |
1259 | ||
1260 | Chunks of memory are maintained using a `boundary tag' method as | |
1261 | described in e.g., Knuth or Standish. (See the paper by Paul | |
1262 | Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a | |
1263 | survey of such techniques.) Sizes of free chunks are stored both | |
1264 | in the front of each chunk and at the end. This makes | |
1265 | consolidating fragmented chunks into bigger chunks very fast. The | |
1266 | size fields also hold bits representing whether chunks are free or | |
1267 | in use. | |
1268 | ||
1269 | An allocated chunk looks like this: | |
1270 | ||
1271 | ||
1272 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
72f90263 UD |
1273 | | Size of previous chunk, if allocated | | |
1274 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1275 | | Size of chunk, in bytes |M|P| | |
f65fd747 | 1276 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
72f90263 UD |
1277 | | User data starts here... . |
1278 | . . | |
1279 | . (malloc_usable_size() bytes) . | |
1280 | . | | |
f65fd747 | 1281 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
72f90263 UD |
1282 | | Size of chunk | |
1283 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
f65fd747 UD |
1284 | |
1285 | ||
1286 | Where "chunk" is the front of the chunk for the purpose of most of | |
1287 | the malloc code, but "mem" is the pointer that is returned to the | |
1288 | user. "Nextchunk" is the beginning of the next contiguous chunk. | |
1289 | ||
fa8d436c | 1290 | Chunks always begin on even word boundaries, so the mem portion |
f65fd747 | 1291 | (which is returned to the user) is also on an even word boundary, and |
fa8d436c | 1292 | thus at least double-word aligned. |
f65fd747 UD |
1293 | |
1294 | Free chunks are stored in circular doubly-linked lists, and look like this: | |
1295 | ||
1296 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
72f90263 UD |
1297 | | Size of previous chunk | |
1298 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
f65fd747 UD |
1299 | `head:' | Size of chunk, in bytes |P| |
1300 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
72f90263 UD |
1301 | | Forward pointer to next chunk in list | |
1302 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1303 | | Back pointer to previous chunk in list | | |
1304 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
1305 | | Unused space (may be 0 bytes long) . | |
1306 | . . | |
1307 | . | | |
f65fd747 UD |
1308 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1309 | `foot:' | Size of chunk, in bytes | | |
72f90263 | 1310 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
f65fd747 UD |
1311 | |
1312 | The P (PREV_INUSE) bit, stored in the unused low-order bit of the | |
1313 | chunk size (which is always a multiple of two words), is an in-use | |
1314 | bit for the *previous* chunk. If that bit is *clear*, then the | |
1315 | word before the current chunk size contains the previous chunk | |
1316 | size, and can be used to find the front of the previous chunk. | |
fa8d436c UD |
1317 | The very first chunk allocated always has this bit set, |
1318 | preventing access to non-existent (or non-owned) memory. If | |
1319 | prev_inuse is set for any given chunk, then you CANNOT determine | |
1320 | the size of the previous chunk, and might even get a memory | |
1321 | addressing fault when trying to do so. | |
f65fd747 UD |
1322 | |
1323 | Note that the `foot' of the current chunk is actually represented | |
fa8d436c UD |
1324 | as the prev_size of the NEXT chunk. This makes it easier to |
1325 | deal with alignments etc. but can be very confusing when trying |
1326 | to extend or adapt this code. | |
f65fd747 UD |
1327 | |
1328 | The two exceptions to all this are | |
1329 | ||
fa8d436c | 1330 | 1. The special chunk `top' doesn't bother using the |
72f90263 UD |
1331 | trailing size field since there is no next contiguous chunk |
1332 | that would have to index off it. After initialization, `top' | |
1333 | is forced to always exist. If it would become less than | |
1334 | MINSIZE bytes long, it is replenished. | |
f65fd747 UD |
1335 | |
1336 | 2. Chunks allocated via mmap, which have the second-lowest-order | |
72f90263 UD |
1337 | bit M (IS_MMAPPED) set in their size fields. Because they are |
1338 | allocated one-by-one, each must contain its own trailing size field. | |
f65fd747 UD |
1339 | |
1340 | */ | |
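/* Illustrative sketch (added for exposition, compiled out): a
   self-contained demo of the header layout and flag-bit packing
   described above.  The demo_* names are hypothetical stand-ins for
   chunk2mem/chunksize, and 8-byte size fields are assumed. */
#if 0
#include <assert.h>
#include <stddef.h>

#define DEMO_PREV_INUSE 0x1

struct demo_chunk {
  size_t prev_size;                /* foot of the previous free chunk */
  size_t size;                     /* this chunk's size | flag bits   */
};

static void *demo_chunk2mem (struct demo_chunk *p) {
  return (char *) p + 2 * sizeof (size_t);  /* mem starts past header */
}

static size_t demo_chunksize (struct demo_chunk *p) {
  return p->size & ~(size_t) 0x7;           /* strip P/M/A flag bits  */
}

int main (void) {
  unsigned char arena[64] = { 0 };
  struct demo_chunk *p = (struct demo_chunk *) arena;

  /* A 48-byte chunk whose physical predecessor is in use. */
  p->size = 48 | DEMO_PREV_INUSE;

  assert (demo_chunksize (p) == 48);             /* size without bits */
  assert ((p->size & DEMO_PREV_INUSE) != 0);     /* prev chunk in use */
  assert ((char *) demo_chunk2mem (p) == (char *) p + 16);
  return 0;
}
#endif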
1341 | ||
1342 | /* | |
fa8d436c UD |
1343 | ---------- Size and alignment checks and conversions ---------- |
1344 | */ | |
f65fd747 | 1345 | |
fa8d436c | 1346 | /* conversion from malloc headers to user pointers, and back */ |
f65fd747 | 1347 | |
22a89187 | 1348 | #define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ)) |
fa8d436c | 1349 | #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) |
f65fd747 | 1350 | |
fa8d436c | 1351 | /* The smallest possible chunk */ |
7ecfbd38 | 1352 | #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize)) |
f65fd747 | 1353 | |
fa8d436c | 1354 | /* The smallest size we can malloc is an aligned minimal chunk */ |
f65fd747 | 1355 | |
fa8d436c UD |
1356 | #define MINSIZE \ |
1357 | (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)) | |
f65fd747 | 1358 | |
fa8d436c | 1359 | /* Check if m has acceptable alignment */ |
f65fd747 | 1360 | |
073f560e UD |
1361 | #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0) |
1362 | ||
1363 | #define misaligned_chunk(p) \ | |
1364 | ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \ | |
1365 | & MALLOC_ALIGN_MASK) | |
f65fd747 | 1366 | |
f65fd747 | 1367 | |
a9177ff5 | 1368 | /* |
fa8d436c UD |
1369 | Check if a request is so large that it would wrap around zero when |
1370 | padded and aligned. To simplify some other code, the bound is made | |
1371 | low enough so that adding MINSIZE will also not wrap around zero. | |
1372 | */ | |
f65fd747 | 1373 | |
fa8d436c UD |
1374 | #define REQUEST_OUT_OF_RANGE(req) \ |
1375 | ((unsigned long)(req) >= \ | |
a9177ff5 | 1376 | (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE)) |
f65fd747 | 1377 | |
fa8d436c | 1378 | /* pad request bytes into a usable size -- internal version */ |
f65fd747 | 1379 | |
fa8d436c UD |
1380 | #define request2size(req) \ |
1381 | (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \ | |
1382 | MINSIZE : \ | |
1383 | ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK) | |
f65fd747 | 1384 | |
fa8d436c | 1385 | /* Same, except also perform argument check */ |
f65fd747 | 1386 | |
fa8d436c UD |
1387 | #define checked_request2size(req, sz) \ |
1388 | if (REQUEST_OUT_OF_RANGE(req)) { \ | |
8e58439c | 1389 | __set_errno (ENOMEM); \ |
fa8d436c UD |
1390 | return 0; \ |
1391 | } \ | |
a9177ff5 | 1392 | (sz) = request2size(req); |
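/* Hedged worked example (added for exposition, compiled out): the
   arithmetic of request2size on a typical 64-bit build, assuming
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and therefore MINSIZE == 32.
   The DEMO_* names are hypothetical. */
#if 0
#include <assert.h>
#include <stddef.h>

#define DEMO_SIZE_SZ    8
#define DEMO_ALIGN_MASK 15
#define DEMO_MINSIZE    32
#define demo_request2size(req)                                        \
  (((req) + DEMO_SIZE_SZ + DEMO_ALIGN_MASK < DEMO_MINSIZE)            \
   ? (size_t) DEMO_MINSIZE                                            \
   : ((req) + DEMO_SIZE_SZ + DEMO_ALIGN_MASK) & ~(size_t) DEMO_ALIGN_MASK)

int main (void) {
  assert (demo_request2size (0)  == 32);  /* padded up to MINSIZE     */
  assert (demo_request2size (24) == 32);  /* 24 + 8-byte header fits  */
  assert (demo_request2size (25) == 48);  /* spills into next step    */
  return 0;
}
#endif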
f65fd747 UD |
1393 | |
1394 | /* | |
fa8d436c | 1395 | --------------- Physical chunk operations --------------- |
f65fd747 UD |
1396 | */ |
1397 | ||
10dc2a90 | 1398 | |
fa8d436c UD |
1399 | /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
1400 | #define PREV_INUSE 0x1 | |
f65fd747 | 1401 | |
fa8d436c UD |
1402 | /* extract inuse bit of previous chunk */ |
1403 | #define prev_inuse(p) ((p)->size & PREV_INUSE) | |
f65fd747 | 1404 | |
f65fd747 | 1405 | |
fa8d436c UD |
1406 | /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
1407 | #define IS_MMAPPED 0x2 | |
f65fd747 | 1408 | |
fa8d436c UD |
1409 | /* check for mmap()'ed chunk */ |
1410 | #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) | |
f65fd747 | 1411 | |
f65fd747 | 1412 | |
fa8d436c UD |
1413 | /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained |
1414 | from a non-main arena. This is only set immediately before handing | |
1415 | the chunk to the user, if necessary. */ | |
1416 | #define NON_MAIN_ARENA 0x4 | |
f65fd747 | 1417 | |
fa8d436c UD |
1418 | /* check for chunk from non-main arena */ |
1419 | #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA) | |
f65fd747 UD |
1420 | |
1421 | ||
a9177ff5 RM |
1422 | /* |
1423 | Bits to mask off when extracting size | |
f65fd747 | 1424 | |
fa8d436c UD |
1425 | Note: IS_MMAPPED is intentionally not masked off from size field in |
1426 | macros for which mmapped chunks should never be seen. This should | |
1427 | cause helpful core dumps to occur if it is tried by accident by | |
1428 | people extending or adapting this malloc. | |
f65fd747 | 1429 | */ |
fa8d436c | 1430 | #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA) |
f65fd747 | 1431 | |
fa8d436c UD |
1432 | /* Get size, ignoring use bits */ |
1433 | #define chunksize(p) ((p)->size & ~(SIZE_BITS)) | |
f65fd747 | 1434 | |
f65fd747 | 1435 | |
fa8d436c UD |
1436 | /* Ptr to next physical malloc_chunk. */ |
1437 | #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) )) | |
f65fd747 | 1438 | |
fa8d436c UD |
1439 | /* Ptr to previous physical malloc_chunk */ |
1440 | #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) | |
f65fd747 | 1441 | |
fa8d436c UD |
1442 | /* Treat space at ptr + offset as a chunk */ |
1443 | #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) | |
1444 | ||
1445 | /* extract p's inuse bit */ | |
1446 | #define inuse(p)\ | |
1447 | ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE) | |
f65fd747 | 1448 | |
fa8d436c UD |
1449 | /* set/clear chunk as being inuse without otherwise disturbing */ |
1450 | #define set_inuse(p)\ | |
1451 | ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE | |
f65fd747 | 1452 | |
fa8d436c UD |
1453 | #define clear_inuse(p)\ |
1454 | ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE) | |
f65fd747 UD |
1455 | |
1456 | ||
fa8d436c UD |
1457 | /* check/set/clear inuse bits in known places */ |
1458 | #define inuse_bit_at_offset(p, s)\ | |
1459 | (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) | |
f65fd747 | 1460 | |
fa8d436c UD |
1461 | #define set_inuse_bit_at_offset(p, s)\ |
1462 | (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) | |
f65fd747 | 1463 | |
fa8d436c UD |
1464 | #define clear_inuse_bit_at_offset(p, s)\ |
1465 | (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) | |
f65fd747 | 1466 | |
f65fd747 | 1467 | |
fa8d436c UD |
1468 | /* Set size at head, without disturbing its use bit */ |
1469 | #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s))) | |
f65fd747 | 1470 | |
fa8d436c UD |
1471 | /* Set size/use field */ |
1472 | #define set_head(p, s) ((p)->size = (s)) | |
f65fd747 | 1473 | |
fa8d436c UD |
1474 | /* Set size at footer (only when chunk is not in use) */ |
1475 | #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) | |
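/* Sketch (added for exposition, compiled out) of the offset trick the
   macros above rely on: the inuse state of a chunk p is stored in the
   PREV_INUSE bit of the *next* physical chunk's size field.  Assumes
   8-byte size fields; raw size_t words stand in for malloc_chunks. */
#if 0
#include <assert.h>
#include <stddef.h>

int main (void) {
  size_t arena[8] = { 0 };     /* two adjacent 32-byte chunks          */
  size_t *q = arena + 4;       /* next_chunk(p), 32 bytes past p       */

  q[1] |= 0x1;                 /* set_inuse(p): flag lives in q's size */
  assert ((q[1] & 0x1) != 0);  /* inuse(p) reads this very bit         */
  q[1] &= ~(size_t) 0x1;       /* clear_inuse(p)                       */
  assert ((q[1] & 0x1) == 0);
  return 0;
}
#endif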
f65fd747 UD |
1476 | |
1477 | ||
fa8d436c UD |
1478 | /* |
1479 | -------------------- Internal data structures -------------------- | |
1480 | ||
1481 | All internal state is held in an instance of malloc_state defined | |
1482 | below. There are no other static variables, except in two optional | |
a9177ff5 RM |
1483 | cases: |
1484 | * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. | |
22a89187 UD |
1485 | * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor |
1486 | for mmap. | |
fa8d436c UD |
1487 | |
1488 | Beware of lots of tricks that minimize the total bookkeeping space | |
1489 | requirements. The result is a little over 1K bytes (for 4byte | |
1490 | pointers and size_t.) | |
1491 | */ | |
f65fd747 UD |
1492 | |
1493 | /* | |
fa8d436c UD |
1494 | Bins |
1495 | ||
1496 | An array of bin headers for free chunks. Each bin is doubly | |
1497 | linked. The bins are approximately proportionally (log) spaced. | |
1498 | There are a lot of these bins (128). This may look excessive, but | |
1499 | works very well in practice. Most bins hold sizes that are | |
1500 | unusual as malloc request sizes, but are more usual for fragments | |
1501 | and consolidated sets of chunks, which is what these bins hold, so | |
1502 | they can be found quickly. All procedures maintain the invariant | |
1503 | that no consolidated chunk physically borders another one, so each | |
1504 | chunk in a list is known to be preceded and followed by either |
1505 | inuse chunks or the ends of memory. | |
1506 | ||
1507 | Chunks in bins are kept in size order, with ties going to the | |
1508 | approximately least recently used chunk. Ordering isn't needed | |
1509 | for the small bins, which all contain the same-sized chunks, but | |
1510 | facilitates best-fit allocation for larger chunks. These lists | |
1511 | are just sequential. Keeping them in order almost never requires | |
1512 | enough traversal to warrant using fancier ordered data | |
a9177ff5 | 1513 | structures. |
fa8d436c UD |
1514 | |
1515 | Chunks of the same size are linked with the most | |
1516 | recently freed at the front, and allocations are taken from the | |
1517 | back. This results in LRU (FIFO) allocation order, which tends | |
1518 | to give each chunk an equal opportunity to be consolidated with | |
1519 | adjacent freed chunks, resulting in larger free chunks and less | |
1520 | fragmentation. | |
1521 | ||
1522 | To simplify use in double-linked lists, each bin header acts | |
1523 | as a malloc_chunk. This avoids special-casing for headers. | |
1524 | But to conserve space and improve locality, we allocate | |
1525 | only the fd/bk pointers of bins, and then use repositioning tricks | |
a9177ff5 | 1526 | to treat these as the fields of a malloc_chunk*. |
f65fd747 UD |
1527 | */ |
1528 | ||
fa8d436c | 1529 | typedef struct malloc_chunk* mbinptr; |
f65fd747 | 1530 | |
fa8d436c | 1531 | /* addressing -- note that bin_at(0) does not exist */ |
41999a1a UD |
1532 | #define bin_at(m, i) \ |
1533 | (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \ | |
1534 | - offsetof (struct malloc_chunk, fd)) | |
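/* Sketch (added for exposition, compiled out) of the repositioning
   trick in bin_at: bins[] stores only fd/bk pointer pairs, and the
   returned pointer is backed up by offsetof(fd) so that b->fd and
   b->bk alias the two array slots for bin i.  demo_chunk mirrors the
   malloc_chunk prefix; the names are hypothetical. */
#if 0
#include <assert.h>
#include <stddef.h>

struct demo_chunk {
  size_t prev_size, size;
  struct demo_chunk *fd, *bk;
};

int main (void) {
  struct demo_chunk *bins[8];      /* fd/bk pairs for bins 1..4 */
  int i = 2;

  struct demo_chunk *b = (struct demo_chunk *)
    ((char *) &bins[(i - 1) * 2] - offsetof (struct demo_chunk, fd));

  assert ((void *) &b->fd == (void *) &bins[(i - 1) * 2]);
  assert ((void *) &b->bk == (void *) &bins[(i - 1) * 2 + 1]);
  return 0;
}
#endif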
f65fd747 | 1535 | |
fa8d436c UD |
1536 | /* analog of ++bin */ |
1537 | #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1))) | |
f65fd747 | 1538 | |
fa8d436c UD |
1539 | /* Reminders about list directionality within bins */ |
1540 | #define first(b) ((b)->fd) | |
1541 | #define last(b) ((b)->bk) | |
f65fd747 | 1542 | |
fa8d436c UD |
1543 | /* Take a chunk off a bin list */ |
1544 | #define unlink(P, BK, FD) { \ | |
1545 | FD = P->fd; \ | |
1546 | BK = P->bk; \ | |
3e030bd5 | 1547 | if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \ |
6bf4302e UD |
1548 | malloc_printerr (check_action, "corrupted double-linked list", P); \ |
1549 | else { \ | |
1550 | FD->bk = BK; \ | |
1551 | BK->fd = FD; \ | |
7ecfbd38 UD |
1552 | if (!in_smallbin_range (P->size) \ |
1553 | && __builtin_expect (P->fd_nextsize != NULL, 0)) { \ | |
1554 | assert (P->fd_nextsize->bk_nextsize == P); \ | |
1555 | assert (P->bk_nextsize->fd_nextsize == P); \ | |
1556 | if (FD->fd_nextsize == NULL) { \ | |
1557 | if (P->fd_nextsize == P) \ | |
1558 | FD->fd_nextsize = FD->bk_nextsize = FD; \ | |
1559 | else { \ | |
1560 | FD->fd_nextsize = P->fd_nextsize; \ | |
1561 | FD->bk_nextsize = P->bk_nextsize; \ | |
1562 | P->fd_nextsize->bk_nextsize = FD; \ | |
1563 | P->bk_nextsize->fd_nextsize = FD; \ | |
1564 | } \ | |
1565 | } else { \ | |
1566 | P->fd_nextsize->bk_nextsize = P->bk_nextsize; \ | |
1567 | P->bk_nextsize->fd_nextsize = P->fd_nextsize; \ | |
1568 | } \ | |
1569 | } \ | |
6bf4302e | 1570 | } \ |
fa8d436c | 1571 | } |
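/* Sketch (added for exposition, compiled out) of the integrity test
   performed at the top of unlink above: before P leaves its list,
   both neighbours must still point back at P; anything else means the
   list was corrupted, e.g. by a heap overflow.  The demo_* names are
   hypothetical. */
#if 0
#include <assert.h>

struct demo_node { struct demo_node *fd, *bk; };

static int demo_unlink_ok (struct demo_node *p) {
  return p->fd->bk == p && p->bk->fd == p;  /* the __builtin_expect test */
}

int main (void) {
  struct demo_node a, b, c;        /* circular list a <-> b <-> c  */
  a.fd = &b; b.fd = &c; c.fd = &a;
  a.bk = &c; b.bk = &a; c.bk = &b;

  assert (demo_unlink_ok (&b));    /* intact list unlinks fine     */
  c.bk = &c;                       /* simulate an overwritten bk   */
  assert (!demo_unlink_ok (&b));   /* "corrupted double-linked list" */
  return 0;
}
#endif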
f65fd747 | 1572 | |
fa8d436c UD |
1573 | /* |
1574 | Indexing | |
1575 | ||
1576 | Bins for sizes < 512 bytes contain chunks of all the same size, spaced | |
1577 | 8 bytes apart. Larger bins are approximately logarithmically spaced: | |
f65fd747 | 1578 | |
fa8d436c UD |
1579 | 64 bins of size 8 |
1580 | 32 bins of size 64 | |
1581 | 16 bins of size 512 | |
1582 | 8 bins of size 4096 | |
1583 | 4 bins of size 32768 | |
1584 | 2 bins of size 262144 | |
1585 | 1 bin of size what's left | |
f65fd747 | 1586 | |
fa8d436c UD |
1587 | There is actually a little bit of slop in the numbers in bin_index |
1588 | for the sake of speed. This makes no difference elsewhere. | |
f65fd747 | 1589 | |
fa8d436c UD |
1590 | The bins top out around 1MB because we expect to service large |
1591 | requests via mmap. | |
1592 | */ | |
f65fd747 | 1593 | |
fa8d436c UD |
1594 | #define NBINS 128 |
1595 | #define NSMALLBINS 64 | |
1d47e92f UD |
1596 | #define SMALLBIN_WIDTH MALLOC_ALIGNMENT |
1597 | #define MIN_LARGE_SIZE (NSMALLBINS * SMALLBIN_WIDTH) | |
f65fd747 | 1598 | |
fa8d436c UD |
1599 | #define in_smallbin_range(sz) \ |
1600 | ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE) | |
f65fd747 | 1601 | |
1d47e92f UD |
1602 | #define smallbin_index(sz) \ |
1603 | (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) | |
f65fd747 | 1604 | |
1d47e92f | 1605 | #define largebin_index_32(sz) \ |
1a31b586 | 1606 | (((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \ |
fa8d436c UD |
1607 | ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \ |
1608 | ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ | |
1609 | ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ | |
1610 | ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \ | |
72f90263 | 1611 | 126) |
f65fd747 | 1612 | |
1d47e92f UD |
1613 | // XXX It remains to be seen whether it is good to keep the widths of |
1614 | // XXX the buckets the same or whether it should be scaled by a factor | |
1615 | // XXX of two as well. | |
1616 | #define largebin_index_64(sz) \ | |
1617 | (((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \ | |
1618 | ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \ | |
1619 | ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ | |
1620 | ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ | |
1621 | ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \ | |
72f90263 | 1622 | 126) |
1d47e92f UD |
1623 | |
1624 | #define largebin_index(sz) \ | |
1625 | (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz)) | |
1626 | ||
fa8d436c UD |
1627 | #define bin_index(sz) \ |
1628 | ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz)) | |
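/* Hedged arithmetic check (added for exposition, compiled out) of the
   32-bit indexing above, assuming SIZE_SZ == 4, SMALLBIN_WIDTH == 8
   and hence MIN_LARGE_SIZE == 512: */
#if 0
#include <assert.h>

int main (void) {
  assert ((24u  >> 3) == 3);           /* smallbin_index(24): bin 3    */
  assert ((504u >> 3) == 63);          /* last small bin               */
  assert (56 + (512u  >> 6) == 64);    /* first large bin (64B spaced) */
  assert (56 + (1024u >> 6) == 72);    /* a 1 KiB chunk                */
  assert (91 + (3000u >> 9) == 96);    /* 3000 >> 6 > 38: 512B spacing */
  return 0;
}
#endif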
f65fd747 | 1629 | |
f65fd747 UD |
1630 | |
1631 | /* | |
fa8d436c UD |
1632 | Unsorted chunks |
1633 | ||
1634 | All remainders from chunk splits, as well as all returned chunks, | |
1635 | are first placed in the "unsorted" bin. They are then placed | |
1636 | in regular bins after malloc gives them ONE chance to be used before | |
1637 | binning. So, basically, the unsorted_chunks list acts as a queue, | |
1638 | with chunks being placed on it in free (and malloc_consolidate), | |
1639 | and taken off (to be either used or placed in bins) in malloc. | |
1640 | ||
1641 | The NON_MAIN_ARENA flag is never set for unsorted chunks, so it | |
1642 | does not have to be taken into account in size comparisons. | |
f65fd747 UD |
1643 | */ |
1644 | ||
fa8d436c UD |
1645 | /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */ |
1646 | #define unsorted_chunks(M) (bin_at(M, 1)) | |
f65fd747 | 1647 | |
fa8d436c UD |
1648 | /* |
1649 | Top | |
1650 | ||
1651 | The top-most available chunk (i.e., the one bordering the end of | |
1652 | available memory) is treated specially. It is never included in | |
1653 | any bin, is used only if no other chunk is available, and is | |
1654 | released back to the system if it is very large (see | |
1655 | M_TRIM_THRESHOLD). Because top initially | |
1656 | points to its own bin with initial zero size, thus forcing | |
1657 | extension on the first malloc request, we avoid having any special | |
1658 | code in malloc to check whether it even exists yet. But we still | |
1659 | need to do so when getting memory from system, so we make | |
1660 | initial_top treat the bin as a legal but unusable chunk during the | |
1661 | interval between initialization and the first call to | |
1662 | sYSMALLOc. (This is somewhat delicate, since it relies on | |
1663 | the 2 preceding words to be zero during this interval as well.) | |
1664 | */ | |
f65fd747 | 1665 | |
fa8d436c UD |
1666 | /* Conveniently, the unsorted bin can be used as dummy top on first call */ |
1667 | #define initial_top(M) (unsorted_chunks(M)) | |
f65fd747 | 1668 | |
fa8d436c UD |
1669 | /* |
1670 | Binmap | |
f65fd747 | 1671 | |
fa8d436c UD |
1672 | To help compensate for the large number of bins, a one-level index |
1673 | structure is used for bin-by-bin searching. `binmap' is a | |
1674 | bitvector recording whether bins are definitely empty so they can | |
1675 | be skipped over during traversals. The bits are NOT always |
1676 | cleared as soon as bins are empty, but instead only | |
1677 | when they are noticed to be empty during traversal in malloc. | |
1678 | */ | |
f65fd747 | 1679 | |
fa8d436c UD |
1680 | /* Conservatively use 32 bits per map word, even if on 64bit system */ |
1681 | #define BINMAPSHIFT 5 | |
1682 | #define BITSPERMAP (1U << BINMAPSHIFT) | |
1683 | #define BINMAPSIZE (NBINS / BITSPERMAP) | |
f65fd747 | 1684 | |
fa8d436c UD |
1685 | #define idx2block(i) ((i) >> BINMAPSHIFT) |
1686 | #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1)))) | |
f65fd747 | 1687 | |
fa8d436c UD |
1688 | #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i)) |
1689 | #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i))) | |
1690 | #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i)) | |
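/* Sketch (added for exposition, compiled out) of the binmap addressing
   above: bin 70 lives in map word 70 >> 5 == 2, bit 70 & 31 == 6. */
#if 0
#include <assert.h>

int main (void) {
  unsigned int binmap[4] = { 0 };             /* BINMAPSIZE == 128/32 */
  int i = 70;

  binmap[i >> 5] |= 1U << (i & 31);           /* mark_bin(m, 70)      */
  assert (binmap[2] == (1U << 6));
  assert ((binmap[i >> 5] & (1U << (i & 31))) != 0);   /* get_binmap  */
  binmap[i >> 5] &= ~(1U << (i & 31));        /* unmark_bin(m, 70)    */
  assert (binmap[2] == 0);
  return 0;
}
#endif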
f65fd747 | 1691 | |
fa8d436c UD |
1692 | /* |
1693 | Fastbins | |
1694 | ||
1695 | An array of lists holding recently freed small chunks. Fastbins | |
1696 | are not doubly linked. It is faster to single-link them, and | |
1697 | since chunks are never removed from the middles of these lists, | |
1698 | double linking is not necessary. Also, unlike regular bins, they | |
1699 | are not even processed in FIFO order (they use faster LIFO) since | |
1700 | ordering doesn't much matter in the transient contexts in which | |
1701 | fastbins are normally used. | |
1702 | ||
1703 | Chunks in fastbins keep their inuse bit set, so they cannot | |
1704 | be consolidated with other free chunks. malloc_consolidate | |
1705 | releases all chunks in fastbins and consolidates them with | |
a9177ff5 | 1706 | other free chunks. |
fa8d436c | 1707 | */ |
f65fd747 | 1708 | |
fa8d436c | 1709 | typedef struct malloc_chunk* mfastbinptr; |
425ce2ed | 1710 | #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx]) |
f65fd747 | 1711 | |
fa8d436c | 1712 | /* offset 2 to use otherwise unindexable first 2 bins */ |
425ce2ed UD |
1713 | #define fastbin_index(sz) \ |
1714 | ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) | |
1715 | ||
f65fd747 | 1716 | |
fa8d436c | 1717 | /* The maximum fastbin request size we support */ |
425ce2ed | 1718 | #define MAX_FAST_SIZE (80 * SIZE_SZ / 4) |
f65fd747 | 1719 | |
fa8d436c | 1720 | #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1) |
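/* Hedged check (added for exposition, compiled out) of the fastbin
   sizing above for a 64-bit build, assuming SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16: */
#if 0
#include <assert.h>

int main (void) {
  assert ((32u >> 4) - 2 == 0);             /* smallest chunk -> bin 0 */
  assert ((48u >> 4) - 2 == 1);             /* next 16-byte step       */
  /* MAX_FAST_SIZE == 160; request2size(160) == 176 -> 10 fastbins.   */
  assert (((160u + 8 + 15) & ~15u) == 176);
  assert ((176u >> 4) - 2 + 1 == 10);       /* NFASTBINS               */
  return 0;
}
#endif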
f65fd747 UD |
1721 | |
1722 | /* | |
fa8d436c UD |
1723 | FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() |
1724 | that triggers automatic consolidation of possibly-surrounding | |
1725 | fastbin chunks. This is a heuristic, so the exact value should not | |
1726 | matter too much. It is defined at half the default trim threshold as a | |
1727 | compromise heuristic to only attempt consolidation if it is likely | |
1728 | to lead to trimming. However, it is not dynamically tunable, since | |
a9177ff5 | 1729 | consolidation reduces fragmentation surrounding large chunks even |
fa8d436c | 1730 | if trimming is not used. |
f65fd747 UD |
1731 | */ |
1732 | ||
fa8d436c | 1733 | #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL) |
f65fd747 UD |
1734 | |
1735 | /* | |
a9177ff5 | 1736 | Since the lowest 2 bits in max_fast don't matter in size comparisons, |
fa8d436c | 1737 | they are used as flags. |
f65fd747 UD |
1738 | */ |
1739 | ||
fa8d436c UD |
1740 | /* |
1741 | FASTCHUNKS_BIT held in max_fast indicates that there are probably | |
1742 | some fastbin chunks. It is set true on entering a chunk into any | |
1743 | fastbin, and cleared only in malloc_consolidate. | |
f65fd747 | 1744 | |
fa8d436c UD |
1745 | The truth value is inverted so that have_fastchunks will be true |
1746 | upon startup (since statics are zero-filled), simplifying | |
1747 | initialization checks. | |
1748 | */ | |
f65fd747 | 1749 | |
fa8d436c | 1750 | #define FASTCHUNKS_BIT (1U) |
f65fd747 | 1751 | |
9bf248c6 | 1752 | #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0) |
425ce2ed UD |
1753 | #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT) |
1754 | #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT) | |
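/* Sketch (added for exposition, compiled out) of the inverted-bit
   convention above: a zero-filled flags word already reads as "may
   have fastchunks", so statically allocated arenas need no explicit
   initialization. */
#if 0
#include <assert.h>

int main (void) {
  int flags = 0;               /* as in a zero-filled static arena  */
  assert ((flags & 1) == 0);   /* have_fastchunks(M) is true        */
  flags |= 1;                  /* clear_fastchunks(M)               */
  assert ((flags & 1) != 0);   /* have_fastchunks(M) is now false   */
  flags &= ~1;                 /* set_fastchunks(M)                 */
  assert ((flags & 1) == 0);
  return 0;
}
#endif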
f65fd747 UD |
1755 | |
1756 | /* | |
fa8d436c UD |
1757 | NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous |
1758 | regions. Otherwise, contiguity is exploited in merging together, | |
1759 | when possible, results from consecutive MORECORE calls. | |
f65fd747 | 1760 | |
fa8d436c UD |
1761 | The initial value comes from MORECORE_CONTIGUOUS, but is |
1762 | changed dynamically if mmap is ever used as an sbrk substitute. | |
f65fd747 UD |
1763 | */ |
1764 | ||
fa8d436c | 1765 | #define NONCONTIGUOUS_BIT (2U) |
f65fd747 | 1766 | |
9bf248c6 UD |
1767 | #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0) |
1768 | #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0) | |
1769 | #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT) | |
1770 | #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT) | |
f65fd747 | 1771 | |
a9177ff5 RM |
1772 | /* |
1773 | Set value of max_fast. | |
fa8d436c UD |
1774 | Use impossibly small value if 0. |
1775 | Precondition: there are no existing fastbin chunks. | |
1776 | Setting the value clears fastchunk bit but preserves noncontiguous bit. | |
f65fd747 UD |
1777 | */ |
1778 | ||
9bf248c6 | 1779 | #define set_max_fast(s) \ |
991eda1e UD |
1780 | global_max_fast = (((s) == 0) \ |
1781 | ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK)) | |
9bf248c6 | 1782 | #define get_max_fast() global_max_fast |
f65fd747 | 1783 | |
f65fd747 UD |
1784 | |
1785 | /* | |
fa8d436c | 1786 | ----------- Internal state representation and initialization ----------- |
f65fd747 UD |
1787 | */ |
1788 | ||
fa8d436c UD |
1789 | struct malloc_state { |
1790 | /* Serialize access. */ | |
1791 | mutex_t mutex; | |
9bf248c6 UD |
1792 | |
1793 | /* Flags (formerly in max_fast). */ | |
1794 | int flags; | |
f65fd747 | 1795 | |
4f27c496 | 1796 | #if THREAD_STATS |
fa8d436c UD |
1797 | /* Statistics for locking. Only used if THREAD_STATS is defined. */ |
1798 | long stat_lock_direct, stat_lock_loop, stat_lock_wait; | |
4f27c496 | 1799 | #endif |
f65fd747 | 1800 | |
fa8d436c | 1801 | /* Fastbins */ |
425ce2ed | 1802 | mfastbinptr fastbinsY[NFASTBINS]; |
f65fd747 | 1803 | |
fa8d436c UD |
1804 | /* Base of the topmost chunk -- not otherwise kept in a bin */ |
1805 | mchunkptr top; | |
f65fd747 | 1806 | |
fa8d436c UD |
1807 | /* The remainder from the most recent split of a small request */ |
1808 | mchunkptr last_remainder; | |
f65fd747 | 1809 | |
fa8d436c | 1810 | /* Normal bins packed as described above */ |
41999a1a | 1811 | mchunkptr bins[NBINS * 2 - 2]; |
f65fd747 | 1812 | |
fa8d436c UD |
1813 | /* Bitmap of bins */ |
1814 | unsigned int binmap[BINMAPSIZE]; | |
f65fd747 | 1815 | |
fa8d436c UD |
1816 | /* Linked list */ |
1817 | struct malloc_state *next; | |
f65fd747 | 1818 | |
425ce2ed UD |
1819 | #ifdef PER_THREAD |
1820 | /* Linked list for free arenas. */ | |
1821 | struct malloc_state *next_free; | |
1822 | #endif | |
1823 | ||
fa8d436c UD |
1824 | /* Memory allocated from the system in this arena. */ |
1825 | INTERNAL_SIZE_T system_mem; | |
1826 | INTERNAL_SIZE_T max_system_mem; | |
1827 | }; | |
f65fd747 | 1828 | |
fa8d436c UD |
1829 | struct malloc_par { |
1830 | /* Tunable parameters */ | |
1831 | unsigned long trim_threshold; | |
1832 | INTERNAL_SIZE_T top_pad; | |
1833 | INTERNAL_SIZE_T mmap_threshold; | |
425ce2ed UD |
1834 | #ifdef PER_THREAD |
1835 | INTERNAL_SIZE_T arena_test; | |
1836 | INTERNAL_SIZE_T arena_max; | |
1837 | #endif | |
fa8d436c UD |
1838 | |
1839 | /* Memory map support */ | |
1840 | int n_mmaps; | |
1841 | int n_mmaps_max; | |
1842 | int max_n_mmaps; | |
1d05c2fb UD |
1843 | /* the mmap_threshold is dynamic, until the user sets |
1844 | it manually, at which point we need to disable any | |
1845 | dynamic behavior. */ | |
1846 | int no_dyn_threshold; | |
fa8d436c | 1847 | |
fa8d436c UD |
1848 | /* Statistics */ |
1849 | INTERNAL_SIZE_T mmapped_mem; | |
1850 | /*INTERNAL_SIZE_T sbrked_mem;*/ | |
1851 | /*INTERNAL_SIZE_T max_sbrked_mem;*/ | |
1852 | INTERNAL_SIZE_T max_mmapped_mem; | |
1853 | INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */ | |
1854 | ||
1855 | /* First address handed out by MORECORE/sbrk. */ | |
1856 | char* sbrk_base; | |
1857 | }; | |
f65fd747 | 1858 | |
fa8d436c UD |
1859 | /* There are several instances of this struct ("arenas") in this |
1860 | malloc. If you are adapting this malloc in a way that does NOT use | |
1861 | a static or mmapped malloc_state, you MUST explicitly zero-fill it | |
1862 | before using. This malloc relies on the property that malloc_state | |
1863 | is initialized to all zeroes (as is true of C statics). */ | |
f65fd747 | 1864 | |
02d46fc4 UD |
1865 | static struct malloc_state main_arena = |
1866 | { | |
1867 | .mutex = MUTEX_INITIALIZER, | |
1868 | .next = &main_arena | |
1869 | }; | |
f65fd747 | 1870 | |
fa8d436c | 1871 | /* There is only one instance of the malloc parameters. */ |
f65fd747 | 1872 | |
02d46fc4 UD |
1873 | static struct malloc_par mp_ = |
1874 | { | |
1875 | .top_pad = DEFAULT_TOP_PAD, | |
1876 | .n_mmaps_max = DEFAULT_MMAP_MAX, | |
1877 | .mmap_threshold = DEFAULT_MMAP_THRESHOLD, | |
1878 | .trim_threshold = DEFAULT_TRIM_THRESHOLD, | |
1879 | #ifdef PER_THREAD | |
1880 | # define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8)) | |
1881 | .arena_test = NARENAS_FROM_NCORES (1) | |
1882 | #endif | |
1883 | }; | |
f65fd747 | 1884 | |
9bf248c6 | 1885 | |
425ce2ed UD |
1886 | #ifdef PER_THREAD |
1887 | /* Non public mallopt parameters. */ | |
1888 | #define M_ARENA_TEST -7 | |
1889 | #define M_ARENA_MAX -8 | |
1890 | #endif | |
1891 | ||
1892 | ||
9bf248c6 UD |
1893 | /* Maximum size of memory handled in fastbins. */ |
1894 | static INTERNAL_SIZE_T global_max_fast; | |
1895 | ||
fa8d436c UD |
1896 | /* |
1897 | Initialize a malloc_state struct. | |
f65fd747 | 1898 | |
fa8d436c UD |
1899 | This is called only from within malloc_consolidate, which needs |
1900 | be called in the same contexts anyway. It is never called directly | |
1901 | outside of malloc_consolidate because some optimizing compilers try | |
1902 | to inline it at all call points, which turns out not to be an | |
1903 | optimization at all. (Inlining it in malloc_consolidate is fine though.) | |
1904 | */ | |
f65fd747 | 1905 | |
fa8d436c | 1906 | static void malloc_init_state(mstate av) |
fa8d436c UD |
1907 | { |
1908 | int i; | |
1909 | mbinptr bin; | |
a9177ff5 | 1910 | |
fa8d436c | 1911 | /* Establish circular links for normal bins */ |
a9177ff5 | 1912 | for (i = 1; i < NBINS; ++i) { |
fa8d436c UD |
1913 | bin = bin_at(av,i); |
1914 | bin->fd = bin->bk = bin; | |
1915 | } | |
f65fd747 | 1916 | |
fa8d436c UD |
1917 | #if MORECORE_CONTIGUOUS |
1918 | if (av != &main_arena) | |
1919 | #endif | |
1920 | set_noncontiguous(av); | |
9bf248c6 UD |
1921 | if (av == &main_arena) |
1922 | set_max_fast(DEFAULT_MXFAST); | |
1923 | av->flags |= FASTCHUNKS_BIT; | |
f65fd747 | 1924 | |
fa8d436c UD |
1925 | av->top = initial_top(av); |
1926 | } | |
e9b3e3c5 | 1927 | |
a9177ff5 | 1928 | /* |
fa8d436c UD |
1929 | Other internal utilities operating on mstates |
1930 | */ | |
f65fd747 | 1931 | |
22a89187 | 1932 | static void* sYSMALLOc(INTERNAL_SIZE_T, mstate); |
fa8d436c UD |
1933 | static int sYSTRIm(size_t, mstate); |
1934 | static void malloc_consolidate(mstate); | |
7e3be507 | 1935 | |
404d4cef RM |
1936 | |
1937 | /* -------------- Early definitions for debugging hooks ---------------- */ | |
1938 | ||
1939 | /* Define and initialize the hook variables. These weak definitions must | |
1940 | appear before any use of the variables in a function (arena.c uses one). */ | |
1941 | #ifndef weak_variable | |
404d4cef RM |
1942 | /* In GNU libc we want the hook variables to be weak definitions to |
1943 | avoid a problem with Emacs. */ | |
22a89187 | 1944 | # define weak_variable weak_function |
404d4cef RM |
1945 | #endif |
1946 | ||
1947 | /* Forward declarations. */ | |
22a89187 | 1948 | static void* malloc_hook_ini __MALLOC_P ((size_t sz, |
404d4cef | 1949 | const __malloc_ptr_t caller)); |
22a89187 | 1950 | static void* realloc_hook_ini __MALLOC_P ((void* ptr, size_t sz, |
404d4cef | 1951 | const __malloc_ptr_t caller)); |
22a89187 | 1952 | static void* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz, |
404d4cef RM |
1953 | const __malloc_ptr_t caller)); |
1954 | ||
06d6611a UD |
1955 | void weak_variable (*__malloc_initialize_hook) (void) = NULL; |
1956 | void weak_variable (*__free_hook) (__malloc_ptr_t __ptr, | |
1957 | const __malloc_ptr_t) = NULL; | |
404d4cef | 1958 | __malloc_ptr_t weak_variable (*__malloc_hook) |
06d6611a | 1959 | (size_t __size, const __malloc_ptr_t) = malloc_hook_ini; |
404d4cef | 1960 | __malloc_ptr_t weak_variable (*__realloc_hook) |
06d6611a | 1961 | (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t) |
404d4cef RM |
1962 | = realloc_hook_ini; |
1963 | __malloc_ptr_t weak_variable (*__memalign_hook) | |
06d6611a | 1964 | (size_t __alignment, size_t __size, const __malloc_ptr_t) |
404d4cef | 1965 | = memalign_hook_ini; |
06d6611a | 1966 | void weak_variable (*__after_morecore_hook) (void) = NULL; |
404d4cef RM |
1967 | |
1968 | ||
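/* User-side sketch (added for exposition, compiled out) of the hook
   mechanism declared above, in the style of the glibc manual's
   example; my_malloc_hook and my_init are hypothetical names.  The
   hook is swapped out around the recursive malloc call to avoid
   infinite recursion. */
#if 0
#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;       /* uninstall before recursing */
  result = malloc (size);
  old_malloc_hook = __malloc_hook;
  fprintf (stderr, "malloc (%zu) from %p returns %p\n",
	   size, caller, result);
  __malloc_hook = my_malloc_hook;        /* reinstall                  */
  return result;
}

static void
my_init (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}

void (*__malloc_initialize_hook) (void) = my_init;
#endif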
3e030bd5 UD |
1969 | /* ---------------- Error behavior ------------------------------------ */ |
1970 | ||
1971 | #ifndef DEFAULT_CHECK_ACTION | |
1972 | #define DEFAULT_CHECK_ACTION 3 | |
1973 | #endif | |
1974 | ||
1975 | static int check_action = DEFAULT_CHECK_ACTION; | |
1976 | ||
1977 | ||
854278df UD |
1978 | /* ------------------ Testing support ----------------------------------*/ |
1979 | ||
1980 | static int perturb_byte; | |
1981 | ||
1982 | #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n) | |
1983 | #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n) | |
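/* Usage sketch (added for exposition, compiled out) for the perturb
   support above: via glibc's M_PERTURB mallopt option (or the
   MALLOC_PERTURB_ environment variable), new memory is filled with
   perturb_byte ^ 0xff and freed memory with perturb_byte, flushing
   out reads of uninitialized or freed blocks. */
#if 0
#include <malloc.h>
#include <stdlib.h>

int main (void) {
  mallopt (M_PERTURB, 0xAA);   /* new blocks read 0x55, freed 0xAA */
  char *p = malloc (64);       /* p[0..63] start out as 0x55       */
  free (p);                    /* block is overwritten with 0xAA   */
  return 0;
}
#endif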
1984 | ||
1985 | ||
fa8d436c UD |
1986 | /* ------------------- Support for multiple arenas -------------------- */ |
1987 | #include "arena.c" | |
f65fd747 | 1988 | |
fa8d436c UD |
1989 | /* |
1990 | Debugging support | |
f65fd747 | 1991 | |
fa8d436c UD |
1992 | These routines make a number of assertions about the states |
1993 | of data structures that should be true at all times. If any | |
1994 | are not true, it's very likely that a user program has somehow | |
1995 | trashed memory. (It's also possible that there is a coding error | |
1996 | in malloc. In which case, please report it!) | |
1997 | */ | |
ee74a442 | 1998 | |
fa8d436c | 1999 | #if ! MALLOC_DEBUG |
d8f00d46 | 2000 | |
fa8d436c UD |
2001 | #define check_chunk(A,P) |
2002 | #define check_free_chunk(A,P) | |
2003 | #define check_inuse_chunk(A,P) | |
2004 | #define check_remalloced_chunk(A,P,N) | |
2005 | #define check_malloced_chunk(A,P,N) | |
2006 | #define check_malloc_state(A) | |
d8f00d46 | 2007 | |
fa8d436c | 2008 | #else |
ca34d7a7 | 2009 | |
fa8d436c UD |
2010 | #define check_chunk(A,P) do_check_chunk(A,P) |
2011 | #define check_free_chunk(A,P) do_check_free_chunk(A,P) | |
2012 | #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P) | |
2013 | #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N) | |
2014 | #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N) | |
2015 | #define check_malloc_state(A) do_check_malloc_state(A) | |
ca34d7a7 | 2016 | |
fa8d436c UD |
2017 | /* |
2018 | Properties of all chunks | |
2019 | */ | |
ca34d7a7 | 2020 | |
fa8d436c | 2021 | static void do_check_chunk(mstate av, mchunkptr p) |
ca34d7a7 | 2022 | { |
fa8d436c UD |
2023 | unsigned long sz = chunksize(p); |
2024 | /* min and max possible addresses assuming contiguous allocation */ | |
2025 | char* max_address = (char*)(av->top) + chunksize(av->top); | |
2026 | char* min_address = max_address - av->system_mem; | |
2027 | ||
2028 | if (!chunk_is_mmapped(p)) { | |
a9177ff5 | 2029 | |
fa8d436c UD |
2030 | /* Has legal address ... */ |
2031 | if (p != av->top) { | |
2032 | if (contiguous(av)) { | |
72f90263 UD |
2033 | assert(((char*)p) >= min_address); |
2034 | assert(((char*)p + sz) <= ((char*)(av->top))); | |
fa8d436c UD |
2035 | } |
2036 | } | |
2037 | else { | |
2038 | /* top size is always at least MINSIZE */ | |
2039 | assert((unsigned long)(sz) >= MINSIZE); | |
2040 | /* top predecessor always marked inuse */ | |
2041 | assert(prev_inuse(p)); | |
2042 | } | |
a9177ff5 | 2043 | |
ca34d7a7 | 2044 | } |
fa8d436c | 2045 | else { |
fa8d436c UD |
2046 | /* address is outside main heap */ |
2047 | if (contiguous(av) && av->top != initial_top(av)) { | |
2acd01ac | 2048 | assert(((char*)p) < min_address || ((char*)p) >= max_address); |
fa8d436c UD |
2049 | } |
2050 | /* chunk is page-aligned */ | |
02d46fc4 | 2051 | assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0); |
fa8d436c UD |
2052 | /* mem is aligned */ |
2053 | assert(aligned_OK(chunk2mem(p))); | |
eb406346 | 2054 | } |
eb406346 UD |
2055 | } |
2056 | ||
fa8d436c UD |
2057 | /* |
2058 | Properties of free chunks | |
2059 | */ | |
ee74a442 | 2060 | |
fa8d436c | 2061 | static void do_check_free_chunk(mstate av, mchunkptr p) |
67c94753 | 2062 | { |
fa8d436c UD |
2063 | INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); |
2064 | mchunkptr next = chunk_at_offset(p, sz); | |
67c94753 | 2065 | |
fa8d436c | 2066 | do_check_chunk(av, p); |
67c94753 | 2067 | |
fa8d436c UD |
2068 | /* Chunk must claim to be free ... */ |
2069 | assert(!inuse(p)); | |
2070 | assert (!chunk_is_mmapped(p)); | |
67c94753 | 2071 | |
fa8d436c UD |
2072 | /* Unless a special marker, must have OK fields */ |
2073 | if ((unsigned long)(sz) >= MINSIZE) | |
2074 | { | |
2075 | assert((sz & MALLOC_ALIGN_MASK) == 0); | |
2076 | assert(aligned_OK(chunk2mem(p))); | |
2077 | /* ... matching footer field */ | |
2078 | assert(next->prev_size == sz); | |
2079 | /* ... and is fully consolidated */ | |
2080 | assert(prev_inuse(p)); | |
2081 | assert (next == av->top || inuse(next)); | |
2082 | ||
2083 | /* ... and has minimally sane links */ | |
2084 | assert(p->fd->bk == p); | |
2085 | assert(p->bk->fd == p); | |
2086 | } | |
2087 | else /* markers are always of size SIZE_SZ */ | |
2088 | assert(sz == SIZE_SZ); | |
67c94753 | 2089 | } |
67c94753 | 2090 | |
fa8d436c UD |
2091 | /* |
2092 | Properties of inuse chunks | |
2093 | */ | |
2094 | ||
fa8d436c | 2095 | static void do_check_inuse_chunk(mstate av, mchunkptr p) |
f65fd747 | 2096 | { |
fa8d436c | 2097 | mchunkptr next; |
f65fd747 | 2098 | |
fa8d436c | 2099 | do_check_chunk(av, p); |
f65fd747 | 2100 | |
fa8d436c UD |
2101 | if (chunk_is_mmapped(p)) |
2102 | return; /* mmapped chunks have no next/prev */ | |
ca34d7a7 | 2103 | |
fa8d436c UD |
2104 | /* Check whether it claims to be in use ... */ |
2105 | assert(inuse(p)); | |
10dc2a90 | 2106 | |
fa8d436c | 2107 | next = next_chunk(p); |
10dc2a90 | 2108 | |
fa8d436c UD |
2109 | /* ... and is surrounded by OK chunks. |
2110 | Since more things can be checked with free chunks than inuse ones, | |
2111 | if an inuse chunk borders them and debug is on, it's worth checking them. |
2112 | */ | |
2113 | if (!prev_inuse(p)) { | |
2114 | /* Note that we cannot even look at prev unless it is not inuse */ | |
2115 | mchunkptr prv = prev_chunk(p); | |
2116 | assert(next_chunk(prv) == p); | |
2117 | do_check_free_chunk(av, prv); | |
2118 | } | |
2119 | ||
2120 | if (next == av->top) { | |
2121 | assert(prev_inuse(next)); | |
2122 | assert(chunksize(next) >= MINSIZE); | |
2123 | } | |
2124 | else if (!inuse(next)) | |
2125 | do_check_free_chunk(av, next); | |
10dc2a90 UD |
2126 | } |
2127 | ||
fa8d436c UD |
2128 | /* |
2129 | Properties of chunks recycled from fastbins | |
2130 | */ | |
2131 | ||
fa8d436c | 2132 | static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s) |
10dc2a90 | 2133 | { |
fa8d436c UD |
2134 | INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); |
2135 | ||
2136 | if (!chunk_is_mmapped(p)) { | |
2137 | assert(av == arena_for_chunk(p)); | |
2138 | if (chunk_non_main_arena(p)) | |
2139 | assert(av != &main_arena); | |
2140 | else | |
2141 | assert(av == &main_arena); | |
2142 | } | |
2143 | ||
2144 | do_check_inuse_chunk(av, p); | |
2145 | ||
2146 | /* Legal size ... */ | |
2147 | assert((sz & MALLOC_ALIGN_MASK) == 0); | |
2148 | assert((unsigned long)(sz) >= MINSIZE); | |
2149 | /* ... and alignment */ | |
2150 | assert(aligned_OK(chunk2mem(p))); | |
2151 | /* chunk is less than MINSIZE more than request */ | |
2152 | assert((long)(sz) - (long)(s) >= 0); | |
2153 | assert((long)(sz) - (long)(s + MINSIZE) < 0); | |
10dc2a90 UD |
2154 | } |
2155 | ||
fa8d436c UD |
2156 | /* |
2157 | Properties of nonrecycled chunks at the point they are malloced | |
2158 | */ | |
2159 | ||
fa8d436c | 2160 | static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s) |
10dc2a90 | 2161 | { |
fa8d436c UD |
2162 | /* same as recycled case ... */ |
2163 | do_check_remalloced_chunk(av, p, s); | |
10dc2a90 | 2164 | |
fa8d436c UD |
2165 | /* |
2166 | ... plus, must obey implementation invariant that prev_inuse is | |
2167 | always true of any allocated chunk; i.e., that each allocated | |
2168 | chunk borders either a previously allocated and still in-use | |
2169 | chunk, or the base of its memory arena. This is ensured | |
ded5b9b7 | 2170 | by making all allocations from the `lowest' part of any found |
fa8d436c UD |
2171 | chunk. This does not necessarily hold however for chunks |
2172 | recycled via fastbins. | |
2173 | */ | |
10dc2a90 | 2174 | |
fa8d436c UD |
2175 | assert(prev_inuse(p)); |
2176 | } | |
10dc2a90 | 2177 | |
f65fd747 | 2178 | |
fa8d436c UD |
2179 | /* |
2180 | Properties of malloc_state. | |
f65fd747 | 2181 | |
fa8d436c UD |
2182 | This may be useful for debugging malloc, as well as detecting user |
2183 | programmer errors that somehow write into malloc_state. | |
f65fd747 | 2184 | |
fa8d436c UD |
2185 | If you are extending or experimenting with this malloc, you can |
2186 | probably figure out how to hack this routine to print out or | |
2187 | display chunk addresses, sizes, bins, and other instrumentation. | |
2188 | */ | |
f65fd747 | 2189 | |
fa8d436c UD |
2190 | static void do_check_malloc_state(mstate av) |
2191 | { | |
2192 | int i; | |
2193 | mchunkptr p; | |
2194 | mchunkptr q; | |
2195 | mbinptr b; | |
fa8d436c UD |
2196 | unsigned int idx; |
2197 | INTERNAL_SIZE_T size; | |
2198 | unsigned long total = 0; | |
2199 | int max_fast_bin; | |
f65fd747 | 2200 | |
fa8d436c UD |
2201 | /* internal size_t must be no wider than pointer type */ |
2202 | assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*)); | |
f65fd747 | 2203 | |
fa8d436c UD |
2204 | /* alignment is a power of 2 */ |
2205 | assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0); | |
f65fd747 | 2206 | |
fa8d436c UD |
2207 | /* cannot run remaining checks until fully initialized */ |
2208 | if (av->top == 0 || av->top == initial_top(av)) | |
2209 | return; | |
f65fd747 | 2210 | |
fa8d436c | 2211 | /* pagesize is a power of 2 */ |
02d46fc4 | 2212 | assert((GLRO(dl_pagesize) & (GLRO(dl_pagesize)-1)) == 0); |
f65fd747 | 2213 | |
fa8d436c UD |
2214 | /* A contiguous main_arena is consistent with sbrk_base. */ |
2215 | if (av == &main_arena && contiguous(av)) | |
2216 | assert((char*)mp_.sbrk_base + av->system_mem == | |
2217 | (char*)av->top + chunksize(av->top)); | |
2218 | ||
2219 | /* properties of fastbins */ | |
2220 | ||
2221 | /* max_fast is in allowed range */ | |
9bf248c6 | 2222 | assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE)); |
fa8d436c | 2223 | |
9bf248c6 | 2224 | max_fast_bin = fastbin_index(get_max_fast ()); |
fa8d436c UD |
2225 | |
2226 | for (i = 0; i < NFASTBINS; ++i) { | |
97983464 | 2227 | p = fastbin (av, i); |
fa8d436c | 2228 | |
11bf311e UD |
2229 | /* The following test can only be performed for the main arena. |
2230 | While mallopt calls malloc_consolidate to get rid of all fast | |
2231 | bins (especially those larger than the new maximum) this does | |
2232 | only happen for the main arena. Trying to do this for any | |
2233 | other arena would mean those arenas have to be locked and | |
2234 | malloc_consolidate be called for them. This is excessive. And | |
2235 | even if this is acceptable to somebody it still cannot solve | |
2236 | the problem completely since if the arena is locked a | |
2237 | concurrent malloc call might create a new arena which then | |
2238 | could use the newly invalid fast bins. */ | |
2239 | ||
fa8d436c | 2240 | /* all bins past max_fast are empty */ |
11bf311e | 2241 | if (av == &main_arena && i > max_fast_bin) |
fa8d436c UD |
2242 | assert(p == 0); |
2243 | ||
2244 | while (p != 0) { | |
2245 | /* each chunk claims to be inuse */ | |
2246 | do_check_inuse_chunk(av, p); | |
2247 | total += chunksize(p); | |
2248 | /* chunk belongs in this bin */ | |
2249 | assert(fastbin_index(chunksize(p)) == i); | |
2250 | p = p->fd; | |
2251 | } | |
2252 | } | |
2253 | ||
2254 | if (total != 0) | |
2255 | assert(have_fastchunks(av)); | |
2256 | else if (!have_fastchunks(av)) | |
2257 | assert(total == 0); | |
2258 | ||
2259 | /* check normal bins */ | |
2260 | for (i = 1; i < NBINS; ++i) { | |
2261 | b = bin_at(av,i); | |
2262 | ||
2263 | /* binmap is accurate (except for bin 1 == unsorted_chunks) */ | |
2264 | if (i >= 2) { | |
68631c8e UD |
2265 | unsigned int binbit = get_binmap(av,i); |
2266 | int empty = last(b) == b; | |
fa8d436c | 2267 | if (!binbit) |
72f90263 | 2268 | assert(empty); |
fa8d436c | 2269 | else if (!empty) |
72f90263 | 2270 | assert(binbit); |
fa8d436c UD |
2271 | } |
2272 | ||
2273 | for (p = last(b); p != b; p = p->bk) { | |
2274 | /* each chunk claims to be free */ | |
2275 | do_check_free_chunk(av, p); | |
2276 | size = chunksize(p); | |
2277 | total += size; | |
2278 | if (i >= 2) { | |
72f90263 UD |
2279 | /* chunk belongs in bin */ |
2280 | idx = bin_index(size); | |
2281 | assert(idx == i); | |
2282 | /* lists are sorted */ | |
2283 | assert(p->bk == b || | |
2284 | (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p)); | |
7ecfbd38 UD |
2285 | |
2286 | if (!in_smallbin_range(size)) | |
2287 | { | |
2288 | if (p->fd_nextsize != NULL) | |
2289 | { | |
2290 | if (p->fd_nextsize == p) | |
2291 | assert (p->bk_nextsize == p); | |
2292 | else | |
2293 | { | |
2294 | if (p->fd_nextsize == first (b)) | |
2295 | assert (chunksize (p) < chunksize (p->fd_nextsize)); | |
2296 | else | |
2297 | assert (chunksize (p) > chunksize (p->fd_nextsize)); | |
2298 | ||
2299 | if (p == first (b)) | |
2300 | assert (chunksize (p) > chunksize (p->bk_nextsize)); | |
2301 | else | |
2302 | assert (chunksize (p) < chunksize (p->bk_nextsize)); | |
2303 | } | |
2304 | } | |
2305 | else | |
2306 | assert (p->bk_nextsize == NULL); | |
2307 | } | |
2308 | } else if (!in_smallbin_range(size)) | |
2309 | assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL); | |
fa8d436c UD |
2310 | /* chunk is followed by a legal chain of inuse chunks */ |
2311 | for (q = next_chunk(p); | |
72f90263 UD |
2312 | (q != av->top && inuse(q) && |
2313 | (unsigned long)(chunksize(q)) >= MINSIZE); | |
2314 | q = next_chunk(q)) | |
2315 | do_check_inuse_chunk(av, q); | |
fa8d436c UD |
2316 | } |
2317 | } | |
f65fd747 | 2318 | |
fa8d436c UD |
2319 | /* top chunk is OK */ |
2320 | check_chunk(av, av->top); | |
2321 | ||
2322 | /* sanity checks for statistics */ | |
2323 | ||
fa8d436c UD |
2324 | assert(mp_.n_mmaps <= mp_.max_n_mmaps); |
2325 | ||
2326 | assert((unsigned long)(av->system_mem) <= | |
72f90263 | 2327 | (unsigned long)(av->max_system_mem)); |
f65fd747 | 2328 | |
fa8d436c | 2329 | assert((unsigned long)(mp_.mmapped_mem) <= |
72f90263 | 2330 | (unsigned long)(mp_.max_mmapped_mem)); |
fa8d436c UD |
2331 | } |
2332 | #endif | |
2333 | ||
2334 | ||
2335 | /* ----------------- Support for debugging hooks -------------------- */ | |
2336 | #include "hooks.c" | |
2337 | ||
2338 | ||
2339 | /* ----------- Routines dealing with system allocation -------------- */ | |
2340 | ||
2341 | /* | |
2342 | sysmalloc handles malloc cases requiring more memory from the system. | |
2343 | On entry, it is assumed that av->top does not have enough | |
2344 | space to service request for nb bytes, thus requiring that av->top | |
2345 | be extended or replaced. | |
2346 | */ | |
2347 | ||
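/* Hedged user-level illustration (added for exposition, compiled out)
   of the mmap fast path taken below: requests of at least
   mp_.mmap_threshold bytes, tunable through glibc's M_MMAP_THRESHOLD
   mallopt option, are likely to be served by mmap directly instead of
   by growing top. */
#if 0
#include <malloc.h>
#include <stdlib.h>

int main (void) {
  mallopt (M_MMAP_THRESHOLD, 64 * 1024);  /* mmap anything >= 64 KiB */
  void *big = malloc (1 << 20);           /* likely an mmapped chunk */
  free (big);                             /* returned via munmap     */
  return 0;
}
#endif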
22a89187 | 2348 | static void* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av) |
f65fd747 | 2349 | { |
fa8d436c UD |
2350 | mchunkptr old_top; /* incoming value of av->top */ |
2351 | INTERNAL_SIZE_T old_size; /* its size */ | |
2352 | char* old_end; /* its end address */ | |
f65fd747 | 2353 | |
fa8d436c UD |
2354 | long size; /* arg to first MORECORE or mmap call */ |
2355 | char* brk; /* return value from MORECORE */ | |
f65fd747 | 2356 | |
fa8d436c UD |
2357 | long correction; /* arg to 2nd MORECORE call */ |
2358 | char* snd_brk; /* 2nd return val */ | |
f65fd747 | 2359 | |
fa8d436c UD |
2360 | INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ |
2361 | INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ | |
2362 | char* aligned_brk; /* aligned offset into brk */ | |
f65fd747 | 2363 | |
fa8d436c UD |
2364 | mchunkptr p; /* the allocated/returned chunk */ |
2365 | mchunkptr remainder; /* remainder from allocation */ | |
2366 | unsigned long remainder_size; /* its size */ | |
2367 | ||
2368 | unsigned long sum; /* for updating stats */ | |
2369 | ||
02d46fc4 | 2370 | size_t pagemask = GLRO(dl_pagesize) - 1; |
7463d5cb | 2371 | bool tried_mmap = false; |
fa8d436c UD |
2372 | |
2373 | ||
fa8d436c UD |
2374 | /* |
2375 | If have mmap, and the request size meets the mmap threshold, and | |
2376 | the system supports mmap, and there are few enough currently | |
2377 | allocated mmapped regions, try to directly map this request | |
2378 | rather than expanding top. | |
2379 | */ | |
f65fd747 | 2380 | |
fa8d436c UD |
2381 | if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) && |
2382 | (mp_.n_mmaps < mp_.n_mmaps_max)) { | |
f65fd747 | 2383 | |
fa8d436c UD |
2384 | char* mm; /* return value from mmap call*/ |
2385 | ||
e404fb16 | 2386 | try_mmap: |
fa8d436c UD |
2387 | /* |
2388 | Round up size to nearest page. For mmapped chunks, the overhead | |
2389 | is one SIZE_SZ unit larger than for normal chunks, because there | |
2390 | is no following chunk whose prev_size field could be used. | |
22a89187 UD |
2391 | |
2392 | See the front_misalign handling below, for glibc there is no | |
2393 | need for further alignments. */ | |
11bf311e | 2394 | size = (nb + SIZE_SZ + pagemask) & ~pagemask; |
7463d5cb | 2395 | tried_mmap = true; |
fa8d436c UD |
2396 | |
2397 | /* Don't try if size wraps around 0 */ | |
2398 | if ((unsigned long)(size) > (unsigned long)(nb)) { | |
2399 | ||
2400 | mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); | |
a9177ff5 | 2401 | |
fa8d436c | 2402 | if (mm != MAP_FAILED) { |
a9177ff5 | 2403 | |
72f90263 UD |
2404 | /* |
2405 | The offset to the start of the mmapped region is stored | |
2406 | in the prev_size field of the chunk. This allows us to adjust | |
2407 | returned start address to meet alignment requirements here | |
2408 | and in memalign(), and still be able to compute proper | |
2409 | address argument for later munmap in free() and realloc(). | |
a9177ff5 | 2410 | |
22a89187 UD |
2411 | For glibc, chunk2mem increases the address by 2*SIZE_SZ and |
2412 | MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page | |
2413 | aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */ | |
72f90263 | 2414 | assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0); |
22a89187 UD |
2415 | |
2416 | p = (mchunkptr)mm; | |
2417 | set_head(p, size|IS_MMAPPED); | |
a9177ff5 | 2418 | |
72f90263 | 2419 | /* update statistics */ |
a9177ff5 | 2420 | |
72f90263 UD |
2421 | if (++mp_.n_mmaps > mp_.max_n_mmaps) |
2422 | mp_.max_n_mmaps = mp_.n_mmaps; | |
a9177ff5 | 2423 | |
72f90263 UD |
2424 | sum = mp_.mmapped_mem += size; |
2425 | if (sum > (unsigned long)(mp_.max_mmapped_mem)) | |
2426 | mp_.max_mmapped_mem = sum; | |
fa8d436c | 2427 | |
72f90263 | 2428 | check_chunk(av, p); |
a9177ff5 | 2429 | |
72f90263 | 2430 | return chunk2mem(p); |
fa8d436c UD |
2431 | } |
2432 | } | |
2433 | } | |
fa8d436c UD |
2434 | |
2435 | /* Record incoming configuration of top */ | |
2436 | ||
2437 | old_top = av->top; | |
2438 | old_size = chunksize(old_top); | |
2439 | old_end = (char*)(chunk_at_offset(old_top, old_size)); | |
2440 | ||
a9177ff5 | 2441 | brk = snd_brk = (char*)(MORECORE_FAILURE); |
fa8d436c | 2442 | |
a9177ff5 | 2443 | /* |
fa8d436c UD |
2444 | If not the first time through, we require old_size to be |
2445 | at least MINSIZE and to have prev_inuse set. | |
2446 | */ | |
2447 | ||
a9177ff5 | 2448 | assert((old_top == initial_top(av) && old_size == 0) || |
72f90263 UD |
2449 | ((unsigned long) (old_size) >= MINSIZE && |
2450 | prev_inuse(old_top) && | |
fa8d436c UD |
2451 | ((unsigned long)old_end & pagemask) == 0)); |
2452 | ||
2453 | /* Precondition: not enough current space to satisfy nb request */ | |
2454 | assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE)); | |
2455 | ||
fa8d436c UD |
2456 | |
2457 | if (av != &main_arena) { | |
2458 | ||
2459 | heap_info *old_heap, *heap; | |
2460 | size_t old_heap_size; | |
2461 | ||
2462 | /* First try to extend the current heap. */ | |
2463 | old_heap = heap_for_ptr(old_top); | |
2464 | old_heap_size = old_heap->size; | |
469615bd UD |
2465 | if ((long) (MINSIZE + nb - old_size) > 0 |
2466 | && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) { | |
fa8d436c UD |
2467 | av->system_mem += old_heap->size - old_heap_size; |
2468 | arena_mem += old_heap->size - old_heap_size; | |
fa8d436c UD |
2469 | set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top) |
2470 | | PREV_INUSE); | |
e6ac0e78 UD |
2471 | } |
2472 | else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) { | |
2473 | /* Use a newly allocated heap. */ | |
2474 | heap->ar_ptr = av; | |
2475 | heap->prev = old_heap; | |
2476 | av->system_mem += heap->size; | |
2477 | arena_mem += heap->size; | |
fa8d436c UD |
2478 | /* Set up the new top. */ |
2479 | top(av) = chunk_at_offset(heap, sizeof(*heap)); | |
2480 | set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE); | |
2481 | ||
2482 | /* Setup fencepost and free the old top chunk. */ | |
2483 | /* The fencepost takes at least MINSIZE bytes, because it might | |
2484 | become the top chunk again later. Note that a footer is set | |
2485 | up, too, although the chunk is marked in use. */ | |
2486 | old_size -= MINSIZE; | |
2487 | set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE); | |
2488 | if (old_size >= MINSIZE) { | |
2489 | set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE); | |
2490 | set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)); | |
2491 | set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA); | |
425ce2ed | 2492 | _int_free(av, old_top, 1); |
fa8d436c UD |
2493 | } else { |
2494 | set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE); | |
2495 | set_foot(old_top, (old_size + 2*SIZE_SZ)); | |
2496 | } | |
2497 | } | |
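/* Editorial sketch (not in the original source) of the fencepost laid out
   above when old_size >= MINSIZE still holds after the subtraction:

     old_top                          old_size | PREV_INUSE | NON_MAIN_ARENA
     old_top + old_size               2*SIZE_SZ chunk, PREV_INUSE, footer set
     old_top + old_size + 2*SIZE_SZ   zero-size head, PREV_INUSE

   The artificial inuse chunks keep later coalescing from running past the
   end of the old heap once old_top has been handed to _int_free.  */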
7463d5cb | 2498 | else if (!tried_mmap) |
e404fb16 UD |
2499 | /* We can at least try to use mmap to get memory. */ | |
2500 | goto try_mmap; | |
fa8d436c UD |
2501 | |
2502 | } else { /* av == main_arena */ | |
2503 | ||
2504 | ||
2505 | /* Request enough space for nb + pad + overhead */ | |
2506 | ||
2507 | size = nb + mp_.top_pad + MINSIZE; | |
2508 | ||
2509 | /* | |
2510 | If contiguous, we can subtract out existing space that we hope to | |
2511 | combine with new space. We add it back later only if | |
2512 | we don't actually get contiguous space. | |
2513 | */ | |
2514 | ||
2515 | if (contiguous(av)) | |
2516 | size -= old_size; | |
2517 | ||
2518 | /* | |
2519 | Round to a multiple of page size. | |
2520 | If MORECORE is not contiguous, this ensures that we only call it | |
2521 | with whole-page arguments. And if MORECORE is contiguous and | |
2522 | this is not first time through, this preserves page-alignment of | |
2523 | previous calls. Otherwise, we correct to page-align below. | |
2524 | */ | |
2525 | ||
2526 | size = (size + pagemask) & ~pagemask; | |
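/* Illustration (editorial), assuming a 4096-byte page, mp_.top_pad == 0 and
   MINSIZE == 32: for nb == 1000 with a contiguous 500-byte top,
   size = 1000 + 0 + 32 - 500 == 532, rounded up to 4096 by the line above.
   The old top is subtracted because the new space is expected to merge with
   it; it is added back below if sbrk turns out not to be contiguous.  */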
2527 | ||
2528 | /* | |
2529 | Don't try to call MORECORE if the argument is so big as to appear | |
2530 | negative. Note that since mmap takes a size_t argument, it may succeed | |
2531 | below even if we cannot call MORECORE. | |
2532 | */ | |
2533 | ||
a9177ff5 | 2534 | if (size > 0) |
fa8d436c UD |
2535 | brk = (char*)(MORECORE(size)); |
2536 | ||
2537 | if (brk != (char*)(MORECORE_FAILURE)) { | |
2538 | /* Call the `morecore' hook if necessary. */ | |
df77455c UD |
2539 | void (*hook) (void) = force_reg (__after_morecore_hook); |
2540 | if (__builtin_expect (hook != NULL, 0)) | |
2541 | (*hook) (); | |
fa8d436c UD |
2542 | } else { |
2543 | /* | |
2544 | If we have mmap, try using it as a backup when MORECORE fails or | |
2545 | cannot be used. This is worth doing on systems that have "holes" in | |
2546 | address space, so sbrk cannot extend to give contiguous space, but | |
2547 | space is available elsewhere. Note that we ignore mmap max count | |
2548 | and threshold limits, since the space will not be used as a | |
2549 | segregated mmap region. | |
2550 | */ | |
2551 | ||
fa8d436c UD |
2552 | /* Cannot merge with old top, so add its size back in */ |
2553 | if (contiguous(av)) | |
2554 | size = (size + old_size + pagemask) & ~pagemask; | |
2555 | ||
2556 | /* If we are relying on mmap as backup, then use larger units */ | |
2557 | if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE)) | |
2558 | size = MMAP_AS_MORECORE_SIZE; | |
2559 | ||
2560 | /* Don't try if size wraps around 0 */ | |
2561 | if ((unsigned long)(size) > (unsigned long)(nb)) { | |
2562 | ||
75bfdfc7 | 2563 | char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); |
a9177ff5 | 2564 | |
75bfdfc7 | 2565 | if (mbrk != MAP_FAILED) { |
a9177ff5 | 2566 | |
72f90263 UD |
2567 | /* We do not need, and cannot use, another sbrk call to find end */ |
2568 | brk = mbrk; | |
2569 | snd_brk = brk + size; | |
2570 | ||
2571 | /* | |
2572 | Record that we no longer have a contiguous sbrk region. | |
2573 | After the first time mmap is used as backup, we do not | |
2574 | ever rely on contiguous space since this could incorrectly | |
2575 | bridge regions. | |
2576 | */ | |
2577 | set_noncontiguous(av); | |
fa8d436c UD |
2578 | } |
2579 | } | |
fa8d436c UD |
2580 | } |
2581 | ||
2582 | if (brk != (char*)(MORECORE_FAILURE)) { | |
2583 | if (mp_.sbrk_base == 0) | |
2584 | mp_.sbrk_base = brk; | |
2585 | av->system_mem += size; | |
2586 | ||
2587 | /* | |
2588 | If MORECORE extends previous space, we can likewise extend top size. | |
2589 | */ | |
a9177ff5 | 2590 | |
fa8d436c UD |
2591 | if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) |
2592 | set_head(old_top, (size + old_size) | PREV_INUSE); | |
2593 | ||
886d5973 | 2594 | else if (contiguous(av) && old_size && brk < old_end) { |
fa8d436c | 2595 | /* Oops! Someone else killed our space. Can't touch anything. */
1fdd89a7 | 2596 | malloc_printerr (3, "break adjusted to free malloc space", brk); |
fa8d436c UD |
2597 | } |
2598 | ||
2599 | /* | |
2600 | Otherwise, make adjustments: | |
a9177ff5 | 2601 | |
fa8d436c | 2602 | * If the first time through or noncontiguous, we need to call sbrk |
72f90263 | 2603 | just to find out where the end of memory lies. |
fa8d436c UD |
2604 | |
2605 | * We need to ensure that all returned chunks from malloc will meet | |
72f90263 | 2606 | MALLOC_ALIGNMENT |
fa8d436c UD |
2607 | |
2608 | * If there was an intervening foreign sbrk, we need to adjust sbrk | |
72f90263 UD |
2609 | request size to account for fact that we will not be able to |
2610 | combine new space with existing space in old_top. | |
fa8d436c UD |
2611 | |
2612 | * Almost all systems internally allocate whole pages at a time, in | |
72f90263 UD |
2613 | which case we might as well use the whole last page of request. |
2614 | So we allocate enough more memory to hit a page boundary now, | |
2615 | which in turn causes future contiguous calls to page-align. | |
fa8d436c | 2616 | */ |
a9177ff5 | 2617 | |
fa8d436c | 2618 | else { |
fa8d436c UD |
2619 | front_misalign = 0; |
2620 | end_misalign = 0; | |
2621 | correction = 0; | |
2622 | aligned_brk = brk; | |
a9177ff5 | 2623 | |
fa8d436c | 2624 | /* handle contiguous cases */ |
a9177ff5 RM |
2625 | if (contiguous(av)) { |
2626 | ||
0cb71e02 UD |
2627 | /* Count foreign sbrk as system_mem. */ |
2628 | if (old_size) | |
2629 | av->system_mem += brk - old_end; | |
2630 | ||
72f90263 | 2631 | /* Guarantee alignment of first new chunk made from this space */ |
fa8d436c | 2632 | |
72f90263 UD |
2633 | front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK; |
2634 | if (front_misalign > 0) { | |
fa8d436c | 2635 | |
72f90263 UD |
2636 | /* |
2637 | Skip over some bytes to arrive at an aligned position. | |
2638 | We don't need to specially mark these wasted front bytes. | |
2639 | They will never be accessed anyway because | |
2640 | prev_inuse of av->top (and any chunk created from its start) | |
2641 | is always true after initialization. | |
2642 | */ | |
fa8d436c | 2643 | |
72f90263 UD |
2644 | correction = MALLOC_ALIGNMENT - front_misalign; |
2645 | aligned_brk += correction; | |
2646 | } | |
a9177ff5 | 2647 | |
72f90263 UD |
2648 | /* |
2649 | If this isn't adjacent to existing space, then we will not | |
2650 | be able to merge with old_top space, so must add to 2nd request. | |
2651 | */ | |
a9177ff5 | 2652 | |
72f90263 | 2653 | correction += old_size; |
a9177ff5 | 2654 | |
72f90263 UD |
2655 | /* Extend the end address to hit a page boundary */ |
2656 | end_misalign = (INTERNAL_SIZE_T)(brk + size + correction); | |
2657 | correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; | |
a9177ff5 | 2658 | |
72f90263 UD |
2659 | assert(correction >= 0); |
2660 | snd_brk = (char*)(MORECORE(correction)); | |
a9177ff5 | 2661 | |
72f90263 UD |
2662 | /* |
2663 | If we can't allocate the correction, try to at least find out the | |
2664 | current brk. It might be enough to proceed without failing. | |
a9177ff5 | 2665 | |
72f90263 UD |
2666 | Note that if second sbrk did NOT fail, we assume that space |
2667 | is contiguous with first sbrk. This is a safe assumption unless | |
2668 | the program is multithreaded but doesn't use locks and a foreign sbrk | |
2669 | occurred between our first and second calls. | |
2670 | */ | |
a9177ff5 | 2671 | |
72f90263 UD |
2672 | if (snd_brk == (char*)(MORECORE_FAILURE)) { |
2673 | correction = 0; | |
2674 | snd_brk = (char*)(MORECORE(0)); | |
2675 | } else { | |
fa8d436c | 2676 | /* Call the `morecore' hook if necessary. */ |
df77455c UD |
2677 | void (*hook) (void) = force_reg (__after_morecore_hook); |
2678 | if (__builtin_expect (hook != NULL, 0)) | |
2679 | (*hook) (); | |
2680 | } | |
fa8d436c | 2681 | } |
a9177ff5 | 2682 | |
fa8d436c | 2683 | /* handle non-contiguous cases */ |
a9177ff5 | 2684 | else { |
72f90263 UD |
2685 | /* MORECORE/mmap must correctly align */ |
2686 | assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0); | |
a9177ff5 | 2687 | |
72f90263 UD |
2688 | /* Find out current end of memory */ |
2689 | if (snd_brk == (char*)(MORECORE_FAILURE)) { | |
2690 | snd_brk = (char*)(MORECORE(0)); | |
2691 | } | |
fa8d436c | 2692 | } |
a9177ff5 | 2693 | |
fa8d436c UD |
2694 | /* Adjust top based on results of second sbrk */ |
2695 | if (snd_brk != (char*)(MORECORE_FAILURE)) { | |
72f90263 UD |
2696 | av->top = (mchunkptr)aligned_brk; |
2697 | set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); | |
2698 | av->system_mem += correction; | |
2699 | ||
2700 | /* | |
2701 | If not the first time through, we either have a | |
2702 | gap due to foreign sbrk or a non-contiguous region. Insert a | |
2703 | double fencepost at old_top to prevent consolidation with space | |
2704 | we don't own. These fenceposts are artificial chunks that are | |
2705 | marked as inuse and are in any case too small to use. We need | |
2706 | two to make sizes and alignments work out. | |
2707 | */ | |
2708 | ||
2709 | if (old_size != 0) { | |
2710 | /* | |
2711 | Shrink old_top to insert fenceposts, keeping size a | |
2712 | multiple of MALLOC_ALIGNMENT. We know there is at least | |
2713 | enough space in old_top to do this. | |
2714 | */ | |
2715 | old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK; | |
2716 | set_head(old_top, old_size | PREV_INUSE); | |
2717 | ||
2718 | /* | |
2719 | Note that the following assignments completely overwrite | |
2720 | old_top when old_size was previously MINSIZE. This is | |
2721 | intentional. We need the fencepost, even if old_top otherwise gets | |
2722 | lost. | |
2723 | */ | |
2724 | chunk_at_offset(old_top, old_size )->size = | |
2725 | (2*SIZE_SZ)|PREV_INUSE; | |
2726 | ||
2727 | chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size = | |
2728 | (2*SIZE_SZ)|PREV_INUSE; | |
2729 | ||
2730 | /* If possible, release the rest. */ | |
2731 | if (old_size >= MINSIZE) { | |
72f90263 | 2732 | _int_free(av, old_top, 1); |
72f90263 | 2733 | } |
fa8d436c | 2734 | |
72f90263 | 2735 | } |
fa8d436c UD |
2736 | } |
2737 | } | |
fa8d436c UD |
2738 | } |
2739 | ||
2740 | } /* if (av != &main_arena) */ | |
2741 | ||
2742 | if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem)) | |
2743 | av->max_system_mem = av->system_mem; | |
2744 | check_malloc_state(av); | |
a9177ff5 | 2745 | |
fa8d436c UD |
2746 | /* finally, do the allocation */ |
2747 | p = av->top; | |
2748 | size = chunksize(p); | |
2749 | ||
2750 | /* check that one of the above allocation paths succeeded */ | |
2751 | if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { | |
2752 | remainder_size = size - nb; | |
2753 | remainder = chunk_at_offset(p, nb); | |
2754 | av->top = remainder; | |
2755 | set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
2756 | set_head(remainder, remainder_size | PREV_INUSE); | |
2757 | check_malloced_chunk(av, p, nb); | |
2758 | return chunk2mem(p); | |
2759 | } | |
2760 | ||
2761 | /* catch all failure paths */ | |
8e58439c | 2762 | __set_errno (ENOMEM); |
fa8d436c UD |
2763 | return 0; |
2764 | } | |
2765 | ||
2766 | ||
2767 | /* | |
2768 | sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back | |
2769 | to the system (via negative arguments to sbrk) if there is unused | |
2770 | memory at the `high' end of the malloc pool. It is called | |
2771 | automatically by free() when top space exceeds the trim | |
2772 | threshold. It is also called by the public malloc_trim routine. It | |
2773 | returns 1 if it actually released any memory, else 0. | |
2774 | */ | |
2775 | ||
fa8d436c | 2776 | static int sYSTRIm(size_t pad, mstate av) |
fa8d436c UD |
2777 | { |
2778 | long top_size; /* Amount of top-most memory */ | |
2779 | long extra; /* Amount to release */ | |
2780 | long released; /* Amount actually released */ | |
2781 | char* current_brk; /* address returned by pre-check sbrk call */ | |
2782 | char* new_brk; /* address returned by post-check sbrk call */ | |
2783 | size_t pagesz; | |
2784 | ||
02d46fc4 | 2785 | pagesz = GLRO(dl_pagesize); |
fa8d436c | 2786 | top_size = chunksize(av->top); |
a9177ff5 | 2787 | |
fa8d436c | 2788 | /* Release in pagesize units, keeping at least pad + MINSIZE + 1 bytes in top */
b9b42ee0 | 2789 | extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1); |
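/* Editorial arithmetic, assuming pagesz == 4096, pad == 0 and MINSIZE == 32:
   for top_size == 150000, extra = (150000 - 33) & ~4095 == 147456 (36 pages),
   leaving top_size - extra == 2544 bytes -- always at least
   pad + MINSIZE + 1, though not necessarily a whole page.  */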
a9177ff5 | 2790 | |
fa8d436c | 2791 | if (extra > 0) { |
a9177ff5 | 2792 | |
fa8d436c UD |
2793 | /* |
2794 | Only proceed if end of memory is where we last set it. | |
2795 | This avoids problems if there were foreign sbrk calls. | |
2796 | */ | |
2797 | current_brk = (char*)(MORECORE(0)); | |
2798 | if (current_brk == (char*)(av->top) + top_size) { | |
a9177ff5 | 2799 | |
fa8d436c | 2800 | /* |
72f90263 UD |
2801 | Attempt to release memory. We ignore MORECORE return value, |
2802 | and instead call again to find out where new end of memory is. | |
2803 | This avoids problems if first call releases less than we asked, | |
2804 | or if failure somehow altered the brk value. (We could still | |
2805 | encounter problems if it altered brk in some very bad way, | |
2806 | but the only thing we can do is adjust anyway, which will cause | |
2807 | some downstream failure.) | |
fa8d436c | 2808 | */ |
a9177ff5 | 2809 | |
fa8d436c UD |
2810 | MORECORE(-extra); |
2811 | /* Call the `morecore' hook if necessary. */ | |
df77455c UD |
2812 | void (*hook) (void) = force_reg (__after_morecore_hook); |
2813 | if (__builtin_expect (hook != NULL, 0)) | |
2814 | (*hook) (); | |
fa8d436c | 2815 | new_brk = (char*)(MORECORE(0)); |
a9177ff5 | 2816 | |
fa8d436c | 2817 | if (new_brk != (char*)MORECORE_FAILURE) { |
72f90263 UD |
2818 | released = (long)(current_brk - new_brk); |
2819 | ||
2820 | if (released != 0) { | |
2821 | /* Success. Adjust top. */ | |
2822 | av->system_mem -= released; | |
2823 | set_head(av->top, (top_size - released) | PREV_INUSE); | |
2824 | check_malloc_state(av); | |
2825 | return 1; | |
2826 | } | |
fa8d436c UD |
2827 | } |
2828 | } | |
2829 | } | |
2830 | return 0; | |
f65fd747 UD |
2831 | } |
2832 | ||
431c33c0 UD |
2833 | static void |
2834 | internal_function | |
431c33c0 | 2835 | munmap_chunk(mchunkptr p) |
f65fd747 UD |
2836 | { |
2837 | INTERNAL_SIZE_T size = chunksize(p); | |
f65fd747 UD |
2838 | |
2839 | assert (chunk_is_mmapped(p)); | |
8e635611 UD |
2840 | |
2841 | uintptr_t block = (uintptr_t) p - p->prev_size; | |
2842 | size_t total_size = p->prev_size + size; | |
2843 | /* Unfortunately we have to do the compiler's job by hand here. Normally | |
2844 | we would test BLOCK and TOTAL-SIZE separately for compliance with the | |
2845 | page size. But gcc does not recognize the optimization possibility | |
2846 | (at the moment, at least), so we combine the two values into one before | |
2847 | the bit test. */ | |
02d46fc4 | 2848 | if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0)) |
8e635611 UD |
2849 | { |
2850 | malloc_printerr (check_action, "munmap_chunk(): invalid pointer", | |
2851 | chunk2mem (p)); | |
2852 | return; | |
2853 | } | |
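/* Worked example (editorial), assuming a 4096-byte page: block ==
   0x7f0000001000 and total_size == 0x3000 both have their low 12 bits
   clear, so (block | total_size) & 0xfff == 0 and the check above passes.
   Any misaligned pointer or size sets a low bit in the OR and takes the
   error path instead.  */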
f65fd747 | 2854 | |
fa8d436c | 2855 | mp_.n_mmaps--; |
8e635611 | 2856 | mp_.mmapped_mem -= total_size; |
f65fd747 | 2857 | |
6ef76f3b UD |
2858 | /* If munmap failed the process virtual memory address space is in a |
2859 | bad shape. Just leave the block hanging around, the process will | |
2860 | terminate shortly anyway since not much can be done. */ | |
2861 | munmap((char *)block, total_size); | |
f65fd747 UD |
2862 | } |
2863 | ||
2864 | #if HAVE_MREMAP | |
2865 | ||
431c33c0 UD |
2866 | static mchunkptr |
2867 | internal_function | |
431c33c0 | 2868 | mremap_chunk(mchunkptr p, size_t new_size) |
f65fd747 | 2869 | { |
02d46fc4 | 2870 | size_t page_mask = GLRO(dl_pagesize) - 1; |
f65fd747 UD |
2871 | INTERNAL_SIZE_T offset = p->prev_size; |
2872 | INTERNAL_SIZE_T size = chunksize(p); | |
2873 | char *cp; | |
2874 | ||
2875 | assert (chunk_is_mmapped(p)); | |
02d46fc4 | 2876 | assert(((size + offset) & (GLRO(dl_pagesize)-1)) == 0); |
f65fd747 UD |
2877 | |
2878 | /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ | |
2879 | new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask; | |
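/* Editorial example, assuming SIZE_SZ == 8, a 4096-byte page and
   offset == 0: new_size == 5000 becomes
   (5000 + 0 + 8 + 4095) & ~4095 == 8192.  */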
2880 | ||
68f3802d UD |
2881 | /* No need to remap if the number of pages does not change. */ |
2882 | if (size + offset == new_size) | |
2883 | return p; | |
2884 | ||
f65fd747 | 2885 | cp = (char *)mremap((char *)p - offset, size + offset, new_size, |
72f90263 | 2886 | MREMAP_MAYMOVE); |
f65fd747 | 2887 | |
431c33c0 | 2888 | if (cp == MAP_FAILED) return 0; |
f65fd747 UD |
2889 | |
2890 | p = (mchunkptr)(cp + offset); | |
2891 | ||
2892 | assert(aligned_OK(chunk2mem(p))); | |
2893 | ||
2894 | assert((p->prev_size == offset)); | |
2895 | set_head(p, (new_size - offset)|IS_MMAPPED); | |
2896 | ||
fa8d436c UD |
2897 | mp_.mmapped_mem -= size + offset; |
2898 | mp_.mmapped_mem += new_size; | |
2899 | if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem) | |
2900 | mp_.max_mmapped_mem = mp_.mmapped_mem; | |
f65fd747 UD |
2901 | return p; |
2902 | } | |
2903 | ||
2904 | #endif /* HAVE_MREMAP */ | |
2905 | ||
fa8d436c | 2906 | /*------------------------ Public wrappers. --------------------------------*/ |
f65fd747 | 2907 | |
22a89187 | 2908 | void* |
fa8d436c UD |
2909 | public_mALLOc(size_t bytes) |
2910 | { | |
2911 | mstate ar_ptr; | |
22a89187 | 2912 | void *victim; |
f65fd747 | 2913 | |
df77455c UD |
2914 | __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) |
2915 | = force_reg (__malloc_hook); | |
bfacf1af | 2916 | if (__builtin_expect (hook != NULL, 0)) |
fa8d436c | 2917 | return (*hook)(bytes, RETURN_ADDRESS (0)); |
f65fd747 | 2918 | |
425ce2ed | 2919 | arena_lookup(ar_ptr); |
425ce2ed UD |
2920 | |
2921 | arena_lock(ar_ptr, bytes); | |
fa8d436c | 2922 | if(!ar_ptr) |
f65fd747 | 2923 | return 0; |
fa8d436c UD |
2924 | victim = _int_malloc(ar_ptr, bytes); |
2925 | if(!victim) { | |
2926 | /* Maybe the failure is due to running out of mmapped areas. */ | |
2927 | if(ar_ptr != &main_arena) { | |
2928 | (void)mutex_unlock(&ar_ptr->mutex); | |
b3443701 UD |
2929 | ar_ptr = &main_arena; |
2930 | (void)mutex_lock(&ar_ptr->mutex); | |
2931 | victim = _int_malloc(ar_ptr, bytes); | |
2932 | (void)mutex_unlock(&ar_ptr->mutex); | |
fa8d436c | 2933 | } else { |
fa8d436c UD |
2934 | /* ... or sbrk() has failed and there is still a chance to mmap() */ |
2935 | ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes); | |
2936 | (void)mutex_unlock(&main_arena.mutex); | |
2937 | if(ar_ptr) { | |
72f90263 UD |
2938 | victim = _int_malloc(ar_ptr, bytes); |
2939 | (void)mutex_unlock(&ar_ptr->mutex); | |
fa8d436c | 2940 | } |
60f0e64b | 2941 | } |
fa8d436c UD |
2942 | } else |
2943 | (void)mutex_unlock(&ar_ptr->mutex); | |
2944 | assert(!victim || chunk_is_mmapped(mem2chunk(victim)) || | |
2945 | ar_ptr == arena_for_chunk(mem2chunk(victim))); | |
2946 | return victim; | |
f65fd747 | 2947 | } |
aa420660 | 2948 | libc_hidden_def(public_mALLOc) |
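/* Editorial summary of the retry logic above: on an allocation failure the
   wrapper retries exactly once.  A non-main arena falls back to the main
   arena, whose sbrk-backed heap may still have room; the main arena falls
   back to arena_get2(), which may hand out (or create) an mmap-backed
   arena.  The same pattern recurs in the other public wrappers below.  */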
f65fd747 | 2949 | |
fa8d436c | 2950 | void |
22a89187 | 2951 | public_fREe(void* mem) |
f65fd747 | 2952 | { |
fa8d436c UD |
2953 | mstate ar_ptr; |
2954 | mchunkptr p; /* chunk corresponding to mem */ | |
2955 | ||
df77455c UD |
2956 | void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) |
2957 | = force_reg (__free_hook); | |
bfacf1af | 2958 | if (__builtin_expect (hook != NULL, 0)) { |
fa8d436c UD |
2959 | (*hook)(mem, RETURN_ADDRESS (0)); |
2960 | return; | |
f65fd747 | 2961 | } |
f65fd747 | 2962 | |
fa8d436c UD |
2963 | if (mem == 0) /* free(0) has no effect */ |
2964 | return; | |
f65fd747 | 2965 | |
fa8d436c | 2966 | p = mem2chunk(mem); |
f65fd747 | 2967 | |
fa8d436c UD |
2968 | if (chunk_is_mmapped(p)) /* release mmapped memory. */ |
2969 | { | |
1d05c2fb UD |
2970 | /* see if the dynamic brk/mmap threshold needs adjusting */ |
2971 | if (!mp_.no_dyn_threshold | |
2972 | && p->size > mp_.mmap_threshold | |
72f90263 | 2973 | && p->size <= DEFAULT_MMAP_THRESHOLD_MAX) |
1d05c2fb UD |
2974 | { |
2975 | mp_.mmap_threshold = chunksize (p); | |
2976 | mp_.trim_threshold = 2 * mp_.mmap_threshold; | |
2977 | } | |
fa8d436c UD |
2978 | munmap_chunk(p); |
2979 | return; | |
8a4b65b4 | 2980 | } |
f65fd747 | 2981 | |
fa8d436c | 2982 | ar_ptr = arena_for_chunk(p); |
425ce2ed | 2983 | _int_free(ar_ptr, p, 0); |
f65fd747 | 2984 | } |
aa420660 | 2985 | libc_hidden_def (public_fREe) |
f65fd747 | 2986 | |
22a89187 UD |
2987 | void* |
2988 | public_rEALLOc(void* oldmem, size_t bytes) | |
f65fd747 | 2989 | { |
fa8d436c UD |
2990 | mstate ar_ptr; |
2991 | INTERNAL_SIZE_T nb; /* padded request size */ | |
f65fd747 | 2992 | |
22a89187 | 2993 | void* newp; /* chunk to return */ |
f65fd747 | 2994 | |
06d6611a | 2995 | __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) = |
df77455c | 2996 | force_reg (__realloc_hook); |
bfacf1af | 2997 | if (__builtin_expect (hook != NULL, 0)) |
fa8d436c | 2998 | return (*hook)(oldmem, bytes, RETURN_ADDRESS (0)); |
f65fd747 | 2999 | |
fa8d436c UD |
3000 | #if REALLOC_ZERO_BYTES_FREES |
3001 | if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; } | |
f65fd747 | 3002 | #endif |
f65fd747 | 3003 | |
fa8d436c UD |
3004 | /* realloc of null is supposed to be same as malloc */ |
3005 | if (oldmem == 0) return public_mALLOc(bytes); | |
f65fd747 | 3006 | |
78ac92ad UD |
3007 | /* chunk corresponding to oldmem */ |
3008 | const mchunkptr oldp = mem2chunk(oldmem); | |
3009 | /* its size */ | |
3010 | const INTERNAL_SIZE_T oldsize = chunksize(oldp); | |
f65fd747 | 3011 | |
dc165f7b UD |
3012 | /* A little security check which won't hurt performance: the | |
3013 | allocator never wraps around at the end of the address space. | |
3014 | Therefore we can exclude some size values which might appear | |
3015 | here by accident or by "design" from some intruder. */ | |
3016 | if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0) | |
073f560e | 3017 | || __builtin_expect (misaligned_chunk (oldp), 0)) |
dc165f7b UD |
3018 | { |
3019 | malloc_printerr (check_action, "realloc(): invalid pointer", oldmem); | |
3020 | return NULL; | |
3021 | } | |
3022 | ||
fa8d436c | 3023 | checked_request2size(bytes, nb); |
f65fd747 | 3024 | |
fa8d436c UD |
3025 | if (chunk_is_mmapped(oldp)) |
3026 | { | |
22a89187 | 3027 | void* newmem; |
f65fd747 | 3028 | |
fa8d436c UD |
3029 | #if HAVE_MREMAP |
3030 | newp = mremap_chunk(oldp, nb); | |
3031 | if(newp) return chunk2mem(newp); | |
f65fd747 | 3032 | #endif |
fa8d436c UD |
3033 | /* Note the extra SIZE_SZ overhead. */ |
3034 | if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */ | |
3035 | /* Must alloc, copy, free. */ | |
3036 | newmem = public_mALLOc(bytes); | |
3037 | if (newmem == 0) return 0; /* propagate failure */ | |
3038 | MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); | |
3039 | munmap_chunk(oldp); | |
3040 | return newmem; | |
3041 | } | |
fa8d436c UD |
3042 | |
3043 | ar_ptr = arena_for_chunk(oldp); | |
3044 | #if THREAD_STATS | |
3045 | if(!mutex_trylock(&ar_ptr->mutex)) | |
3046 | ++(ar_ptr->stat_lock_direct); | |
3047 | else { | |
3048 | (void)mutex_lock(&ar_ptr->mutex); | |
3049 | ++(ar_ptr->stat_lock_wait); | |
3050 | } | |
f65fd747 | 3051 | #else |
fa8d436c | 3052 | (void)mutex_lock(&ar_ptr->mutex); |
f65fd747 | 3053 | #endif |
f65fd747 | 3054 | |
22a89187 | 3055 | #if !defined PER_THREAD |
fa8d436c | 3056 | /* As in malloc(), remember this arena for the next allocation. */ |
22a89187 | 3057 | tsd_setspecific(arena_key, (void *)ar_ptr); |
f65fd747 UD |
3058 | #endif |
3059 | ||
4c8b8cc3 | 3060 | newp = _int_realloc(ar_ptr, oldp, oldsize, nb); |
f65fd747 | 3061 | |
fa8d436c UD |
3062 | (void)mutex_unlock(&ar_ptr->mutex); |
3063 | assert(!newp || chunk_is_mmapped(mem2chunk(newp)) || | |
3064 | ar_ptr == arena_for_chunk(mem2chunk(newp))); | |
07014fca UD |
3065 | |
3066 | if (newp == NULL) | |
3067 | { | |
3068 | /* Try harder to allocate memory in other arenas. */ | |
3069 | newp = public_mALLOc(bytes); | |
3070 | if (newp != NULL) | |
3071 | { | |
486bdb88 | 3072 | MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ); |
425ce2ed | 3073 | _int_free(ar_ptr, oldp, 0); |
07014fca UD |
3074 | } |
3075 | } | |
3076 | ||
fa8d436c UD |
3077 | return newp; |
3078 | } | |
aa420660 | 3079 | libc_hidden_def (public_rEALLOc) |
f65fd747 | 3080 | |
22a89187 | 3081 | void* |
fa8d436c UD |
3082 | public_mEMALIGn(size_t alignment, size_t bytes) |
3083 | { | |
3084 | mstate ar_ptr; | |
22a89187 | 3085 | void *p; |
f65fd747 | 3086 | |
fa8d436c UD |
3087 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t, |
3088 | __const __malloc_ptr_t)) = | |
df77455c | 3089 | force_reg (__memalign_hook); |
bfacf1af | 3090 | if (__builtin_expect (hook != NULL, 0)) |
fa8d436c | 3091 | return (*hook)(alignment, bytes, RETURN_ADDRESS (0)); |
f65fd747 | 3092 | |
fa8d436c UD |
3093 | /* If we need less alignment than we provide anyway, just relay to malloc */
3094 | if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes); | |
1228ed5c | 3095 | |
fa8d436c UD |
3096 | /* Otherwise, ensure that it is at least a minimum chunk size */ |
3097 | if (alignment < MINSIZE) alignment = MINSIZE; | |
f65fd747 | 3098 | |
fa8d436c UD |
3099 | arena_get(ar_ptr, bytes + alignment + MINSIZE); |
3100 | if(!ar_ptr) | |
3101 | return 0; | |
3102 | p = _int_memalign(ar_ptr, alignment, bytes); | |
fa8d436c UD |
3103 | if(!p) { |
3104 | /* Maybe the failure is due to running out of mmapped areas. */ | |
3105 | if(ar_ptr != &main_arena) { | |
b3443701 UD |
3106 | (void)mutex_unlock(&ar_ptr->mutex); |
3107 | ar_ptr = &main_arena; | |
3108 | (void)mutex_lock(&ar_ptr->mutex); | |
3109 | p = _int_memalign(ar_ptr, alignment, bytes); | |
3110 | (void)mutex_unlock(&ar_ptr->mutex); | |
f65fd747 | 3111 | } else { |
fa8d436c | 3112 | /* ... or sbrk() has failed and there is still a chance to mmap() */ |
f6bacb8e | 3113 | mstate prev = ar_ptr->next ? ar_ptr : 0; |
b3443701 | 3114 | (void)mutex_unlock(&ar_ptr->mutex); |
f6bacb8e | 3115 | ar_ptr = arena_get2(prev, bytes); |
fa8d436c | 3116 | if(ar_ptr) { |
72f90263 UD |
3117 | p = _int_memalign(ar_ptr, alignment, bytes); |
3118 | (void)mutex_unlock(&ar_ptr->mutex); | |
fa8d436c | 3119 | } |
f65fd747 | 3120 | } |
ab355d9a UD |
3121 | } else |
3122 | (void)mutex_unlock(&ar_ptr->mutex); | |
fa8d436c UD |
3123 | assert(!p || chunk_is_mmapped(mem2chunk(p)) || |
3124 | ar_ptr == arena_for_chunk(mem2chunk(p))); | |
3125 | return p; | |
f65fd747 | 3126 | } |
aa420660 | 3127 | libc_hidden_def (public_mEMALIGn) |
f65fd747 | 3128 | |
22a89187 | 3129 | void* |
fa8d436c UD |
3130 | public_vALLOc(size_t bytes) |
3131 | { | |
3132 | mstate ar_ptr; | |
22a89187 | 3133 | void *p; |
f65fd747 | 3134 | |
fa8d436c UD |
3135 | if(__malloc_initialized < 0) |
3136 | ptmalloc_init (); | |
8088488d | 3137 | |
02d46fc4 | 3138 | size_t pagesz = GLRO(dl_pagesize); |
dba38551 | 3139 | |
8088488d UD |
3140 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t, |
3141 | __const __malloc_ptr_t)) = | |
df77455c | 3142 | force_reg (__memalign_hook); |
bfacf1af | 3143 | if (__builtin_expect (hook != NULL, 0)) |
dba38551 | 3144 | return (*hook)(pagesz, bytes, RETURN_ADDRESS (0)); |
8088488d | 3145 | |
dba38551 | 3146 | arena_get(ar_ptr, bytes + pagesz + MINSIZE); |
fa8d436c UD |
3147 | if(!ar_ptr) |
3148 | return 0; | |
3149 | p = _int_valloc(ar_ptr, bytes); | |
3150 | (void)mutex_unlock(&ar_ptr->mutex); | |
dba38551 UD |
3151 | if(!p) { |
3152 | /* Maybe the failure is due to running out of mmapped areas. */ | |
3153 | if(ar_ptr != &main_arena) { | |
8c0b5f14 PB |
3154 | ar_ptr = &main_arena; |
3155 | (void)mutex_lock(&ar_ptr->mutex); | |
9d256984 | 3156 | p = _int_memalign(ar_ptr, pagesz, bytes); |
8c0b5f14 | 3157 | (void)mutex_unlock(&ar_ptr->mutex); |
dba38551 | 3158 | } else { |
dba38551 UD |
3159 | /* ... or sbrk() has failed and there is still a chance to mmap() */ |
3160 | ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes); | |
3161 | if(ar_ptr) { | |
72f90263 UD |
3162 | p = _int_memalign(ar_ptr, pagesz, bytes); |
3163 | (void)mutex_unlock(&ar_ptr->mutex); | |
dba38551 | 3164 | } |
dba38551 UD |
3165 | } |
3166 | } | |
3167 | assert(!p || chunk_is_mmapped(mem2chunk(p)) || | |
3168 | ar_ptr == arena_for_chunk(mem2chunk(p))); | |
3169 | ||
fa8d436c UD |
3170 | return p; |
3171 | } | |
f65fd747 | 3172 | |
22a89187 | 3173 | void* |
fa8d436c UD |
3174 | public_pVALLOc(size_t bytes) |
3175 | { | |
3176 | mstate ar_ptr; | |
22a89187 | 3177 | void *p; |
f65fd747 | 3178 | |
fa8d436c UD |
3179 | if(__malloc_initialized < 0) |
3180 | ptmalloc_init (); | |
8088488d | 3181 | |
02d46fc4 UD |
3182 | size_t pagesz = GLRO(dl_pagesize); |
3183 | size_t page_mask = GLRO(dl_pagesize) - 1; | |
dba38551 UD |
3184 | size_t rounded_bytes = (bytes + page_mask) & ~(page_mask); |
3185 | ||
8088488d UD |
3186 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t, |
3187 | __const __malloc_ptr_t)) = | |
df77455c | 3188 | force_reg (__memalign_hook); |
bfacf1af | 3189 | if (__builtin_expect (hook != NULL, 0)) |
dba38551 | 3190 | return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0)); |
8088488d | 3191 | |
dba38551 | 3192 | arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE); |
fa8d436c UD |
3193 | p = _int_pvalloc(ar_ptr, bytes); |
3194 | (void)mutex_unlock(&ar_ptr->mutex); | |
dba38551 UD |
3195 | if(!p) { |
3196 | /* Maybe the failure is due to running out of mmapped areas. */ | |
3197 | if(ar_ptr != &main_arena) { | |
8c0b5f14 PB |
3198 | ar_ptr = &main_arena; |
3199 | (void)mutex_lock(&ar_ptr->mutex); | |
9d256984 | 3200 | p = _int_memalign(ar_ptr, pagesz, rounded_bytes); |
8c0b5f14 | 3201 | (void)mutex_unlock(&ar_ptr->mutex); |
dba38551 | 3202 | } else { |
dba38551 UD |
3203 | /* ... or sbrk() has failed and there is still a chance to mmap() */ |
3204 | ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, | |
3205 | bytes + 2*pagesz + MINSIZE); | |
3206 | if(ar_ptr) { | |
72f90263 UD |
3207 | p = _int_memalign(ar_ptr, pagesz, rounded_bytes); |
3208 | (void)mutex_unlock(&ar_ptr->mutex); | |
dba38551 | 3209 | } |
dba38551 UD |
3210 | } |
3211 | } | |
3212 | assert(!p || chunk_is_mmapped(mem2chunk(p)) || | |
3213 | ar_ptr == arena_for_chunk(mem2chunk(p))); | |
3214 | ||
fa8d436c UD |
3215 | return p; |
3216 | } | |
f65fd747 | 3217 | |
22a89187 | 3218 | void* |
fa8d436c | 3219 | public_cALLOc(size_t n, size_t elem_size) |
f65fd747 | 3220 | { |
fa8d436c UD |
3221 | mstate av; |
3222 | mchunkptr oldtop, p; | |
0950889b | 3223 | INTERNAL_SIZE_T bytes, sz, csz, oldtopsize; |
22a89187 | 3224 | void* mem; |
fa8d436c UD |
3225 | unsigned long clearsize; |
3226 | unsigned long nclears; | |
3227 | INTERNAL_SIZE_T* d; | |
0950889b UD |
3228 | |
3229 | /* size_t is unsigned so the behavior on overflow is defined. */ | |
3230 | bytes = n * elem_size; | |
d9af917d UD |
3231 | #define HALF_INTERNAL_SIZE_T \ |
3232 | (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2)) | |
3233 | if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) { | |
0be405c2 | 3234 | if (elem_size != 0 && bytes / elem_size != n) { |
8e58439c | 3235 | __set_errno (ENOMEM); |
d9af917d UD |
3236 | return 0; |
3237 | } | |
0950889b UD |
3238 | } |
3239 | ||
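/* Worked example (editorial), assuming a 64-bit INTERNAL_SIZE_T, so
   HALF_INTERNAL_SIZE_T == 2^32: for n == elem_size == 2^33 the product
   wraps to 0, but (n | elem_size) >= 2^32 routes the pair through the
   division test above, and 0 / 2^33 != 2^33 catches the overflow.  */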
df77455c UD |
3240 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) = |
3241 | force_reg (__malloc_hook); | |
bfacf1af | 3242 | if (__builtin_expect (hook != NULL, 0)) { |
0950889b | 3243 | sz = bytes; |
fa8d436c UD |
3244 | mem = (*hook)(sz, RETURN_ADDRESS (0)); |
3245 | if(mem == 0) | |
3246 | return 0; | |
fa8d436c | 3247 | return memset(mem, 0, sz); |
10dc2a90 | 3248 | } |
10dc2a90 | 3249 | |
0950889b | 3250 | sz = bytes; |
fa8d436c UD |
3251 | |
3252 | arena_get(av, sz); | |
3253 | if(!av) | |
f65fd747 | 3254 | return 0; |
fa8d436c UD |
3255 | |
3256 | /* Check if we hand out the top chunk, in which case there may be no | |
3257 | need to clear. */ | |
3258 | #if MORECORE_CLEARS | |
3259 | oldtop = top(av); | |
3260 | oldtopsize = chunksize(top(av)); | |
3261 | #if MORECORE_CLEARS < 2 | |
3262 | /* Only newly allocated memory is guaranteed to be cleared. */ | |
3263 | if (av == &main_arena && | |
3264 | oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop) | |
3265 | oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop); | |
3266 | #endif | |
4cd4c5d6 UD |
3267 | if (av != &main_arena) |
3268 | { | |
3269 | heap_info *heap = heap_for_ptr (oldtop); | |
3270 | if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop) | |
3271 | oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop; | |
3272 | } | |
fa8d436c UD |
3273 | #endif |
3274 | mem = _int_malloc(av, sz); | |
3275 | ||
3276 | /* Only clearing follows, so we can unlock early. */ | |
3277 | (void)mutex_unlock(&av->mutex); | |
3278 | ||
3279 | assert(!mem || chunk_is_mmapped(mem2chunk(mem)) || | |
3280 | av == arena_for_chunk(mem2chunk(mem))); | |
3281 | ||
3282 | if (mem == 0) { | |
7799b7b3 | 3283 | /* Maybe the failure is due to running out of mmapped areas. */ |
fa8d436c | 3284 | if(av != &main_arena) { |
7799b7b3 | 3285 | (void)mutex_lock(&main_arena.mutex); |
fa8d436c | 3286 | mem = _int_malloc(&main_arena, sz); |
7799b7b3 | 3287 | (void)mutex_unlock(&main_arena.mutex); |
e9b3e3c5 | 3288 | } else { |
e9b3e3c5 | 3289 | /* ... or sbrk() has failed and there is still a chance to mmap() */ |
fa8d436c UD |
3290 | (void)mutex_lock(&main_arena.mutex); |
3291 | av = arena_get2(av->next ? av : 0, sz); | |
e9b3e3c5 | 3292 | (void)mutex_unlock(&main_arena.mutex); |
fa8d436c | 3293 | if(av) { |
72f90263 UD |
3294 | mem = _int_malloc(av, sz); |
3295 | (void)mutex_unlock(&av->mutex); | |
e9b3e3c5 | 3296 | } |
7799b7b3 | 3297 | } |
fa8d436c UD |
3298 | if (mem == 0) return 0; |
3299 | } | |
3300 | p = mem2chunk(mem); | |
f65fd747 | 3301 | |
fa8d436c | 3302 | /* Two optional cases in which clearing not necessary */ |
9ea9af19 UD |
3303 | if (chunk_is_mmapped (p)) |
3304 | { | |
3305 | if (__builtin_expect (perturb_byte, 0)) | |
3306 | MALLOC_ZERO (mem, sz); | |
3307 | return mem; | |
3308 | } | |
f65fd747 | 3309 | |
fa8d436c | 3310 | csz = chunksize(p); |
f65fd747 | 3311 | |
fa8d436c | 3312 | #if MORECORE_CLEARS |
56137dbc | 3313 | if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) { |
fa8d436c UD |
3314 | /* clear only the bytes from non-freshly-sbrked memory */ |
3315 | csz = oldtopsize; | |
f65fd747 | 3316 | } |
fa8d436c | 3317 | #endif |
f65fd747 | 3318 | |
fa8d436c UD |
3319 | /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that | |
3320 | contents have an odd number of INTERNAL_SIZE_T-sized words; | |
3321 | minimally 3. */ | |
3322 | d = (INTERNAL_SIZE_T*)mem; | |
3323 | clearsize = csz - SIZE_SZ; | |
3324 | nclears = clearsize / sizeof(INTERNAL_SIZE_T); | |
3325 | assert(nclears >= 3); | |
f65fd747 | 3326 | |
fa8d436c UD |
3327 | if (nclears > 9) |
3328 | MALLOC_ZERO(d, clearsize); | |
f65fd747 | 3329 | |
fa8d436c UD |
3330 | else { |
3331 | *(d+0) = 0; | |
3332 | *(d+1) = 0; | |
3333 | *(d+2) = 0; | |
3334 | if (nclears > 4) { | |
3335 | *(d+3) = 0; | |
3336 | *(d+4) = 0; | |
3337 | if (nclears > 6) { | |
3338 | *(d+5) = 0; | |
3339 | *(d+6) = 0; | |
3340 | if (nclears > 8) { | |
3341 | *(d+7) = 0; | |
3342 | *(d+8) = 0; | |
3343 | } | |
f65fd747 UD |
3344 | } |
3345 | } | |
f65fd747 UD |
3346 | } |
3347 | ||
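/* Editorial sketch: the unrolled stores above are behaviorally equivalent
   to the simple loop below; the unroll is worthwhile because nclears is
   known to be a small odd count (>= 3) for non-mmapped chunks.  */
#if 0 /* example only, not compiled */
  unsigned long i;
  for (i = 0; i < nclears; ++i)
    d[i] = 0;
#endif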
fa8d436c UD |
3348 | return mem; |
3349 | } | |
f65fd747 | 3350 | |
88764ae2 | 3351 | |
22a89187 UD |
3352 | int |
3353 | public_mTRIm(size_t s) | |
fa8d436c | 3354 | { |
22a89187 | 3355 | int result = 0; |
f65fd747 | 3356 | |
88764ae2 UD |
3357 | if(__malloc_initialized < 0) |
3358 | ptmalloc_init (); | |
68631c8e UD |
3359 | |
3360 | mstate ar_ptr = &main_arena; | |
3361 | do | |
3362 | { | |
3363 | (void) mutex_lock (&ar_ptr->mutex); | |
3364 | result |= mTRIm (ar_ptr, s); | |
3365 | (void) mutex_unlock (&ar_ptr->mutex); | |
3366 | ||
3367 | ar_ptr = ar_ptr->next; | |
3368 | } | |
3369 | while (ar_ptr != &main_arena); | |
3370 | ||
fa8d436c UD |
3371 | return result; |
3372 | } | |
f65fd747 | 3373 | |
fa8d436c | 3374 | size_t |
22a89187 | 3375 | public_mUSABLe(void* m) |
fa8d436c UD |
3376 | { |
3377 | size_t result; | |
f65fd747 | 3378 | |
fa8d436c UD |
3379 | result = mUSABLe(m); |
3380 | return result; | |
3381 | } | |
f65fd747 | 3382 | |
fa8d436c UD |
3383 | void |
3384 | public_mSTATs() | |
3385 | { | |
3386 | mSTATs(); | |
3387 | } | |
f65fd747 | 3388 | |
fa8d436c UD |
3389 | struct mallinfo public_mALLINFo() |
3390 | { | |
3391 | struct mallinfo m; | |
f65fd747 | 3392 | |
6a00759b UD |
3393 | if(__malloc_initialized < 0) |
3394 | ptmalloc_init (); | |
fa8d436c UD |
3395 | (void)mutex_lock(&main_arena.mutex); |
3396 | m = mALLINFo(&main_arena); | |
3397 | (void)mutex_unlock(&main_arena.mutex); | |
3398 | return m; | |
f65fd747 UD |
3399 | } |
3400 | ||
fa8d436c UD |
3401 | int |
3402 | public_mALLOPt(int p, int v) | |
3403 | { | |
3404 | int result; | |
3405 | result = mALLOPt(p, v); | |
3406 | return result; | |
3407 | } | |
f65fd747 UD |
3408 | |
3409 | /* | |
fa8d436c | 3410 | ------------------------------ malloc ------------------------------ |
f65fd747 UD |
3411 | */ |
3412 | ||
22a89187 | 3413 | static void* |
fa8d436c | 3414 | _int_malloc(mstate av, size_t bytes) |
f65fd747 | 3415 | { |
fa8d436c UD |
3416 | INTERNAL_SIZE_T nb; /* normalized request size */ |
3417 | unsigned int idx; /* associated bin index */ | |
3418 | mbinptr bin; /* associated bin */ | |
f65fd747 | 3419 | |
fa8d436c UD |
3420 | mchunkptr victim; /* inspected/selected chunk */ |
3421 | INTERNAL_SIZE_T size; /* its size */ | |
3422 | int victim_index; /* its bin index */ | |
f65fd747 | 3423 | |
fa8d436c UD |
3424 | mchunkptr remainder; /* remainder from a split */ |
3425 | unsigned long remainder_size; /* its size */ | |
8a4b65b4 | 3426 | |
fa8d436c UD |
3427 | unsigned int block; /* bit map traverser */ |
3428 | unsigned int bit; /* bit map traverser */ | |
3429 | unsigned int map; /* current word of binmap */ | |
8a4b65b4 | 3430 | |
fa8d436c UD |
3431 | mchunkptr fwd; /* misc temp for linking */ |
3432 | mchunkptr bck; /* misc temp for linking */ | |
8a4b65b4 | 3433 | |
f6887a0d UD |
3434 | const char *errstr = NULL; |
3435 | ||
fa8d436c UD |
3436 | /* |
3437 | Convert request size to internal form by adding SIZE_SZ bytes | |
3438 | overhead plus possibly more to obtain necessary alignment and/or | |
3439 | to obtain a size of at least MINSIZE, the smallest allocatable | |
3440 | size. Also, checked_request2size traps (returning 0) request sizes | |
3441 | that are so large that they wrap around zero when padded and | |
3442 | aligned. | |
3443 | */ | |
f65fd747 | 3444 | |
fa8d436c | 3445 | checked_request2size(bytes, nb); |
f65fd747 | 3446 | |
fa8d436c UD |
3447 | /* |
3448 | If the size qualifies as a fastbin, first check corresponding bin. | |
3449 | This code is safe to execute even if av is not yet initialized, so we | |
3450 | can try it without checking, which saves some time on this fast path. | |
3451 | */ | |
f65fd747 | 3452 | |
9bf248c6 | 3453 | if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) { |
425ce2ed UD |
3454 | idx = fastbin_index(nb); |
3455 | mfastbinptr* fb = &fastbin (av, idx); | |
425ce2ed UD |
3456 | mchunkptr pp = *fb; |
3457 | do | |
3458 | { | |
3459 | victim = pp; | |
3460 | if (victim == NULL) | |
3461 | break; | |
3462 | } | |
3463 | while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) | |
3464 | != victim); | |
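/* Editorial note: the loop above is a lock-free LIFO pop.  The compare-
   and-exchange installs victim->fd as the new list head only if the head
   is still victim, returning the previous head; if another thread popped
   first, pp != victim and the loop retries with the fresh head.  */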
425ce2ed | 3465 | if (victim != 0) { |
6cce6540 | 3466 | if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0)) |
f6887a0d UD |
3467 | { |
3468 | errstr = "malloc(): memory corruption (fast)"; | |
3469 | errout: | |
3470 | malloc_printerr (check_action, errstr, chunk2mem (victim)); | |
ad3d3e8f | 3471 | return NULL; |
f6887a0d | 3472 | } |
fa8d436c | 3473 | check_remalloced_chunk(av, victim, nb); |
854278df UD |
3474 | void *p = chunk2mem(victim); |
3475 | if (__builtin_expect (perturb_byte, 0)) | |
3476 | alloc_perturb (p, bytes); | |
3477 | return p; | |
fa8d436c | 3478 | } |
f65fd747 UD |
3479 | } |
3480 | ||
fa8d436c UD |
3481 | /* |
3482 | If a small request, check regular bin. Since these "smallbins" | |
3483 | hold one size each, no searching within bins is necessary. | |
3484 | (For a large request, we need to wait until unsorted chunks are | |
3485 | processed to find best fit. But for small ones, fits are exact | |
3486 | anyway, so we can check now, which is faster.) | |
3487 | */ | |
f65fd747 | 3488 | |
fa8d436c UD |
3489 | if (in_smallbin_range(nb)) { |
3490 | idx = smallbin_index(nb); | |
3491 | bin = bin_at(av,idx); | |
7799b7b3 | 3492 | |
fa8d436c UD |
3493 | if ( (victim = last(bin)) != bin) { |
3494 | if (victim == 0) /* initialization check */ | |
72f90263 | 3495 | malloc_consolidate(av); |
fa8d436c | 3496 | else { |
72f90263 | 3497 | bck = victim->bk; |
f6887a0d UD |
3498 | if (__builtin_expect (bck->fd != victim, 0)) |
3499 | { | |
3500 | errstr = "malloc(): smallbin double linked list corrupted"; | |
3501 | goto errout; | |
3502 | } | |
72f90263 UD |
3503 | set_inuse_bit_at_offset(victim, nb); |
3504 | bin->bk = bck; | |
3505 | bck->fd = bin; | |
fa8d436c | 3506 | |
72f90263 | 3507 | if (av != &main_arena) |
fa8d436c | 3508 | victim->size |= NON_MAIN_ARENA; |
72f90263 | 3509 | check_malloced_chunk(av, victim, nb); |
854278df UD |
3510 | void *p = chunk2mem(victim); |
3511 | if (__builtin_expect (perturb_byte, 0)) | |
3512 | alloc_perturb (p, bytes); | |
3513 | return p; | |
fa8d436c UD |
3514 | } |
3515 | } | |
f65fd747 UD |
3516 | } |
3517 | ||
a9177ff5 | 3518 | /* |
fa8d436c UD |
3519 | If this is a large request, consolidate fastbins before continuing. |
3520 | While it might look excessive to kill all fastbins before | |
3521 | even seeing if there is space available, this avoids | |
3522 | fragmentation problems normally associated with fastbins. | |
3523 | Also, in practice, programs tend to have runs of either small or | |
a9177ff5 | 3524 | large requests, but less often mixtures, so consolidation is not |
fa8d436c UD |
3525 | invoked all that often in most programs. And the programs in | |
3526 | which it is called frequently tend otherwise to fragment. | |
3527 | */ | |
7799b7b3 | 3528 | |
fa8d436c UD |
3529 | else { |
3530 | idx = largebin_index(nb); | |
a9177ff5 | 3531 | if (have_fastchunks(av)) |
fa8d436c | 3532 | malloc_consolidate(av); |
7799b7b3 | 3533 | } |
f65fd747 | 3534 | |
fa8d436c UD |
3535 | /* |
3536 | Process recently freed or remaindered chunks, taking one only if | |
3537 | it is an exact fit, or, if this is a small request, the chunk is the remainder from | |
3538 | the most recent non-exact fit. Place other traversed chunks in | |
3539 | bins. Note that this step is the only place in any routine where | |
3540 | chunks are placed in bins. | |
3541 | ||
3542 | The outer loop here is needed because we might not realize until | |
3543 | near the end of malloc that we should have consolidated, so must | |
3544 | do so and retry. This happens at most once, and only when we would | |
3545 | otherwise need to expand memory to service a "small" request. | |
3546 | */ | |
a9177ff5 RM |
3547 | |
3548 | for(;;) { | |
3549 | ||
72320021 | 3550 | int iters = 0; |
fa8d436c UD |
3551 | while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) { |
3552 | bck = victim->bk; | |
6cce6540 UD |
3553 | if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0) |
3554 | || __builtin_expect (victim->size > av->system_mem, 0)) | |
3555 | malloc_printerr (check_action, "malloc(): memory corruption", | |
3556 | chunk2mem (victim)); | |
fa8d436c UD |
3557 | size = chunksize(victim); |
3558 | ||
a9177ff5 | 3559 | /* |
72f90263 UD |
3560 | If a small request, try to use the last remainder if it is the | |
3561 | only chunk in the unsorted bin. This helps promote locality for | |
3562 | runs of consecutive small requests. This is the only | |
3563 | exception to best-fit, and applies only when there is | |
3564 | no exact fit for a small chunk. | |
fa8d436c UD |
3565 | */ |
3566 | ||
a9177ff5 | 3567 | if (in_smallbin_range(nb) && |
72f90263 UD |
3568 | bck == unsorted_chunks(av) && |
3569 | victim == av->last_remainder && | |
3570 | (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { | |
3571 | ||
3572 | /* split and reattach remainder */ | |
3573 | remainder_size = size - nb; | |
3574 | remainder = chunk_at_offset(victim, nb); | |
3575 | unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; | |
3576 | av->last_remainder = remainder; | |
3577 | remainder->bk = remainder->fd = unsorted_chunks(av); | |
7ecfbd38 UD |
3578 | if (!in_smallbin_range(remainder_size)) |
3579 | { | |
3580 | remainder->fd_nextsize = NULL; | |
3581 | remainder->bk_nextsize = NULL; | |
3582 | } | |
a9177ff5 | 3583 | |
72f90263 | 3584 | set_head(victim, nb | PREV_INUSE | |
fa8d436c | 3585 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
72f90263 UD |
3586 | set_head(remainder, remainder_size | PREV_INUSE); |
3587 | set_foot(remainder, remainder_size); | |
a9177ff5 | 3588 | |
72f90263 | 3589 | check_malloced_chunk(av, victim, nb); |
854278df UD |
3590 | void *p = chunk2mem(victim); |
3591 | if (__builtin_expect (perturb_byte, 0)) | |
3592 | alloc_perturb (p, bytes); | |
3593 | return p; | |
fa8d436c | 3594 | } |
f65fd747 | 3595 | |
fa8d436c UD |
3596 | /* remove from unsorted list */ |
3597 | unsorted_chunks(av)->bk = bck; | |
3598 | bck->fd = unsorted_chunks(av); | |
a9177ff5 | 3599 | |
fa8d436c | 3600 | /* Take now instead of binning if exact fit */ |
a9177ff5 | 3601 | |
fa8d436c | 3602 | if (size == nb) { |
72f90263 | 3603 | set_inuse_bit_at_offset(victim, size); |
fa8d436c UD |
3604 | if (av != &main_arena) |
3605 | victim->size |= NON_MAIN_ARENA; | |
72f90263 | 3606 | check_malloced_chunk(av, victim, nb); |
854278df UD |
3607 | void *p = chunk2mem(victim); |
3608 | if (__builtin_expect (perturb_byte, 0)) | |
3609 | alloc_perturb (p, bytes); | |
3610 | return p; | |
fa8d436c | 3611 | } |
a9177ff5 | 3612 | |
fa8d436c | 3613 | /* place chunk in bin */ |
a9177ff5 | 3614 | |
fa8d436c | 3615 | if (in_smallbin_range(size)) { |
72f90263 UD |
3616 | victim_index = smallbin_index(size); |
3617 | bck = bin_at(av, victim_index); | |
3618 | fwd = bck->fd; | |
fa8d436c UD |
3619 | } |
3620 | else { | |
72f90263 UD |
3621 | victim_index = largebin_index(size); |
3622 | bck = bin_at(av, victim_index); | |
3623 | fwd = bck->fd; | |
fa8d436c | 3624 | |
72f90263 UD |
3625 | /* maintain large bins in sorted order */ |
3626 | if (fwd != bck) { | |
fa8d436c | 3627 | /* Or with inuse bit to speed comparisons */ |
72f90263 UD |
3628 | size |= PREV_INUSE; |
3629 | /* if smaller than smallest, bypass loop below */ | |
fa8d436c | 3630 | assert((bck->bk->size & NON_MAIN_ARENA) == 0); |
7ecfbd38 | 3631 | if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) { |
72f90263 UD |
3632 | fwd = bck; |
3633 | bck = bck->bk; | |
7ecfbd38 UD |
3634 | |
3635 | victim->fd_nextsize = fwd->fd; | |
3636 | victim->bk_nextsize = fwd->fd->bk_nextsize; | |
3637 | fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; | |
72f90263 UD |
3638 | } |
3639 | else { | |
fa8d436c | 3640 | assert((fwd->size & NON_MAIN_ARENA) == 0); |
7ecfbd38 UD |
3641 | while ((unsigned long) size < fwd->size) |
3642 | { | |
3643 | fwd = fwd->fd_nextsize; | |
3644 | assert((fwd->size & NON_MAIN_ARENA) == 0); | |
3645 | } | |
3646 | ||
3647 | if ((unsigned long) size == (unsigned long) fwd->size) | |
3648 | /* Always insert in the second position. */ | |
3649 | fwd = fwd->fd; | |
3650 | else | |
3651 | { | |
3652 | victim->fd_nextsize = fwd; | |
3653 | victim->bk_nextsize = fwd->bk_nextsize; | |
3654 | fwd->bk_nextsize = victim; | |
3655 | victim->bk_nextsize->fd_nextsize = victim; | |
3656 | } | |
3657 | bck = fwd->bk; | |
72f90263 | 3658 | } |
7ecfbd38 UD |
3659 | } else |
3660 | victim->fd_nextsize = victim->bk_nextsize = victim; | |
fa8d436c | 3661 | } |
a9177ff5 | 3662 | |
fa8d436c UD |
3663 | mark_bin(av, victim_index); |
3664 | victim->bk = bck; | |
3665 | victim->fd = fwd; | |
3666 | fwd->bk = victim; | |
3667 | bck->fd = victim; | |
3997b7c4 | 3668 | |
3997b7c4 UD |
3669 | #define MAX_ITERS 10000 |
3670 | if (++iters >= MAX_ITERS) | |
3671 | break; | |
fa8d436c | 3672 | } |
a9177ff5 | 3673 | |
fa8d436c UD |
3674 | /* |
3675 | If a large request, scan through the chunks of the current bin in | |
7ecfbd38 | 3676 | sorted order to find smallest that fits. Use the skip list for this. |
fa8d436c | 3677 | */ |
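/* Editorial note on the skip list used below: within a large bin the
   chunks are kept sorted by size, and the first chunk of each distinct
   size is additionally threaded onto a second doubly linked list via
   fd_nextsize/bk_nextsize.  Walking bk_nextsize therefore visits each
   size class once rather than each chunk.  */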
a9177ff5 | 3678 | |
fa8d436c UD |
3679 | if (!in_smallbin_range(nb)) { |
3680 | bin = bin_at(av, idx); | |
f65fd747 | 3681 | |
fa8d436c | 3682 | /* skip scan if empty or largest chunk is too small */ |
7ecfbd38 | 3683 | if ((victim = first(bin)) != bin && |
72f90263 | 3684 | (unsigned long)(victim->size) >= (unsigned long)(nb)) { |
f65fd747 | 3685 | |
7ecfbd38 | 3686 | victim = victim->bk_nextsize; |
72f90263 UD |
3687 | while (((unsigned long)(size = chunksize(victim)) < |
3688 | (unsigned long)(nb))) | |
3689 | victim = victim->bk_nextsize; | |
7ecfbd38 UD |
3690 | |
3691 | /* Avoid removing the first entry for a size so that the skip | |
3692 | list does not have to be rerouted. */ | |
3693 | if (victim != last(bin) && victim->size == victim->fd->size) | |
3694 | victim = victim->fd; | |
f65fd747 | 3695 | |
72f90263 UD |
3696 | remainder_size = size - nb; |
3697 | unlink(victim, bck, fwd); | |
a9177ff5 | 3698 | |
72f90263 UD |
3699 | /* Exhaust */ |
3700 | if (remainder_size < MINSIZE) { | |
3701 | set_inuse_bit_at_offset(victim, size); | |
fa8d436c UD |
3702 | if (av != &main_arena) |
3703 | victim->size |= NON_MAIN_ARENA; | |
72f90263 UD |
3704 | } |
3705 | /* Split */ | |
3706 | else { | |
3707 | remainder = chunk_at_offset(victim, nb); | |
3708 | /* We cannot assume the unsorted list is empty and therefore | |
3709 | have to perform a complete insert here. */ | |
b80770b2 UD |
3710 | bck = unsorted_chunks(av); |
3711 | fwd = bck->fd; | |
f6887a0d UD |
3712 | if (__builtin_expect (fwd->bk != bck, 0)) |
3713 | { | |
3714 | errstr = "malloc(): corrupted unsorted chunks"; | |
3715 | goto errout; | |
3716 | } | |
b80770b2 UD |
3717 | remainder->bk = bck; |
3718 | remainder->fd = fwd; | |
3719 | bck->fd = remainder; | |
3720 | fwd->bk = remainder; | |
7ecfbd38 UD |
3721 | if (!in_smallbin_range(remainder_size)) |
3722 | { | |
3723 | remainder->fd_nextsize = NULL; | |
3724 | remainder->bk_nextsize = NULL; | |
3725 | } | |
72f90263 | 3726 | set_head(victim, nb | PREV_INUSE | |
fa8d436c | 3727 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
72f90263 UD |
3728 | set_head(remainder, remainder_size | PREV_INUSE); |
3729 | set_foot(remainder, remainder_size); | |
3730 | } | |
854278df UD |
3731 | check_malloced_chunk(av, victim, nb); |
3732 | void *p = chunk2mem(victim); | |
3733 | if (__builtin_expect (perturb_byte, 0)) | |
3734 | alloc_perturb (p, bytes); | |
3735 | return p; | |
fa8d436c | 3736 | } |
a9177ff5 | 3737 | } |
f65fd747 | 3738 | |
fa8d436c UD |
3739 | /* |
3740 | Search for a chunk by scanning bins, starting with next largest | |
3741 | bin. This search is strictly by best-fit; i.e., the smallest | |
3742 | (with ties going to approximately the least recently used) chunk | |
3743 | that fits is selected. | |
a9177ff5 | 3744 | |
fa8d436c UD |
3745 | The bitmap avoids needing to check that most blocks are nonempty. |
3746 | The particular case of skipping all bins during warm-up phases | |
3747 | when no chunks have been returned yet is faster than it might look. | |
3748 | */ | |
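/* Editorial sketch of the binmap encoding used below, assuming
   BINMAPSHIFT == 5 (32 bins per map word):
     idx2block(i) == i >> 5
     idx2bit(i)   == 1U << (i & 31)
   so testing or clearing bin i touches a single bit of av->binmap[i >> 5],
   and a zero word lets a whole block of 32 bins be skipped at once.  */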
a9177ff5 | 3749 | |
fa8d436c UD |
3750 | ++idx; |
3751 | bin = bin_at(av,idx); | |
3752 | block = idx2block(idx); | |
3753 | map = av->binmap[block]; | |
3754 | bit = idx2bit(idx); | |
a9177ff5 | 3755 | |
fa8d436c UD |
3756 | for (;;) { |
3757 | ||
3758 | /* Skip rest of block if there are no more set bits in this block. */ | |
3759 | if (bit > map || bit == 0) { | |
72f90263 UD |
3760 | do { |
3761 | if (++block >= BINMAPSIZE) /* out of bins */ | |
3762 | goto use_top; | |
3763 | } while ( (map = av->binmap[block]) == 0); | |
fa8d436c | 3764 | |
72f90263 UD |
3765 | bin = bin_at(av, (block << BINMAPSHIFT)); |
3766 | bit = 1; | |
fa8d436c | 3767 | } |
a9177ff5 | 3768 | |
fa8d436c UD |
3769 | /* Advance to bin with set bit. There must be one. */ |
3770 | while ((bit & map) == 0) { | |
72f90263 UD |
3771 | bin = next_bin(bin); |
3772 | bit <<= 1; | |
3773 | assert(bit != 0); | |
fa8d436c | 3774 | } |
a9177ff5 | 3775 | |
fa8d436c UD |
3776 | /* Inspect the bin. It is likely to be non-empty */ |
3777 | victim = last(bin); | |
a9177ff5 | 3778 | |
fa8d436c UD |
3779 | /* If a false alarm (empty bin), clear the bit. */ |
3780 | if (victim == bin) { | |
72f90263 UD |
3781 | av->binmap[block] = map &= ~bit; /* Write through */ |
3782 | bin = next_bin(bin); | |
3783 | bit <<= 1; | |
fa8d436c | 3784 | } |
a9177ff5 | 3785 | |
fa8d436c | 3786 | else { |
72f90263 | 3787 | size = chunksize(victim); |
fa8d436c | 3788 | |
72f90263 UD |
3789 | /* We know the first chunk in this bin is big enough to use. */ |
3790 | assert((unsigned long)(size) >= (unsigned long)(nb)); | |
fa8d436c | 3791 | |
72f90263 | 3792 | remainder_size = size - nb; |
a9177ff5 | 3793 | |
72f90263 UD |
3794 | /* unlink */ |
3795 | unlink(victim, bck, fwd); | |
a9177ff5 | 3796 | |
72f90263 UD |
3797 | /* Exhaust */ |
3798 | if (remainder_size < MINSIZE) { | |
3799 | set_inuse_bit_at_offset(victim, size); | |
fa8d436c UD |
3800 | if (av != &main_arena) |
3801 | victim->size |= NON_MAIN_ARENA; | |
72f90263 | 3802 | } |
a9177ff5 | 3803 | |
72f90263 UD |
3804 | /* Split */ |
3805 | else { | |
3806 | remainder = chunk_at_offset(victim, nb); | |
a9177ff5 | 3807 | |
41999a1a UD |
3808 | /* We cannot assume the unsorted list is empty and therefore |
3809 | have to perform a complete insert here. */ | |
3810 | bck = unsorted_chunks(av); | |
3811 | fwd = bck->fd; | |
f6887a0d UD |
3812 | if (__builtin_expect (fwd->bk != bck, 0)) |
3813 | { | |
3814 | errstr = "malloc(): corrupted unsorted chunks 2"; | |
3815 | goto errout; | |
3816 | } | |
41999a1a UD |
3817 | remainder->bk = bck; |
3818 | remainder->fd = fwd; | |
3819 | bck->fd = remainder; | |
3820 | fwd->bk = remainder; | |
3821 | ||
72f90263 UD |
3822 | /* advertise as last remainder */ |
3823 | if (in_smallbin_range(nb)) | |
3824 | av->last_remainder = remainder; | |
7ecfbd38 UD |
3825 | if (!in_smallbin_range(remainder_size)) |
3826 | { | |
3827 | remainder->fd_nextsize = NULL; | |
3828 | remainder->bk_nextsize = NULL; | |
3829 | } | |
72f90263 | 3830 | set_head(victim, nb | PREV_INUSE | |
fa8d436c | 3831 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
72f90263 UD |
3832 | set_head(remainder, remainder_size | PREV_INUSE); |
3833 | set_foot(remainder, remainder_size); | |
3834 | } | |
854278df UD |
3835 | check_malloced_chunk(av, victim, nb); |
3836 | void *p = chunk2mem(victim); | |
3837 | if (__builtin_expect (perturb_byte, 0)) | |
3838 | alloc_perturb (p, bytes); | |
3839 | return p; | |
fa8d436c UD |
3840 | } |
3841 | } | |
f65fd747 | 3842 | |
a9177ff5 | 3843 | use_top: |
fa8d436c UD |
3844 | /* |
3845 | If large enough, split off the chunk bordering the end of memory | |
3846 | (held in av->top). Note that this is in accord with the best-fit | |
3847 | search rule. In effect, av->top is treated as larger (and thus | |
3848 | less well fitting) than any other available chunk since it can | |
3849 | be extended to be as large as necessary (up to system | |
3850 | limitations). | |
3851 | ||
3852 | We require that av->top always exists (i.e., has size >= | |
3853 | MINSIZE) after initialization, so if it would otherwise be | |
17edb30c | 3854 | exhausted by the current request, it is replenished. (The main |
fa8d436c UD |
3855 | reason for ensuring it exists is that we may need MINSIZE space |
3856 | to put in fenceposts in sysmalloc.) | |
3857 | */ | |
f65fd747 | 3858 | |
fa8d436c UD |
3859 | victim = av->top; |
3860 | size = chunksize(victim); | |
a9177ff5 | 3861 | |
fa8d436c UD |
3862 | if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { |
3863 | remainder_size = size - nb; | |
3864 | remainder = chunk_at_offset(victim, nb); | |
3865 | av->top = remainder; | |
3866 | set_head(victim, nb | PREV_INUSE | | |
3867 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
3868 | set_head(remainder, remainder_size | PREV_INUSE); | |
f65fd747 | 3869 | |
fa8d436c | 3870 | check_malloced_chunk(av, victim, nb); |
854278df UD |
3871 | void *p = chunk2mem(victim); |
3872 | if (__builtin_expect (perturb_byte, 0)) | |
3873 | alloc_perturb (p, bytes); | |
3874 | return p; | |
fa8d436c | 3875 | } |
f65fd747 | 3876 | |
425ce2ed UD |
3877 | /* When we are using atomic ops to free fast chunks we can get |
3878 | here for all block sizes. */ | |
3879 | else if (have_fastchunks(av)) { | |
3880 | malloc_consolidate(av); | |
3881 | /* restore original bin index */ | |
3882 | if (in_smallbin_range(nb)) | |
3883 | idx = smallbin_index(nb); | |
3884 | else | |
3885 | idx = largebin_index(nb); | |
3886 | } | |
f65fd747 | 3887 | |
a9177ff5 RM |
3888 | /* |
3889 | Otherwise, relay to handle system-dependent cases | |
fa8d436c | 3890 | */ |
854278df UD |
3891 | else { |
3892 | void *p = sYSMALLOc(nb, av); | |
17edb30c | 3893 | if (p != NULL && __builtin_expect (perturb_byte, 0)) |
854278df UD |
3894 | alloc_perturb (p, bytes); |
3895 | return p; | |
3896 | } | |
fa8d436c UD |
3897 | } |
3898 | } | |
f65fd747 | 3899 | |
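/*
  A standalone sketch of the binmap scan used above, with illustrative
  demo_* names and sizes (128 bins, 32-bit map words); these are not
  part of this allocator.  One bit marks each bin, so a whole empty
  block of 32 bins is skipped with a single word comparison.
*/

#define DEMO_NBINS    128
#define DEMO_BITS     32
#define DEMO_NBLOCKS  (DEMO_NBINS / DEMO_BITS)

static unsigned int demo_binmap[DEMO_NBLOCKS];

static void demo_mark (int idx)
{
  demo_binmap[idx / DEMO_BITS] |= 1u << (idx % DEMO_BITS);
}

/* Return the first marked bin index >= idx, or -1 if there is none
   (the analogue of falling through to use_top above). */
static int demo_next_marked (int idx)
{
  int block = idx / DEMO_BITS;
  unsigned int map = demo_binmap[block];
  unsigned int bit = 1u << (idx % DEMO_BITS);

  /* bit > map means every set bit in this block is below idx. */
  if (bit > map)
    {
      do
        {
          if (++block >= DEMO_NBLOCKS)
            return -1;                 /* out of bins */
        }
      while ((map = demo_binmap[block]) == 0);
      idx = block * DEMO_BITS;
      bit = 1;
    }

  /* Advance to the set bit.  There must be one. */
  while ((bit & map) == 0)
    {
      bit <<= 1;
      ++idx;
    }
  return idx;
}

/* E.g. after demo_mark (70), demo_next_marked (3) returns 70 having
   inspected only three map words. */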
fa8d436c UD |
3900 | /* |
3901 | ------------------------------ free ------------------------------ | |
f65fd747 UD |
3902 | */ |
3903 | ||
78ac92ad | 3904 | static void |
425ce2ed | 3905 | _int_free(mstate av, mchunkptr p, int have_lock) |
f65fd747 | 3906 | { |
fa8d436c UD |
3907 | INTERNAL_SIZE_T size; /* its size */ |
3908 | mfastbinptr* fb; /* associated fastbin */ | |
3909 | mchunkptr nextchunk; /* next contiguous chunk */ | |
3910 | INTERNAL_SIZE_T nextsize; /* its size */ | |
3911 | int nextinuse; /* true if nextchunk is used */ | |
3912 | INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */ | |
3913 | mchunkptr bck; /* misc temp for linking */ | |
3914 | mchunkptr fwd; /* misc temp for linking */ | |
3915 | ||
37fa1953 | 3916 | const char *errstr = NULL; |
425ce2ed | 3917 | int locked = 0; |
f65fd747 | 3918 | |
37fa1953 | 3919 | size = chunksize(p); |
f65fd747 | 3920 | |
37fa1953 UD |
3921 | /* Little security check which won't hurt performance: the |
3922 | allocator never wraps around at the end of the address space. |
3923 | Therefore we can exclude some size values which might appear | |
3924 | here by accident or by "design" from some intruder. */ | |
dc165f7b | 3925 | if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0) |
073f560e | 3926 | || __builtin_expect (misaligned_chunk (p), 0)) |
37fa1953 UD |
3927 | { |
3928 | errstr = "free(): invalid pointer"; | |
3929 | errout: | |
425ce2ed UD |
3930 | if (! have_lock && locked) |
3931 | (void)mutex_unlock(&av->mutex); | |
78ac92ad | 3932 | malloc_printerr (check_action, errstr, chunk2mem(p)); |
37fa1953 | 3933 | return; |
fa8d436c | 3934 | } |
bf589066 UD |
3935 | /* We know that each chunk is at least MINSIZE bytes in size. */ |
3936 | if (__builtin_expect (size < MINSIZE, 0)) | |
3937 | { | |
3938 | errstr = "free(): invalid size"; | |
3939 | goto errout; | |
3940 | } | |
f65fd747 | 3941 | |
37fa1953 | 3942 | check_inuse_chunk(av, p); |
f65fd747 | 3943 | |
37fa1953 UD |
3944 | /* |
3945 | If eligible, place chunk on a fastbin so it can be found | |
3946 | and used quickly in malloc. | |
3947 | */ | |
6bf4302e | 3948 | |
9bf248c6 | 3949 | if ((unsigned long)(size) <= (unsigned long)(get_max_fast ()) |
6bf4302e | 3950 | |
37fa1953 UD |
3951 | #if TRIM_FASTBINS |
3952 | /* | |
3953 | If TRIM_FASTBINS is set, don't place chunks |
3954 | bordering top into fastbins | |
3955 | */ | |
3956 | && (chunk_at_offset(p, size) != av->top) | |
3957 | #endif | |
3958 | ) { | |
fa8d436c | 3959 | |
893e6098 UD |
3960 | if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0) |
3961 | || __builtin_expect (chunksize (chunk_at_offset (p, size)) | |
3962 | >= av->system_mem, 0)) | |
3963 | { | |
bec466d9 UD |
3964 | /* We might not have a lock at this point and concurrent modifications |
3965 | of system_mem might have led to a false positive. Redo the test |
3966 | after getting the lock. */ | |
3967 | if (have_lock | |
3968 | || ({ assert (locked == 0); | |
3969 | mutex_lock(&av->mutex); | |
3970 | locked = 1; | |
3971 | chunk_at_offset (p, size)->size <= 2 * SIZE_SZ | |
3972 | || chunksize (chunk_at_offset (p, size)) >= av->system_mem; | |
3973 | })) | |
bec466d9 UD |
3974 | { |
3975 | errstr = "free(): invalid next size (fast)"; | |
3976 | goto errout; | |
3977 | } | |
bec466d9 UD |
3978 | if (! have_lock) |
3979 | { | |
3980 | (void)mutex_unlock(&av->mutex); | |
3981 | locked = 0; | |
3982 | } | |
893e6098 UD |
3983 | } |
3984 | ||
425ce2ed | 3985 | if (__builtin_expect (perturb_byte, 0)) |
fdc0f374 | 3986 | free_perturb (chunk2mem(p), size - 2 * SIZE_SZ); |
425ce2ed | 3987 | |
37fa1953 | 3988 | set_fastchunks(av); |
90a3055e UD |
3989 | unsigned int idx = fastbin_index(size); |
3990 | fb = &fastbin (av, idx); | |
425ce2ed | 3991 | |
425ce2ed UD |
3992 | mchunkptr fd; |
3993 | mchunkptr old = *fb; | |
5f24d53a | 3994 | unsigned int old_idx = ~0u; |
425ce2ed UD |
3995 | do |
3996 | { | |
3997 | /* Another simple check: make sure the top of the bin is not the | |
3998 | record we are going to add (i.e., double free). */ | |
3999 | if (__builtin_expect (old == p, 0)) | |
4000 | { | |
4001 | errstr = "double free or corruption (fasttop)"; | |
4002 | goto errout; | |
4003 | } | |
5f24d53a UD |
4004 | if (old != NULL) |
4005 | old_idx = fastbin_index(chunksize(old)); | |
425ce2ed UD |
4006 | p->fd = fd = old; |
4007 | } | |
bea0ac1d | 4008 | while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd); |
5f24d53a UD |
4009 | |
4010 | if (fd != NULL && __builtin_expect (old_idx != idx, 0)) | |
4011 | { | |
4012 | errstr = "invalid fastbin entry (free)"; | |
4013 | goto errout; | |
4014 | } | |
37fa1953 | 4015 | } |
f65fd747 | 4016 | |
37fa1953 UD |
4017 | /* |
4018 | Consolidate other non-mmapped chunks as they arrive. | |
4019 | */ | |
fa8d436c | 4020 | |
37fa1953 | 4021 | else if (!chunk_is_mmapped(p)) { |
425ce2ed | 4022 | if (! have_lock) { |
22a89187 | 4023 | #if THREAD_STATS |
425ce2ed UD |
4024 | if(!mutex_trylock(&av->mutex)) |
4025 | ++(av->stat_lock_direct); | |
4026 | else { | |
4027 | (void)mutex_lock(&av->mutex); | |
4028 | ++(av->stat_lock_wait); | |
4029 | } | |
22a89187 | 4030 | #else |
425ce2ed | 4031 | (void)mutex_lock(&av->mutex); |
22a89187 | 4032 | #endif |
425ce2ed UD |
4033 | locked = 1; |
4034 | } | |
425ce2ed | 4035 | |
37fa1953 | 4036 | nextchunk = chunk_at_offset(p, size); |
fa8d436c | 4037 | |
37fa1953 UD |
4038 | /* Lightweight tests: check whether the block is already the |
4039 | top block. */ | |
4040 | if (__builtin_expect (p == av->top, 0)) | |
4041 | { | |
4042 | errstr = "double free or corruption (top)"; | |
4043 | goto errout; | |
4044 | } | |
4045 | /* Or whether the next chunk is beyond the boundaries of the arena. */ | |
4046 | if (__builtin_expect (contiguous (av) | |
4047 | && (char *) nextchunk | |
4048 | >= ((char *) av->top + chunksize(av->top)), 0)) | |
4049 | { | |
4050 | errstr = "double free or corruption (out)"; | |
4051 | goto errout; | |
4052 | } | |
4053 | /* Or whether the block is actually not marked used. */ | |
4054 | if (__builtin_expect (!prev_inuse(nextchunk), 0)) | |
4055 | { | |
4056 | errstr = "double free or corruption (!prev)"; | |
4057 | goto errout; | |
4058 | } | |
fa8d436c | 4059 | |
37fa1953 | 4060 | nextsize = chunksize(nextchunk); |
893e6098 UD |
4061 | if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0) |
4062 | || __builtin_expect (nextsize >= av->system_mem, 0)) | |
4063 | { | |
76761b63 | 4064 | errstr = "free(): invalid next size (normal)"; |
893e6098 UD |
4065 | goto errout; |
4066 | } | |
fa8d436c | 4067 | |
854278df | 4068 | if (__builtin_expect (perturb_byte, 0)) |
fdc0f374 | 4069 | free_perturb (chunk2mem(p), size - 2 * SIZE_SZ); |
854278df | 4070 | |
37fa1953 UD |
4071 | /* consolidate backward */ |
4072 | if (!prev_inuse(p)) { | |
4073 | prevsize = p->prev_size; | |
4074 | size += prevsize; | |
4075 | p = chunk_at_offset(p, -((long) prevsize)); | |
4076 | unlink(p, bck, fwd); | |
4077 | } | |
a9177ff5 | 4078 | |
37fa1953 UD |
4079 | if (nextchunk != av->top) { |
4080 | /* get and clear inuse bit */ | |
4081 | nextinuse = inuse_bit_at_offset(nextchunk, nextsize); | |
4082 | ||
4083 | /* consolidate forward */ | |
4084 | if (!nextinuse) { | |
4085 | unlink(nextchunk, bck, fwd); | |
4086 | size += nextsize; | |
4087 | } else | |
4088 | clear_inuse_bit_at_offset(nextchunk, 0); | |
10dc2a90 | 4089 | |
fa8d436c | 4090 | /* |
37fa1953 UD |
4091 | Place the chunk in unsorted chunk list. Chunks are |
4092 | not placed into regular bins until after they have | |
4093 | been given one chance to be used in malloc. | |
fa8d436c | 4094 | */ |
f65fd747 | 4095 | |
37fa1953 UD |
4096 | bck = unsorted_chunks(av); |
4097 | fwd = bck->fd; | |
f6887a0d UD |
4098 | if (__builtin_expect (fwd->bk != bck, 0)) |
4099 | { | |
4100 | errstr = "free(): corrupted unsorted chunks"; | |
4101 | goto errout; | |
4102 | } | |
37fa1953 | 4103 | p->fd = fwd; |
7ecfbd38 UD |
4104 | p->bk = bck; |
4105 | if (!in_smallbin_range(size)) | |
4106 | { | |
4107 | p->fd_nextsize = NULL; | |
4108 | p->bk_nextsize = NULL; | |
4109 | } | |
37fa1953 UD |
4110 | bck->fd = p; |
4111 | fwd->bk = p; | |
8a4b65b4 | 4112 | |
37fa1953 UD |
4113 | set_head(p, size | PREV_INUSE); |
4114 | set_foot(p, size); | |
4115 | ||
4116 | check_free_chunk(av, p); | |
4117 | } | |
4118 | ||
4119 | /* | |
4120 | If the chunk borders the current high end of memory, | |
4121 | consolidate into top | |
4122 | */ | |
4123 | ||
4124 | else { | |
4125 | size += nextsize; | |
4126 | set_head(p, size | PREV_INUSE); | |
4127 | av->top = p; | |
4128 | check_chunk(av, p); | |
4129 | } | |
4130 | ||
4131 | /* | |
4132 | If freeing a large space, consolidate possibly-surrounding | |
4133 | chunks. Then, if the total unused topmost memory exceeds trim | |
4134 | threshold, ask malloc_trim to reduce top. | |
4135 | ||
4136 | Unless max_fast is 0, we don't know if there are fastbins | |
4137 | bordering top, so we cannot tell for sure whether the threshold |
4138 | has been reached unless fastbins are consolidated. But we | |
4139 | don't want to consolidate on each free. As a compromise, | |
4140 | consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD | |
4141 | is reached. | |
4142 | */ | |
fa8d436c | 4143 | |
37fa1953 UD |
4144 | if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { |
4145 | if (have_fastchunks(av)) | |
4146 | malloc_consolidate(av); | |
fa8d436c | 4147 | |
37fa1953 | 4148 | if (av == &main_arena) { |
a9177ff5 | 4149 | #ifndef MORECORE_CANNOT_TRIM |
37fa1953 UD |
4150 | if ((unsigned long)(chunksize(av->top)) >= |
4151 | (unsigned long)(mp_.trim_threshold)) | |
4152 | sYSTRIm(mp_.top_pad, av); | |
fa8d436c | 4153 | #endif |
37fa1953 UD |
4154 | } else { |
4155 | /* Always try heap_trim(), even if the top chunk is not | |
4156 | large, because the corresponding heap might go away. */ | |
4157 | heap_info *heap = heap_for_ptr(top(av)); | |
fa8d436c | 4158 | |
37fa1953 UD |
4159 | assert(heap->ar_ptr == av); |
4160 | heap_trim(heap, mp_.top_pad); | |
fa8d436c | 4161 | } |
fa8d436c | 4162 | } |
10dc2a90 | 4163 | |
425ce2ed UD |
4164 | if (! have_lock) { |
4165 | assert (locked); | |
4166 | (void)mutex_unlock(&av->mutex); | |
4167 | } | |
37fa1953 UD |
4168 | } |
4169 | /* | |
22a89187 | 4170 | If the chunk was allocated via mmap, release via munmap(). |
37fa1953 UD |
4171 | */ |
4172 | ||
4173 | else { | |
c120d94d | 4174 | munmap_chunk (p); |
fa8d436c | 4175 | } |
10dc2a90 UD |
4176 | } |
4177 | ||
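/*
  A standalone sketch of the lock-free fastbin push performed above.
  glibc uses its internal catomic_compare_and_exchange_val_rel; the GCC
  __sync builtin is substituted here so the example is self-contained.
  The demo_* names are ours, for illustration only.
*/

struct demo_node { struct demo_node *fd; };

/* Push NODE onto the LIFO list rooted at *TOP.  Returns 0 on success,
   -1 when NODE is already the list head -- the cheap "double free or
   corruption (fasttop)" detection used above. */
static int demo_push (struct demo_node **top, struct demo_node *node)
{
  struct demo_node *old = *top;
  struct demo_node *expected;

  do
    {
      if (old == node)
        return -1;                     /* freeing the current top twice */
      node->fd = expected = old;
    }
  /* Retry if another thread pushed or popped between the read and the
     compare-and-swap; the builtin returns the previous value of *TOP. */
  while ((old = __sync_val_compare_and_swap (top, expected, node))
         != expected);

  return 0;
}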
fa8d436c UD |
4178 | /* |
4179 | ------------------------- malloc_consolidate ------------------------- | |
4180 | ||
4181 | malloc_consolidate is a specialized version of free() that tears | |
4182 | down chunks held in fastbins. Free itself cannot be used for this | |
4183 | purpose since, among other things, it might place chunks back onto | |
4184 | fastbins. So, instead, we need to use a minor variant of the same | |
4185 | code. | |
a9177ff5 | 4186 | |
fa8d436c UD |
4187 | Also, because this routine needs to be called the first time through |
4188 | malloc anyway, it turns out to be the perfect place to trigger | |
4189 | initialization code. | |
4190 | */ | |
4191 | ||
fa8d436c | 4192 | static void malloc_consolidate(mstate av) |
10dc2a90 | 4193 | { |
fa8d436c UD |
4194 | mfastbinptr* fb; /* current fastbin being consolidated */ |
4195 | mfastbinptr* maxfb; /* last fastbin (for loop control) */ | |
4196 | mchunkptr p; /* current chunk being consolidated */ | |
4197 | mchunkptr nextp; /* next chunk to consolidate */ | |
4198 | mchunkptr unsorted_bin; /* bin header */ | |
4199 | mchunkptr first_unsorted; /* chunk to link to */ | |
4200 | ||
4201 | /* These have same use as in free() */ | |
4202 | mchunkptr nextchunk; | |
4203 | INTERNAL_SIZE_T size; | |
4204 | INTERNAL_SIZE_T nextsize; | |
4205 | INTERNAL_SIZE_T prevsize; | |
4206 | int nextinuse; | |
4207 | mchunkptr bck; | |
4208 | mchunkptr fwd; | |
10dc2a90 | 4209 | |
fa8d436c UD |
4210 | /* |
4211 | If max_fast is 0, we know that av hasn't | |
4212 | yet been initialized, in which case do so below | |
4213 | */ | |
10dc2a90 | 4214 | |
9bf248c6 | 4215 | if (get_max_fast () != 0) { |
fa8d436c | 4216 | clear_fastchunks(av); |
10dc2a90 | 4217 | |
fa8d436c | 4218 | unsorted_bin = unsorted_chunks(av); |
10dc2a90 | 4219 | |
fa8d436c UD |
4220 | /* |
4221 | Remove each chunk from fast bin and consolidate it, placing it | |
4222 | then in unsorted bin. Among other reasons for doing this, | |
4223 | placing in unsorted bin avoids needing to calculate actual bins | |
4224 | until malloc is sure that chunks aren't immediately going to be | |
4225 | reused anyway. | |
4226 | */ | |
a9177ff5 | 4227 | |
425ce2ed | 4228 | maxfb = &fastbin (av, NFASTBINS - 1); |
425ce2ed | 4229 | fb = &fastbin (av, 0); |
fa8d436c | 4230 | do { |
425ce2ed | 4231 | p = atomic_exchange_acq (fb, 0); |
425ce2ed | 4232 | if (p != 0) { |
72f90263 UD |
4233 | do { |
4234 | check_inuse_chunk(av, p); | |
4235 | nextp = p->fd; | |
4236 | ||
4237 | /* Slightly streamlined version of consolidation code in free() */ | |
4238 | size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); | |
4239 | nextchunk = chunk_at_offset(p, size); | |
4240 | nextsize = chunksize(nextchunk); | |
4241 | ||
4242 | if (!prev_inuse(p)) { | |
4243 | prevsize = p->prev_size; | |
4244 | size += prevsize; | |
4245 | p = chunk_at_offset(p, -((long) prevsize)); | |
4246 | unlink(p, bck, fwd); | |
4247 | } | |
4248 | ||
4249 | if (nextchunk != av->top) { | |
4250 | nextinuse = inuse_bit_at_offset(nextchunk, nextsize); | |
4251 | ||
4252 | if (!nextinuse) { | |
4253 | size += nextsize; | |
4254 | unlink(nextchunk, bck, fwd); | |
4255 | } else | |
fa8d436c | 4256 | clear_inuse_bit_at_offset(nextchunk, 0); |
a9177ff5 | 4257 | |
72f90263 UD |
4258 | first_unsorted = unsorted_bin->fd; |
4259 | unsorted_bin->fd = p; | |
4260 | first_unsorted->bk = p; | |
a9177ff5 | 4261 | |
72f90263 | 4262 | if (!in_smallbin_range (size)) { |
7ecfbd38 UD |
4263 | p->fd_nextsize = NULL; |
4264 | p->bk_nextsize = NULL; | |
4265 | } | |
4266 | ||
72f90263 UD |
4267 | set_head(p, size | PREV_INUSE); |
4268 | p->bk = unsorted_bin; | |
4269 | p->fd = first_unsorted; | |
4270 | set_foot(p, size); | |
4271 | } | |
a9177ff5 | 4272 | |
72f90263 UD |
4273 | else { |
4274 | size += nextsize; | |
4275 | set_head(p, size | PREV_INUSE); | |
4276 | av->top = p; | |
4277 | } | |
a9177ff5 | 4278 | |
72f90263 | 4279 | } while ( (p = nextp) != 0); |
a9177ff5 | 4280 | |
fa8d436c UD |
4281 | } |
4282 | } while (fb++ != maxfb); | |
4283 | } | |
4284 | else { | |
4285 | malloc_init_state(av); | |
4286 | check_malloc_state(av); | |
4287 | } | |
4288 | } | |
10dc2a90 | 4289 | |
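/*
  Both _int_free and malloc_consolidate above coalesce neighbours with
  boundary tags: the low bit of a chunk's size word (PREV_INUSE) says
  whether the previous chunk is in use, and a free chunk replicates its
  size into the prev_size field of the chunk after it.  A standalone
  sketch of that address arithmetic; demo_chunk and friends are our
  illustrative names, and bin unlinking is omitted.
*/

#include <stddef.h>

#define DEMO_PREV_INUSE ((size_t) 1)

struct demo_chunk
{
  size_t prev_size;           /* size of previous chunk, iff it is free */
  size_t size;                /* this chunk's size | flag bits */
};

#define demo_size(p)    ((p)->size & ~DEMO_PREV_INUSE)
#define demo_at(p, off) ((struct demo_chunk *) ((char *) (p) + (off)))

/* Merge P with a free previous neighbour and/or a free next neighbour
   and return the start of the merged free chunk.  After consolidation
   a free chunk never borders another free chunk, so the merged chunk's
   PREV_INUSE bit can be set unconditionally. */
static struct demo_chunk *
demo_coalesce (struct demo_chunk *p, int next_is_free)
{
  size_t size = demo_size (p);

  /* consolidate backward: prev_size is only meaningful when our
     PREV_INUSE bit is clear */
  if (!(p->size & DEMO_PREV_INUSE))
    {
      size += p->prev_size;
      p = demo_at (p, -(long) p->prev_size);
    }

  /* consolidate forward */
  if (next_is_free)
    size += demo_size (demo_at (p, size));

  p->size = size | DEMO_PREV_INUSE;     /* set_head */
  demo_at (p, size)->prev_size = size;  /* set_foot: the boundary tag */
  return p;
}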
fa8d436c UD |
4290 | /* |
4291 | ------------------------------ realloc ------------------------------ | |
4292 | */ | |
f65fd747 | 4293 | |
22a89187 | 4294 | void* |
4c8b8cc3 UD |
4295 | _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, |
4296 | INTERNAL_SIZE_T nb) | |
fa8d436c | 4297 | { |
fa8d436c UD |
4298 | mchunkptr newp; /* chunk to return */ |
4299 | INTERNAL_SIZE_T newsize; /* its size */ | |
22a89187 | 4300 | void* newmem; /* corresponding user mem */ |
f65fd747 | 4301 | |
fa8d436c | 4302 | mchunkptr next; /* next contiguous chunk after oldp */ |
f65fd747 | 4303 | |
fa8d436c UD |
4304 | mchunkptr remainder; /* extra space at end of newp */ |
4305 | unsigned long remainder_size; /* its size */ | |
f65fd747 | 4306 | |
fa8d436c UD |
4307 | mchunkptr bck; /* misc temp for linking */ |
4308 | mchunkptr fwd; /* misc temp for linking */ | |
2ed5fd9a | 4309 | |
fa8d436c UD |
4310 | unsigned long copysize; /* bytes to copy */ |
4311 | unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */ | |
a9177ff5 | 4312 | INTERNAL_SIZE_T* s; /* copy source */ |
fa8d436c | 4313 | INTERNAL_SIZE_T* d; /* copy destination */ |
f65fd747 | 4314 | |
76761b63 | 4315 | const char *errstr = NULL; |
f65fd747 | 4316 | |
6dd6a580 | 4317 | /* Sanity-check the old chunk's recorded size. */ |
76761b63 UD |
4318 | if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0) |
4319 | || __builtin_expect (oldsize >= av->system_mem, 0)) | |
4320 | { | |
4b04154d | 4321 | errstr = "realloc(): invalid old size"; |
4c8b8cc3 UD |
4322 | errout: |
4323 | malloc_printerr (check_action, errstr, chunk2mem(oldp)); | |
4324 | return NULL; | |
76761b63 UD |
4325 | } |
4326 | ||
fa8d436c | 4327 | check_inuse_chunk(av, oldp); |
f65fd747 | 4328 | |
4c8b8cc3 | 4329 | /* All callers already filter out mmap'ed chunks. */ |
4c8b8cc3 | 4330 | assert (!chunk_is_mmapped(oldp)); |
f65fd747 | 4331 | |
22a89187 UD |
4332 | next = chunk_at_offset(oldp, oldsize); |
4333 | INTERNAL_SIZE_T nextsize = chunksize(next); | |
4334 | if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0) | |
4335 | || __builtin_expect (nextsize >= av->system_mem, 0)) | |
4336 | { | |
4337 | errstr = "realloc(): invalid next size"; | |
4338 | goto errout; | |
4339 | } | |
4340 | ||
4341 | if ((unsigned long)(oldsize) >= (unsigned long)(nb)) { | |
4342 | /* already big enough; split below */ | |
4343 | newp = oldp; | |
4344 | newsize = oldsize; | |
4345 | } | |
4346 | ||
4347 | else { | |
4348 | /* Try to expand forward into top */ | |
4349 | if (next == av->top && | |
4350 | (unsigned long)(newsize = oldsize + nextsize) >= | |
4351 | (unsigned long)(nb + MINSIZE)) { | |
4352 | set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4353 | av->top = chunk_at_offset(oldp, nb); | |
4354 | set_head(av->top, (newsize - nb) | PREV_INUSE); | |
4355 | check_inuse_chunk(av, oldp); | |
4356 | return chunk2mem(oldp); | |
4357 | } | |
76761b63 | 4358 | |
22a89187 UD |
4359 | /* Try to expand forward into next chunk; split off remainder below */ |
4360 | else if (next != av->top && | |
4361 | !inuse(next) && | |
4362 | (unsigned long)(newsize = oldsize + nextsize) >= | |
4363 | (unsigned long)(nb)) { | |
fa8d436c | 4364 | newp = oldp; |
22a89187 | 4365 | unlink(next, bck, fwd); |
7799b7b3 | 4366 | } |
f65fd747 | 4367 | |
22a89187 | 4368 | /* allocate, copy, free */ |
fa8d436c | 4369 | else { |
22a89187 UD |
4370 | newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK); |
4371 | if (newmem == 0) | |
4372 | return 0; /* propagate failure */ | |
4373 | ||
4374 | newp = mem2chunk(newmem); | |
4375 | newsize = chunksize(newp); | |
a9177ff5 | 4376 | |
22a89187 UD |
4377 | /* |
4378 | Avoid copy if newp is next chunk after oldp. | |
4379 | */ | |
4380 | if (newp == next) { | |
4381 | newsize += oldsize; | |
72f90263 | 4382 | newp = oldp; |
fa8d436c | 4383 | } |
fa8d436c | 4384 | else { |
72f90263 | 4385 | /* |
22a89187 UD |
4386 | Unroll copy of <= 36 bytes (72 if 8-byte sizes). |
4387 | We know that contents have an odd number of | |
4388 | INTERNAL_SIZE_T-sized words; minimally 3. | |
72f90263 | 4389 | */ |
72f90263 | 4390 | |
22a89187 UD |
4391 | copysize = oldsize - SIZE_SZ; |
4392 | s = (INTERNAL_SIZE_T*)(chunk2mem(oldp)); | |
4393 | d = (INTERNAL_SIZE_T*)(newmem); | |
4394 | ncopies = copysize / sizeof(INTERNAL_SIZE_T); | |
4395 | assert(ncopies >= 3); | |
72f90263 | 4396 | |
22a89187 UD |
4397 | if (ncopies > 9) |
4398 | MALLOC_COPY(d, s, copysize); | |
72f90263 | 4399 | |
22a89187 UD |
4400 | else { |
4401 | *(d+0) = *(s+0); | |
4402 | *(d+1) = *(s+1); | |
4403 | *(d+2) = *(s+2); | |
4404 | if (ncopies > 4) { | |
4405 | *(d+3) = *(s+3); | |
4406 | *(d+4) = *(s+4); | |
4407 | if (ncopies > 6) { | |
4408 | *(d+5) = *(s+5); | |
4409 | *(d+6) = *(s+6); | |
4410 | if (ncopies > 8) { | |
4411 | *(d+7) = *(s+7); | |
4412 | *(d+8) = *(s+8); | |
72f90263 UD |
4413 | } |
4414 | } | |
4415 | } | |
72f90263 | 4416 | } |
f65fd747 | 4417 | |
22a89187 UD |
4418 | _int_free(av, oldp, 1); |
4419 | check_inuse_chunk(av, newp); | |
4420 | return chunk2mem(newp); | |
4421 | } | |
fa8d436c | 4422 | } |
fa8d436c | 4423 | } |
f65fd747 | 4424 | |
22a89187 | 4425 | /* If possible, free extra space in old or extended chunk */ |
f65fd747 | 4426 | |
22a89187 | 4427 | assert((unsigned long)(newsize) >= (unsigned long)(nb)); |
f65fd747 | 4428 | |
22a89187 | 4429 | remainder_size = newsize - nb; |
10dc2a90 | 4430 | |
22a89187 UD |
4431 | if (remainder_size < MINSIZE) { /* not enough extra to split off */ |
4432 | set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4433 | set_inuse_bit_at_offset(newp, newsize); | |
10dc2a90 | 4434 | } |
22a89187 UD |
4435 | else { /* split remainder */ |
4436 | remainder = chunk_at_offset(newp, nb); | |
4437 | set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4438 | set_head(remainder, remainder_size | PREV_INUSE | | |
4439 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4440 | /* Mark remainder as inuse so free() won't complain */ | |
4441 | set_inuse_bit_at_offset(remainder, remainder_size); | |
4442 | _int_free(av, remainder, 1); | |
4443 | } | |
4444 | ||
4445 | check_inuse_chunk(av, newp); | |
4446 | return chunk2mem(newp); | |
fa8d436c UD |
4447 | } |
4448 | ||
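/*
  A standalone version of the unrolled copy above.  It relies on the
  same facts the allocator guarantees: the payload is a whole number of
  words, and when the slow path is taken that number is odd and at
  least 3.  memcpy stands in for MALLOC_COPY and the demo_ name is
  ours, for illustration.
*/

#include <stddef.h>
#include <string.h>

static void demo_word_copy (size_t *d, const size_t *s, size_t nwords)
{
  if (nwords > 9)
    /* Large copies: the library memcpy amortizes its setup cost. */
    memcpy (d, s, nwords * sizeof (size_t));
  else
    {
      /* Small copies: straight-line assignments avoid the call.
         nwords is odd and >= 3, so only the odd thresholds matter. */
      d[0] = s[0]; d[1] = s[1]; d[2] = s[2];
      if (nwords > 4)
        {
          d[3] = s[3]; d[4] = s[4];
          if (nwords > 6)
            {
              d[5] = s[5]; d[6] = s[6];
              if (nwords > 8)
                {
                  d[7] = s[7]; d[8] = s[8];
                }
            }
        }
    }
}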
4449 | /* | |
4450 | ------------------------------ memalign ------------------------------ | |
4451 | */ | |
4452 | ||
22a89187 | 4453 | static void* |
fa8d436c UD |
4454 | _int_memalign(mstate av, size_t alignment, size_t bytes) |
4455 | { | |
4456 | INTERNAL_SIZE_T nb; /* padded request size */ | |
4457 | char* m; /* memory returned by malloc call */ | |
4458 | mchunkptr p; /* corresponding chunk */ | |
4459 | char* brk; /* alignment point within p */ | |
4460 | mchunkptr newp; /* chunk to return */ | |
4461 | INTERNAL_SIZE_T newsize; /* its size */ | |
4462 | INTERNAL_SIZE_T leadsize; /* leading space before alignment point */ | |
4463 | mchunkptr remainder; /* spare room at end to split off */ | |
4464 | unsigned long remainder_size; /* its size */ | |
4465 | INTERNAL_SIZE_T size; | |
f65fd747 UD |
4466 | |
4467 | /* If need less alignment than we give anyway, just relay to malloc */ | |
4468 | ||
fa8d436c | 4469 | if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes); |
f65fd747 UD |
4470 | |
4471 | /* Otherwise, ensure that it is at least a minimum chunk size */ | |
4472 | ||
4473 | if (alignment < MINSIZE) alignment = MINSIZE; | |
4474 | ||
fa8d436c UD |
4475 | /* Make sure alignment is power of 2 (in case MINSIZE is not). */ |
4476 | if ((alignment & (alignment - 1)) != 0) { | |
4477 | size_t a = MALLOC_ALIGNMENT * 2; | |
4478 | while ((unsigned long)a < (unsigned long)alignment) a <<= 1; | |
4479 | alignment = a; | |
7799b7b3 | 4480 | } |
f65fd747 | 4481 | |
fa8d436c UD |
4482 | checked_request2size(bytes, nb); |
4483 | ||
4484 | /* | |
4485 | Strategy: find a spot within that chunk that meets the alignment | |
4486 | request, and then possibly free the leading and trailing space. | |
4487 | */ | |
4488 | ||
4489 | ||
4490 | /* Call malloc with worst case padding to hit alignment. */ | |
4491 | ||
4492 | m = (char*)(_int_malloc(av, nb + alignment + MINSIZE)); | |
4493 | ||
4494 | if (m == 0) return 0; /* propagate failure */ | |
4495 | ||
4496 | p = mem2chunk(m); | |
4497 | ||
4498 | if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */ | |
4499 | ||
f65fd747 | 4500 | /* |
fa8d436c UD |
4501 | Find an aligned spot inside chunk. Since we need to give back |
4502 | leading space in a chunk of at least MINSIZE, if the first | |
4503 | calculation places us at a spot with less than MINSIZE leader, | |
4504 | we can move to the next aligned spot -- we've allocated enough | |
4505 | total room so that this is always possible. | |
f65fd747 UD |
4506 | */ |
4507 | ||
fa8d436c | 4508 | brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & |
72f90263 | 4509 | -((signed long) alignment)); |
fa8d436c UD |
4510 | if ((unsigned long)(brk - (char*)(p)) < MINSIZE) |
4511 | brk += alignment; | |
f65fd747 | 4512 | |
fa8d436c | 4513 | newp = (mchunkptr)brk; |
f65fd747 UD |
4514 | leadsize = brk - (char*)(p); |
4515 | newsize = chunksize(p) - leadsize; | |
4516 | ||
fa8d436c UD |
4517 | /* For mmapped chunks, just adjust offset */ |
4518 | if (chunk_is_mmapped(p)) { | |
f65fd747 UD |
4519 | newp->prev_size = p->prev_size + leadsize; |
4520 | set_head(newp, newsize|IS_MMAPPED); | |
fa8d436c | 4521 | return chunk2mem(newp); |
f65fd747 | 4522 | } |
f65fd747 | 4523 | |
fa8d436c UD |
4524 | /* Otherwise, give back leader, use the rest */ |
4525 | set_head(newp, newsize | PREV_INUSE | | |
4526 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
f65fd747 | 4527 | set_inuse_bit_at_offset(newp, newsize); |
fa8d436c | 4528 | set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
425ce2ed | 4529 | _int_free(av, p, 1); |
f65fd747 UD |
4530 | p = newp; |
4531 | ||
fa8d436c | 4532 | assert (newsize >= nb && |
72f90263 | 4533 | (((unsigned long)(chunk2mem(p))) % alignment) == 0); |
f65fd747 UD |
4534 | } |
4535 | ||
4536 | /* Also give back spare room at the end */ | |
fa8d436c UD |
4537 | if (!chunk_is_mmapped(p)) { |
4538 | size = chunksize(p); | |
4539 | if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { | |
4540 | remainder_size = size - nb; | |
4541 | remainder = chunk_at_offset(p, nb); | |
4542 | set_head(remainder, remainder_size | PREV_INUSE | | |
4543 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4544 | set_head_size(p, nb); | |
425ce2ed | 4545 | _int_free(av, remainder, 1); |
fa8d436c | 4546 | } |
f65fd747 UD |
4547 | } |
4548 | ||
fa8d436c UD |
4549 | check_inuse_chunk(av, p); |
4550 | return chunk2mem(p); | |
f65fd747 UD |
4551 | } |
4552 | ||
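/*
  The heart of _int_memalign above is the rounding step: allocate
  nb + alignment + MINSIZE bytes, then move to the next address that is
  a multiple of the (power-of-two) alignment.  A standalone sketch of
  that step; demo_align_up is our illustrative name.
*/

#include <stdint.h>

/* Round ADDR up to a multiple of ALIGN, which must be a power of two.
   In unsigned arithmetic -ALIGN is the mask with the low log2(ALIGN)
   bits clear, matching the -((signed long) alignment) trick above. */
static uintptr_t demo_align_up (uintptr_t addr, uintptr_t align)
{
  return (addr + align - 1) & -align;
}

/* E.g. demo_align_up (0x1003, 0x10) == 0x1010, and already-aligned
   addresses are returned unchanged.  The MINSIZE slack in the malloc
   call guarantees that if the first aligned spot leaves too small a
   leader, the next one (one alignment further) still fits. */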
f65fd747 | 4553 | |
fa8d436c UD |
4554 | /* |
4555 | ------------------------------ valloc ------------------------------ | |
f65fd747 UD |
4556 | */ |
4557 | ||
22a89187 | 4558 | static void* |
fa8d436c | 4559 | _int_valloc(mstate av, size_t bytes) |
f65fd747 | 4560 | { |
fa8d436c UD |
4561 | /* Ensure initialization/consolidation */ |
4562 | if (have_fastchunks(av)) malloc_consolidate(av); | |
02d46fc4 | 4563 | return _int_memalign(av, GLRO(dl_pagesize), bytes); |
f65fd747 | 4564 | } |
f65fd747 UD |
4565 | |
4566 | /* | |
fa8d436c | 4567 | ------------------------------ pvalloc ------------------------------ |
f65fd747 UD |
4568 | */ |
4569 | ||
fa8d436c | 4570 | |
22a89187 | 4571 | static void* |
fa8d436c | 4572 | _int_pvalloc(mstate av, size_t bytes) |
f65fd747 | 4573 | { |
fa8d436c | 4574 | size_t pagesz; |
f65fd747 | 4575 | |
fa8d436c UD |
4576 | /* Ensure initialization/consolidation */ |
4577 | if (have_fastchunks(av)) malloc_consolidate(av); | |
02d46fc4 | 4578 | pagesz = GLRO(dl_pagesize); |
fa8d436c | 4579 | return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1)); |
f65fd747 | 4580 | } |
a9177ff5 | 4581 | |
f65fd747 | 4582 | |
fa8d436c UD |
4583 | /* |
4584 | ------------------------------ malloc_trim ------------------------------ | |
4585 | */ | |
8a4b65b4 | 4586 | |
68631c8e | 4587 | static int mTRIm(mstate av, size_t pad) |
f65fd747 | 4588 | { |
fa8d436c | 4589 | /* Ensure initialization/consolidation */ |
68631c8e UD |
4590 | malloc_consolidate (av); |
4591 | ||
02d46fc4 | 4592 | const size_t ps = GLRO(dl_pagesize); |
68631c8e UD |
4593 | int psindex = bin_index (ps); |
4594 | const size_t psm1 = ps - 1; | |
4595 | ||
4596 | int result = 0; | |
4597 | for (int i = 1; i < NBINS; ++i) | |
4598 | if (i == 1 || i >= psindex) | |
4599 | { | |
72f90263 | 4600 | mbinptr bin = bin_at (av, i); |
68631c8e | 4601 | |
72f90263 | 4602 | for (mchunkptr p = last (bin); p != bin; p = p->bk) |
68631c8e UD |
4603 | { |
4604 | INTERNAL_SIZE_T size = chunksize (p); | |
4605 | ||
4606 | if (size > psm1 + sizeof (struct malloc_chunk)) | |
4607 | { | |
4608 | /* See whether the chunk contains at least one unused page. */ | |
4609 | char *paligned_mem = (char *) (((uintptr_t) p | |
4610 | + sizeof (struct malloc_chunk) | |
4611 | + psm1) & ~psm1); | |
4612 | ||
4613 | assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem); | |
4614 | assert ((char *) p + size > paligned_mem); | |
4615 | ||
4616 | /* This is the size we could potentially free. */ | |
4617 | size -= paligned_mem - (char *) p; | |
4618 | ||
4619 | if (size > psm1) | |
4620 | { | |
4621 | #ifdef MALLOC_DEBUG | |
4622 | /* When debugging we simulate destroying the memory | |
4623 | content. */ | |
4624 | memset (paligned_mem, 0x89, size & ~psm1); | |
4625 | #endif | |
4626 | madvise (paligned_mem, size & ~psm1, MADV_DONTNEED); | |
4627 | ||
4628 | result = 1; | |
4629 | } | |
4630 | } | |
4631 | } | |
4632 | } | |
8a4b65b4 | 4633 | |
a9177ff5 | 4634 | #ifndef MORECORE_CANNOT_TRIM |
68631c8e | 4635 | return result | (av == &main_arena ? sYSTRIm (pad, av) : 0); |
8a4b65b4 | 4636 | #else |
68631c8e | 4637 | return result; |
f65fd747 | 4638 | #endif |
f65fd747 UD |
4639 | } |
4640 | ||
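/*
  The per-chunk step of mTRIm above finds the first page boundary
  inside a free chunk and returns every complete page after it to the
  kernel.  A standalone sketch under POSIX madvise; the demo_ name and
  the explicit ps parameter are ours, for illustration.
*/

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Release the complete pages inside [start, start + len); ps is the
   page size.  Returns 1 if anything was released. */
static int demo_release_pages (char *start, size_t len, size_t ps)
{
  char *aligned = (char *) (((uintptr_t) start + ps - 1)
                            & ~(uintptr_t) (ps - 1));
  size_t lead = (size_t) (aligned - start);

  if (len < lead + ps)
    return 0;                          /* no complete page inside */

  /* The pages keep their mapping but the kernel may reclaim the
     frames; they read back as zero-filled on the next touch. */
  madvise (aligned, (len - lead) & ~(ps - 1), MADV_DONTNEED);
  return 1;
}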
f65fd747 UD |
4641 | |
4642 | /* | |
fa8d436c | 4643 | ------------------------- malloc_usable_size ------------------------- |
f65fd747 UD |
4644 | */ |
4645 | ||
22a89187 | 4646 | size_t mUSABLe(void* mem) |
f65fd747 UD |
4647 | { |
4648 | mchunkptr p; | |
fa8d436c | 4649 | if (mem != 0) { |
f65fd747 | 4650 | p = mem2chunk(mem); |
fa8d436c UD |
4651 | if (chunk_is_mmapped(p)) |
4652 | return chunksize(p) - 2*SIZE_SZ; | |
4653 | else if (inuse(p)) | |
f65fd747 | 4654 | return chunksize(p) - SIZE_SZ; |
f65fd747 | 4655 | } |
fa8d436c | 4656 | return 0; |
f65fd747 UD |
4657 | } |
4658 | ||
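/*
  Usage note for the function above: the value reported can exceed the
  request, because requests are padded up to chunk granularity.  A
  short sketch against the public entry point (the demo_ name is ours):
*/

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

static void demo_usable (void)
{
  void *p = malloc (5);
  if (p != NULL)
    {
      /* Typically prints a value well above 5, e.g. 24 with 8-byte
         size words: the padded chunk minus the SIZE_SZ overhead. */
      printf ("usable: %zu\n", malloc_usable_size (p));
      free (p);
    }
}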
fa8d436c UD |
4659 | /* |
4660 | ------------------------------ mallinfo ------------------------------ | |
4661 | */ | |
f65fd747 | 4662 | |
fa8d436c | 4663 | struct mallinfo mALLINFo(mstate av) |
f65fd747 | 4664 | { |
fa8d436c | 4665 | struct mallinfo mi; |
6dd67bd5 | 4666 | size_t i; |
f65fd747 UD |
4667 | mbinptr b; |
4668 | mchunkptr p; | |
f65fd747 | 4669 | INTERNAL_SIZE_T avail; |
fa8d436c UD |
4670 | INTERNAL_SIZE_T fastavail; |
4671 | int nblocks; | |
4672 | int nfastblocks; | |
f65fd747 | 4673 | |
fa8d436c UD |
4674 | /* Ensure initialization */ |
4675 | if (av->top == 0) malloc_consolidate(av); | |
8a4b65b4 | 4676 | |
fa8d436c | 4677 | check_malloc_state(av); |
8a4b65b4 | 4678 | |
fa8d436c UD |
4679 | /* Account for top */ |
4680 | avail = chunksize(av->top); | |
4681 | nblocks = 1; /* top always exists */ | |
f65fd747 | 4682 | |
fa8d436c UD |
4683 | /* traverse fastbins */ |
4684 | nfastblocks = 0; | |
4685 | fastavail = 0; | |
4686 | ||
4687 | for (i = 0; i < NFASTBINS; ++i) { | |
425ce2ed | 4688 | for (p = fastbin (av, i); p != 0; p = p->fd) { |
fa8d436c UD |
4689 | ++nfastblocks; |
4690 | fastavail += chunksize(p); | |
4691 | } | |
4692 | } | |
4693 | ||
4694 | avail += fastavail; | |
f65fd747 | 4695 | |
fa8d436c UD |
4696 | /* traverse regular bins */ |
4697 | for (i = 1; i < NBINS; ++i) { | |
4698 | b = bin_at(av, i); | |
4699 | for (p = last(b); p != b; p = p->bk) { | |
4700 | ++nblocks; | |
4701 | avail += chunksize(p); | |
4702 | } | |
4703 | } | |
f65fd747 | 4704 | |
fa8d436c UD |
4705 | mi.smblks = nfastblocks; |
4706 | mi.ordblks = nblocks; | |
4707 | mi.fordblks = avail; | |
4708 | mi.uordblks = av->system_mem - avail; | |
4709 | mi.arena = av->system_mem; | |
4710 | mi.hblks = mp_.n_mmaps; | |
4711 | mi.hblkhd = mp_.mmapped_mem; | |
4712 | mi.fsmblks = fastavail; | |
4713 | mi.keepcost = chunksize(av->top); | |
4714 | mi.usmblks = mp_.max_total_mem; | |
4715 | return mi; | |
4716 | } | |
f65fd747 | 4717 | |
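/*
  Usage sketch for the public wrapper of the function above.  struct
  mallinfo comes from <malloc.h>; note its fields are plain ints and
  can wrap on heaps past 2 GiB.  The demo_ name is ours.
*/

#include <malloc.h>
#include <stdio.h>

static void demo_mallinfo (void)
{
  struct mallinfo mi = mallinfo ();
  printf ("arena %d, in use %d, free %d, mmapped %d, trimmable %d\n",
          mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd, mi.keepcost);
}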
fa8d436c UD |
4718 | /* |
4719 | ------------------------------ malloc_stats ------------------------------ | |
f65fd747 UD |
4720 | */ |
4721 | ||
fa8d436c | 4722 | void mSTATs() |
f65fd747 | 4723 | { |
8a4b65b4 | 4724 | int i; |
fa8d436c | 4725 | mstate ar_ptr; |
8a4b65b4 | 4726 | struct mallinfo mi; |
fa8d436c | 4727 | unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b; |
8a4b65b4 UD |
4728 | #if THREAD_STATS |
4729 | long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0; | |
4730 | #endif | |
4731 | ||
a234e27d UD |
4732 | if(__malloc_initialized < 0) |
4733 | ptmalloc_init (); | |
8dab36a1 UD |
4734 | _IO_flockfile (stderr); |
4735 | int old_flags2 = ((_IO_FILE *) stderr)->_flags2; | |
4736 | ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL; | |
fa8d436c UD |
4737 | for (i=0, ar_ptr = &main_arena;; i++) { |
4738 | (void)mutex_lock(&ar_ptr->mutex); | |
4739 | mi = mALLINFo(ar_ptr); | |
8a4b65b4 UD |
4740 | fprintf(stderr, "Arena %d:\n", i); |
4741 | fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena); | |
4742 | fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks); | |
fa8d436c UD |
4743 | #if MALLOC_DEBUG > 1 |
4744 | if (i > 0) | |
4745 | dump_heap(heap_for_ptr(top(ar_ptr))); | |
4746 | #endif | |
8a4b65b4 UD |
4747 | system_b += mi.arena; |
4748 | in_use_b += mi.uordblks; | |
4749 | #if THREAD_STATS | |
4750 | stat_lock_direct += ar_ptr->stat_lock_direct; | |
4751 | stat_lock_loop += ar_ptr->stat_lock_loop; | |
4752 | stat_lock_wait += ar_ptr->stat_lock_wait; | |
4753 | #endif | |
fa8d436c | 4754 | (void)mutex_unlock(&ar_ptr->mutex); |
7e3be507 UD |
4755 | ar_ptr = ar_ptr->next; |
4756 | if(ar_ptr == &main_arena) break; | |
8a4b65b4 UD |
4757 | } |
4758 | fprintf(stderr, "Total (incl. mmap):\n"); | |
4759 | fprintf(stderr, "system bytes = %10u\n", system_b); | |
4760 | fprintf(stderr, "in use bytes = %10u\n", in_use_b); | |
fa8d436c UD |
4761 | fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps); |
4762 | fprintf(stderr, "max mmap bytes = %10lu\n", | |
4763 | (unsigned long)mp_.max_mmapped_mem); | |
f65fd747 | 4764 | #if THREAD_STATS |
8a4b65b4 | 4765 | fprintf(stderr, "heaps created = %10d\n", stat_n_heaps); |
f65fd747 UD |
4766 | fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct); |
4767 | fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop); | |
8a4b65b4 UD |
4768 | fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait); |
4769 | fprintf(stderr, "locked total = %10ld\n", | |
72f90263 | 4770 | stat_lock_direct + stat_lock_loop + stat_lock_wait); |
f65fd747 | 4771 | #endif |
8dab36a1 UD |
4772 | ((_IO_FILE *) stderr)->_flags2 |= old_flags2; |
4773 | _IO_funlockfile (stderr); | |
f65fd747 UD |
4774 | } |
4775 | ||
f65fd747 UD |
4776 | |
4777 | /* | |
fa8d436c | 4778 | ------------------------------ mallopt ------------------------------ |
f65fd747 UD |
4779 | */ |
4780 | ||
f65fd747 | 4781 | int mALLOPt(int param_number, int value) |
f65fd747 | 4782 | { |
fa8d436c UD |
4783 | mstate av = &main_arena; |
4784 | int res = 1; | |
f65fd747 | 4785 | |
0cb71e02 UD |
4786 | if(__malloc_initialized < 0) |
4787 | ptmalloc_init (); | |
fa8d436c UD |
4788 | (void)mutex_lock(&av->mutex); |
4789 | /* Ensure initialization/consolidation */ | |
4790 | malloc_consolidate(av); | |
2f6d1f1b | 4791 | |
fa8d436c UD |
4792 | switch(param_number) { |
4793 | case M_MXFAST: | |
4794 | if (value >= 0 && value <= MAX_FAST_SIZE) { | |
9bf248c6 | 4795 | set_max_fast(value); |
fa8d436c UD |
4796 | } |
4797 | else | |
4798 | res = 0; | |
4799 | break; | |
2f6d1f1b | 4800 | |
fa8d436c UD |
4801 | case M_TRIM_THRESHOLD: |
4802 | mp_.trim_threshold = value; | |
1d05c2fb | 4803 | mp_.no_dyn_threshold = 1; |
fa8d436c | 4804 | break; |
2f6d1f1b | 4805 | |
fa8d436c UD |
4806 | case M_TOP_PAD: |
4807 | mp_.top_pad = value; | |
1d05c2fb | 4808 | mp_.no_dyn_threshold = 1; |
fa8d436c | 4809 | break; |
2f6d1f1b | 4810 | |
fa8d436c | 4811 | case M_MMAP_THRESHOLD: |
fa8d436c UD |
4812 | /* Forbid setting the threshold too high. */ |
4813 | if((unsigned long)value > HEAP_MAX_SIZE/2) | |
4814 | res = 0; | |
4815 | else | |
fa8d436c | 4816 | mp_.mmap_threshold = value; |
1d05c2fb | 4817 | mp_.no_dyn_threshold = 1; |
fa8d436c | 4818 | break; |
2f6d1f1b | 4819 | |
fa8d436c | 4820 | case M_MMAP_MAX: |
1f4843fb UD |
4821 | mp_.n_mmaps_max = value; |
4822 | mp_.no_dyn_threshold = 1; | |
fa8d436c | 4823 | break; |
10dc2a90 | 4824 | |
fa8d436c UD |
4825 | case M_CHECK_ACTION: |
4826 | check_action = value; | |
4827 | break; | |
854278df UD |
4828 | |
4829 | case M_PERTURB: | |
4830 | perturb_byte = value; | |
4831 | break; | |
425ce2ed UD |
4832 | |
4833 | #ifdef PER_THREAD | |
4834 | case M_ARENA_TEST: | |
4835 | if (value > 0) | |
4836 | mp_.arena_test = value; | |
4837 | break; | |
4838 | ||
4839 | case M_ARENA_MAX: | |
4840 | if (value > 0) | |
4841 | mp_.arena_max = value; | |
4842 | break; | |
4843 | #endif | |
b22fc5f5 | 4844 | } |
fa8d436c UD |
4845 | (void)mutex_unlock(&av->mutex); |
4846 | return res; | |
b22fc5f5 UD |
4847 | } |
4848 | ||
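/*
  Usage sketch for the switch above; the M_* constants come from
  <malloc.h> and mallopt returns 1 on success, 0 on a rejected value
  (e.g. an M_MMAP_THRESHOLD beyond HEAP_MAX_SIZE/2 is refused).  The
  demo_ name is ours.
*/

#include <malloc.h>

static void demo_tune (void)
{
  /* Disable fastbins entirely: with max_fast 0 every free takes the
     consolidating path, trading speed for tighter memory. */
  if (mallopt (M_MXFAST, 0) == 1)
    {
      /* Setting a threshold by hand also pins the dynamic-threshold
         logic, as the no_dyn_threshold assignments above show. */
      mallopt (M_TRIM_THRESHOLD, 256 * 1024);
    }
}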
10dc2a90 | 4849 | |
a9177ff5 | 4850 | /* |
fa8d436c UD |
4851 | -------------------- Alternative MORECORE functions -------------------- |
4852 | */ | |
10dc2a90 | 4853 | |
b22fc5f5 | 4854 | |
fa8d436c UD |
4855 | /* |
4856 | General Requirements for MORECORE. | |
b22fc5f5 | 4857 | |
fa8d436c | 4858 | The MORECORE function must have the following properties: |
b22fc5f5 | 4859 | |
fa8d436c | 4860 | If MORECORE_CONTIGUOUS is false: |
10dc2a90 | 4861 | |
fa8d436c UD |
4862 | * MORECORE must allocate in multiples of pagesize. It will |
4863 | only be called with arguments that are multiples of pagesize. | |
10dc2a90 | 4864 | |
a9177ff5 | 4865 | * MORECORE(0) must return an address that is at least |
fa8d436c | 4866 | MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.) |
10dc2a90 | 4867 | |
fa8d436c | 4868 | else (i.e. If MORECORE_CONTIGUOUS is true): |
10dc2a90 | 4869 | |
fa8d436c UD |
4870 | * Consecutive calls to MORECORE with positive arguments |
4871 | return increasing addresses, indicating that space has been | |
4872 | contiguously extended. | |
10dc2a90 | 4873 | |
fa8d436c UD |
4874 | * MORECORE need not allocate in multiples of pagesize. |
4875 | Calls to MORECORE need not have args of multiples of pagesize. | |
10dc2a90 | 4876 | |
fa8d436c | 4877 | * MORECORE need not page-align. |
10dc2a90 | 4878 | |
fa8d436c | 4879 | In either case: |
10dc2a90 | 4880 | |
fa8d436c UD |
4881 | * MORECORE may allocate more memory than requested. (Or even less, |
4882 | but this will generally result in a malloc failure.) | |
10dc2a90 | 4883 | |
fa8d436c UD |
4884 | * MORECORE must not allocate memory when given argument zero, but |
4885 | instead return one past the end address of memory from previous | |
4886 | nonzero call. This malloc does NOT call MORECORE(0) | |
4887 | until at least one call with positive arguments is made, so | |
4888 | the initial value returned is not important. | |
10dc2a90 | 4889 | |
fa8d436c UD |
4890 | * Even though consecutive calls to MORECORE need not return contiguous |
4891 | addresses, it must be OK for malloc'ed chunks to span multiple | |
4892 | regions in those cases where they do happen to be contiguous. | |
10dc2a90 | 4893 | |
fa8d436c UD |
4894 | * MORECORE need not handle negative arguments -- it may instead |
4895 | just return MORECORE_FAILURE when given negative arguments. | |
4896 | Negative arguments are always multiples of pagesize. MORECORE | |
4897 | must not misinterpret negative args as large positive unsigned | |
4898 | args. You can suppress all such calls from even occurring by defining | |
4899 | MORECORE_CANNOT_TRIM. |
10dc2a90 | 4900 | |
fa8d436c UD |
4901 | There is some variation across systems about the type of the |
4902 | argument to sbrk/MORECORE. If size_t is unsigned, then it cannot | |
4903 | actually be size_t, because sbrk supports negative args, so it is | |
4904 | normally the signed type of the same width as size_t (sometimes | |
4905 | declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much | |
4906 | matter though. Internally, we use "long" as arguments, which should | |
4907 | work across all reasonable possibilities. | |
ee74a442 | 4908 | |
fa8d436c | 4909 | Additionally, if MORECORE ever returns failure for a positive |
22a89187 UD |
4910 | request, then mmap is used as a noncontiguous system allocator. This |
4911 | is a useful backup strategy for systems with holes in address spaces | |
4912 | -- in this case sbrk cannot contiguously expand the heap, but mmap | |
4913 | may be able to map noncontiguous space. | |
7e3be507 | 4914 | |
fa8d436c UD |
4915 | If you'd like mmap to ALWAYS be used, you can define MORECORE to be |
4916 | a function that always returns MORECORE_FAILURE. | |
2e65ca2b | 4917 | |
fa8d436c UD |
4918 | If you are using this malloc with something other than sbrk (or its |
4919 | emulation) to supply memory regions, you probably want to set | |
4920 | MORECORE_CONTIGUOUS as false. As an example, here is a custom | |
4921 | allocator kindly contributed for pre-OS X Mac OS. It uses virtually |
4922 | but not necessarily physically contiguous non-paged memory (locked | |
4923 | in, present and won't get swapped out). You can use it by | |
4924 | uncommenting this section, adding some #includes, and setting up the | |
4925 | appropriate defines above: | |
7e3be507 | 4926 | |
fa8d436c UD |
4927 | #define MORECORE osMoreCore |
4928 | #define MORECORE_CONTIGUOUS 0 | |
7e3be507 | 4929 | |
fa8d436c UD |
4930 | There is also a shutdown routine that should somehow be called for |
4931 | cleanup upon program exit. | |
7e3be507 | 4932 | |
fa8d436c UD |
4933 | #define MAX_POOL_ENTRIES 100 |
4934 | #define MINIMUM_MORECORE_SIZE (64 * 1024) | |
4935 | static int next_os_pool; | |
4936 | void *our_os_pools[MAX_POOL_ENTRIES]; | |
7e3be507 | 4937 | |
fa8d436c UD |
4938 | void *osMoreCore(int size) |
4939 | { | |
4940 | void *ptr = 0; | |
4941 | static void *sbrk_top = 0; | |
ca34d7a7 | 4942 | |
fa8d436c UD |
4943 | if (size > 0) |
4944 | { | |
4945 | if (size < MINIMUM_MORECORE_SIZE) | |
72f90263 | 4946 | size = MINIMUM_MORECORE_SIZE; |
fa8d436c | 4947 | if (CurrentExecutionLevel() == kTaskLevel) |
72f90263 | 4948 | ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); |
fa8d436c UD |
4949 | if (ptr == 0) |
4950 | { | |
72f90263 | 4951 | return (void *) MORECORE_FAILURE; |
fa8d436c UD |
4952 | } |
4953 | // save ptrs so they can be freed during cleanup | |
4954 | our_os_pools[next_os_pool] = ptr; | |
4955 | next_os_pool++; | |
4956 | ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); | |
4957 | sbrk_top = (char *) ptr + size; | |
4958 | return ptr; | |
4959 | } | |
4960 | else if (size < 0) | |
4961 | { | |
4962 | // we don't currently support shrink behavior | |
4963 | return (void *) MORECORE_FAILURE; | |
4964 | } | |
4965 | else | |
4966 | { | |
4967 | return sbrk_top; | |
431c33c0 | 4968 | } |
ca34d7a7 | 4969 | } |
ca34d7a7 | 4970 | |
fa8d436c UD |
4971 | // cleanup any allocated memory pools |
4972 | // called as last thing before shutting down driver | |
ca34d7a7 | 4973 | |
fa8d436c | 4974 | void osCleanupMem(void) |
ca34d7a7 | 4975 | { |
fa8d436c | 4976 | void **ptr; |
ca34d7a7 | 4977 | |
fa8d436c UD |
4978 | for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) |
4979 | if (*ptr) | |
4980 | { | |
72f90263 UD |
4981 | PoolDeallocate(*ptr); |
4982 | *ptr = 0; | |
fa8d436c UD |
4983 | } |
4984 | } | |
ee74a442 | 4985 | |
fa8d436c | 4986 | */ |
f65fd747 | 4987 | |
7e3be507 | 4988 | |
3e030bd5 UD |
4989 | /* Helper code. */ |
4990 | ||
ae7f5313 UD |
4991 | extern char **__libc_argv attribute_hidden; |
4992 | ||
3e030bd5 | 4993 | static void |
6bf4302e | 4994 | malloc_printerr(int action, const char *str, void *ptr) |
3e030bd5 | 4995 | { |
553cc5f9 UD |
4996 | if ((action & 5) == 5) |
4997 | __libc_message (action & 2, "%s\n", str); | |
4998 | else if (action & 1) | |
3e030bd5 | 4999 | { |
a9055cab | 5000 | char buf[2 * sizeof (uintptr_t) + 1]; |
3e030bd5 | 5001 | |
a9055cab UD |
5002 | buf[sizeof (buf) - 1] = '\0'; |
5003 | char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0); | |
5004 | while (cp > buf) | |
5005 | *--cp = '0'; | |
5006 | ||
5007 | __libc_message (action & 2, | |
553cc5f9 | 5008 | "*** glibc detected *** %s: %s: 0x%s ***\n", |
ae7f5313 | 5009 | __libc_argv[0] ?: "<unknown>", str, cp); |
3e030bd5 | 5010 | } |
a9055cab | 5011 | else if (action & 2) |
3e030bd5 UD |
5012 | abort (); |
5013 | } | |
5014 | ||
22a89187 | 5015 | #include <sys/param.h> |
fa8d436c | 5016 | |
a204dbb2 UD |
5017 | /* We need a wrapper function for one of the additions of POSIX. */ |
5018 | int | |
5019 | __posix_memalign (void **memptr, size_t alignment, size_t size) | |
5020 | { | |
5021 | void *mem; | |
5022 | ||
5023 | /* Test whether the ALIGNMENT argument is valid. It must be a power of |
5024 | two multiple of sizeof (void *). */ | |
de02bd05 UD |
5025 | if (alignment % sizeof (void *) != 0 |
5026 | || !powerof2 (alignment / sizeof (void *)) |
5027 | || alignment == 0) | |
a204dbb2 UD |
5028 | return EINVAL; |
5029 | ||
e796f92f UD |
5030 | /* Call the hook here, so that caller is posix_memalign's caller |
5031 | and not posix_memalign itself. */ | |
df77455c UD |
5032 | __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t, |
5033 | __const __malloc_ptr_t)) = | |
5034 | force_reg (__memalign_hook); | |
bfacf1af | 5035 | if (__builtin_expect (hook != NULL, 0)) |
e796f92f UD |
5036 | mem = (*hook)(alignment, size, RETURN_ADDRESS (0)); |
5037 | else | |
aa420660 | 5038 | mem = public_mEMALIGn (alignment, size); |
a204dbb2 | 5039 | |
fa8d436c UD |
5040 | if (mem != NULL) { |
5041 | *memptr = mem; | |
5042 | return 0; | |
5043 | } | |
a204dbb2 UD |
5044 | |
5045 | return ENOMEM; | |
5046 | } | |
5047 | weak_alias (__posix_memalign, posix_memalign) | |
5048 | ||
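/*
  A short usage sketch for the wrapper above.  The validity rule means
  alignment 24 fails on LP64 (24 / sizeof (void *) == 3, not a power of
  two) while 64 succeeds; errors come back as the return value, not via
  errno.  The demo_ name is ours.
*/

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void demo_posix_memalign (void)
{
  void *buf;
  int err = posix_memalign (&buf, 64, 1000);

  if (err == EINVAL)
    fputs ("bad alignment argument\n", stderr);
  else if (err == ENOMEM)
    fputs ("out of memory\n", stderr);
  else
    {
      /* buf is 64-byte aligned and is released with plain free. */
      free (buf);
    }
}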
bb066545 UD |
5049 | |
5050 | int | |
5051 | malloc_info (int options, FILE *fp) | |
5052 | { | |
5053 | /* For now, at least. */ | |
5054 | if (options != 0) | |
5055 | return EINVAL; | |
5056 | ||
5057 | int n = 0; | |
5058 | size_t total_nblocks = 0; | |
5059 | size_t total_nfastblocks = 0; | |
5060 | size_t total_avail = 0; | |
5061 | size_t total_fastavail = 0; | |
da2d2fb6 UD |
5062 | size_t total_system = 0; |
5063 | size_t total_max_system = 0; | |
0588a9cb UD |
5064 | size_t total_aspace = 0; |
5065 | size_t total_aspace_mprotect = 0; | |
bb066545 UD |
5066 | |
5067 | void mi_arena (mstate ar_ptr) | |
5068 | { | |
5069 | fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++); | |
5070 | ||
5071 | size_t nblocks = 0; | |
5072 | size_t nfastblocks = 0; | |
5073 | size_t avail = 0; | |
5074 | size_t fastavail = 0; | |
5075 | struct | |
5076 | { | |
5077 | size_t from; | |
5078 | size_t to; | |
5079 | size_t total; | |
5080 | size_t count; | |
5081 | } sizes[NFASTBINS + NBINS - 1]; | |
5082 | #define nsizes (sizeof (sizes) / sizeof (sizes[0])) | |
5083 | ||
5084 | mutex_lock (&ar_ptr->mutex); | |
5085 | ||
5086 | for (size_t i = 0; i < NFASTBINS; ++i) | |
5087 | { | |
5088 | mchunkptr p = fastbin (ar_ptr, i); | |
5089 | if (p != NULL) | |
5090 | { | |
5091 | size_t nthissize = 0; | |
5092 | size_t thissize = chunksize (p); | |
5093 | ||
5094 | while (p != NULL) | |
5095 | { | |
5096 | ++nthissize; | |
5097 | p = p->fd; | |
5098 | } | |
5099 | ||
5100 | fastavail += nthissize * thissize; | |
5101 | nfastblocks += nthissize; | |
5102 | sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1); | |
5103 | sizes[i].to = thissize; | |
5104 | sizes[i].count = nthissize; | |
5105 | } | |
5106 | else | |
5107 | sizes[i].from = sizes[i].to = sizes[i].count = 0; | |
5108 | ||
5109 | sizes[i].total = sizes[i].count * sizes[i].to; | |
5110 | } | |
5111 | ||
5112 | mbinptr bin = bin_at (ar_ptr, 1); | |
5113 | struct malloc_chunk *r = bin->fd; | |
346bc35c | 5114 | if (r != NULL) |
bb066545 | 5115 | { |
346bc35c UD |
5116 | while (r != bin) |
5117 | { | |
5118 | ++sizes[NFASTBINS].count; | |
5119 | sizes[NFASTBINS].total += r->size; | |
5120 | sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size); | |
5121 | sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size); | |
5122 | r = r->fd; | |
5123 | } | |
5124 | nblocks += sizes[NFASTBINS].count; | |
5125 | avail += sizes[NFASTBINS].total; | |
bb066545 | 5126 | } |
bb066545 UD |
5127 | |
5128 | for (size_t i = 2; i < NBINS; ++i) | |
5129 | { | |
5130 | bin = bin_at (ar_ptr, i); | |
5131 | r = bin->fd; | |
5132 | sizes[NFASTBINS - 1 + i].from = ~((size_t) 0); | |
5133 | sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total | |
5134 | = sizes[NFASTBINS - 1 + i].count = 0; | |
5135 | ||
346bc35c UD |
5136 | if (r != NULL) |
5137 | while (r != bin) | |
5138 | { | |
5139 | ++sizes[NFASTBINS - 1 + i].count; | |
5140 | sizes[NFASTBINS - 1 + i].total += r->size; | |
5141 | sizes[NFASTBINS - 1 + i].from | |
5142 | = MIN (sizes[NFASTBINS - 1 + i].from, r->size); | |
5143 | sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to, | |
bb066545 | 5144 | r->size); |
bb066545 | 5145 | |
346bc35c UD |
5146 | r = r->fd; |
5147 | } | |
bb066545 UD |
5148 | |
5149 | if (sizes[NFASTBINS - 1 + i].count == 0) | |
5150 | sizes[NFASTBINS - 1 + i].from = 0; | |
5151 | nblocks += sizes[NFASTBINS - 1 + i].count; | |
5152 | avail += sizes[NFASTBINS - 1 + i].total; | |
5153 | } | |
5154 | ||
5155 | mutex_unlock (&ar_ptr->mutex); | |
5156 | ||
5157 | total_nfastblocks += nfastblocks; | |
5158 | total_fastavail += fastavail; | |
5159 | ||
5160 | total_nblocks += nblocks; | |
5161 | total_avail += avail; | |
5162 | ||
5163 | for (size_t i = 0; i < nsizes; ++i) | |
5164 | if (sizes[i].count != 0 && i != NFASTBINS) | |
5165 | fprintf (fp, "\ | |
5166 | <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n", | |
5167 | sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count); | |
5168 | ||
5169 | if (sizes[NFASTBINS].count != 0) | |
5170 | fprintf (fp, "\ | |
5171 | <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n", | |
5172 | sizes[NFASTBINS].from, sizes[NFASTBINS].to, | |
5173 | sizes[NFASTBINS].total, sizes[NFASTBINS].count); | |
5174 | ||
da2d2fb6 UD |
5175 | total_system += ar_ptr->system_mem; |
5176 | total_max_system += ar_ptr->max_system_mem; | |
5177 | ||
bb066545 UD |
5178 | fprintf (fp, |
5179 | "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n" | |
5180 | "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n" | |
da2d2fb6 | 5181 | "<system type=\"current\" size=\"%zu\"/>\n" |
0588a9cb | 5182 | "<system type=\"max\" size=\"%zu\"/>\n", |
da2d2fb6 UD |
5183 | nfastblocks, fastavail, nblocks, avail, |
5184 | ar_ptr->system_mem, ar_ptr->max_system_mem); | |
0588a9cb UD |
5185 | |
5186 | if (ar_ptr != &main_arena) | |
5187 | { | |
5188 | heap_info *heap = heap_for_ptr(top(ar_ptr)); | |
5189 | fprintf (fp, | |
5190 | "<aspace type=\"total\" size=\"%zu\"/>\n" | |
5191 | "<aspace type=\"mprotect\" size=\"%zu\"/>\n", | |
5192 | heap->size, heap->mprotect_size); | |
5193 | total_aspace += heap->size; | |
5194 | total_aspace_mprotect += heap->mprotect_size; | |
5195 | } | |
5196 | else | |
5197 | { | |
5198 | fprintf (fp, | |
5199 | "<aspace type=\"total\" size=\"%zu\"/>\n" | |
5200 | "<aspace type=\"mprotect\" size=\"%zu\"/>\n", | |
5201 | ar_ptr->system_mem, ar_ptr->system_mem); | |
5202 | total_aspace += ar_ptr->system_mem; | |
5203 | total_aspace_mprotect += ar_ptr->system_mem; | |
5204 | } | |
5205 | ||
5206 | fputs ("</heap>\n", fp); | |
bb066545 UD |
5207 | } |
5208 | ||
346bc35c UD |
5209 | if(__malloc_initialized < 0) |
5210 | ptmalloc_init (); | |
5211 | ||
bb066545 UD |
5212 | fputs ("<malloc version=\"1\">\n", fp); |
5213 | ||
5214 | /* Iterate over all arenas currently in use. */ | |
5215 | mstate ar_ptr = &main_arena; | |
5216 | do | |
5217 | { | |
5218 | mi_arena (ar_ptr); | |
5219 | ar_ptr = ar_ptr->next; | |
5220 | } | |
5221 | while (ar_ptr != &main_arena); | |
5222 | ||
5223 | fprintf (fp, | |
5224 | "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n" | |
5225 | "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n" | |
ee6f3538 JS |
5226 | "<system type=\"current\" size=\"%zu\"/>\n" |
5227 | "<system type=\"max\" size=\"%zu\"/>\n" | |
0588a9cb UD |
5228 | "<aspace type=\"total\" size=\"%zu\"/>\n" |
5229 | "<aspace type=\"mprotect\" size=\"%zu\"/>\n" | |
bb066545 | 5230 | "</malloc>\n", |
da2d2fb6 | 5231 | total_nfastblocks, total_fastavail, total_nblocks, total_avail, |
0588a9cb UD |
5232 | total_system, total_max_system, |
5233 | total_aspace, total_aspace_mprotect); | |
bb066545 UD |
5234 | |
5235 | return 0; | |
5236 | } | |
5237 | ||
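/*
  Usage sketch for malloc_info above: it streams the XML document shown
  being built to any stdio stream, and options must currently be 0.
  The demo_ name is ours.
*/

#include <malloc.h>
#include <stdio.h>

static void demo_malloc_info (void)
{
  /* A nonzero return here means the options argument was rejected. */
  if (malloc_info (0, stdout) != 0)
    fputs ("malloc_info failed\n", stderr);
}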
5238 | ||
eba19d2b UD |
5239 | strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc) |
5240 | strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree) | |
5241 | strong_alias (__libc_free, __free) strong_alias (__libc_free, free) | |
5242 | strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc) | |
5243 | strong_alias (__libc_memalign, __memalign) | |
5244 | weak_alias (__libc_memalign, memalign) | |
5245 | strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc) | |
5246 | strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc) | |
5247 | strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc) | |
5248 | strong_alias (__libc_mallinfo, __mallinfo) | |
5249 | weak_alias (__libc_mallinfo, mallinfo) | |
5250 | strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt) | |
7e3be507 UD |
5251 | |
5252 | weak_alias (__malloc_stats, malloc_stats) | |
5253 | weak_alias (__malloc_usable_size, malloc_usable_size) | |
5254 | weak_alias (__malloc_trim, malloc_trim) | |
2f6d1f1b UD |
5255 | weak_alias (__malloc_get_state, malloc_get_state) |
5256 | weak_alias (__malloc_set_state, malloc_set_state) | |
7e3be507 | 5257 | |
f65fd747 | 5258 | |
fa8d436c | 5259 | /* ------------------------------------------------------------ |
f65fd747 UD |
5260 | History: |
5261 | ||
fa8d436c | 5262 | [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc] |
f65fd747 UD |
5263 | |
5264 | */ | |
fa8d436c UD |
5265 | /* |
5266 | * Local variables: | |
5267 | * c-basic-offset: 2 | |
5268 | * End: | |
5269 | */ |