/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   Copyright The GNU Toolchain Authors.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

/*
  This is a version (aka ptmalloc2) of malloc/free/realloc written by
  Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.

  There have been substantial changes made after the integration into
  glibc in all parts of the code.  Do not look for much commonality
  with the ptmalloc2 version.

* Version ptmalloc2-20011215
  based on:
  VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)

* Quickstart

  In order to compile this implementation, a Makefile is provided with
  the ptmalloc2 distribution, which has pre-defined targets for some
  popular systems (e.g. "make posix" for Posix threads).  All that is
  typically required with regard to compiler flags is the selection of
  the thread package via defining one out of USE_PTHREADS, USE_THR or
  USE_SPROC.  Check the thread-m.h file for what effects this has.
  Many/most systems will additionally require USE_TSD_DATA_HACK to be
  defined, so this is the default for "make posix".

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written.  However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator for malloc-intensive programs.

  The main properties of the algorithms are:
  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
    with ties normally decided via FIFO (i.e. least recently used).
  * For small (<= 64 bytes by default) requests, it is a caching
    allocator, that maintains pools of quickly recycled chunks.
  * In between, and for combinations of large and small requests, it does
    the best it can trying to meet both goals at once.
  * For very large requests (>= 128KB by default), it relies on system
    memory mapping facilities, if supported.

  For a longer but slightly out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

  You may already by default be using a C library containing a malloc
  that is based on some version of this malloc (for example in
  linux).  You might still want to use the one in this file in order to
  customize settings or to avoid overheads associated with library
  versions.

* Contents, described in more detail in "description of public routines" below.

  Standard (ANSI/SVID/...) functions:
    malloc(size_t n);
    calloc(size_t n_elements, size_t element_size);
    free(void* p);
    realloc(void* p, size_t n);
    memalign(size_t alignment, size_t n);
    valloc(size_t n);
    mallinfo()
    mallopt(int parameter_number, int parameter_value)

  Additional functions:
    independent_calloc(size_t n_elements, size_t size, void* chunks[]);
    independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
    pvalloc(size_t n);
    malloc_trim(size_t pad);
    malloc_usable_size(void* p);
    malloc_stats();

* Vital statistics:

  Supported pointer representation:       4 or 8 bytes
  Supported size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.
       You can adjust this by defining INTERNAL_SIZE_T

  Alignment:                              2 * sizeof(size_t) (default)
       (i.e., 8 byte alignment with 4byte size_t).  This suffices for
       nearly all current machines and C compilers.  However, you can
       define MALLOC_ALIGNMENT to be wider than this if necessary.

  Minimum overhead per allocated chunk:   4 or 8 bytes
       Each malloced chunk has a hidden word of overhead holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field and 8 (16) bytes for
       free list pointers.  Thus, the minimum allocatable size is
       16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.
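
       For instance (an illustrative snippet, not part of this file;
       malloc_usable_size is declared in <malloc.h>), the minimum can be
       observed directly:

         void *p = malloc(0);
         size_t min = malloc_usable_size(p);   // typically 24 with 8-byte ptrs
         free(p);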

       The maximum overhead wastage (i.e., the number of extra bytes
       allocated beyond what was requested in malloc) is less than or
       equal to the minimum size, except for requests >= mmap_threshold
       that are serviced via mmap(), where the worst case wastage is
       2 * sizeof(size_t) bytes plus the remainder from a system page
       (the minimal mmap unit); typically 4096 or 8192 bytes.

  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
                           8-byte size_t: 2^64 minus about two pages

       It is assumed that (possibly signed) size_t values suffice to
       represent chunk sizes.  `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type.  The ISO C standard says that it must be
       unsigned, but a few systems are known not to adhere to this.
       Additionally, even when size_t is unsigned, sbrk (which is by
       default used to obtain memory from system) accepts signed
       arguments, and may not be able to handle size_t-wide arguments
       with negative sign bit.  Generally, values that would appear
       as negative after accounting for overhead and alignment are
       supported only via mmap(), which does not have this limitation.

       Requests for sizes outside the allowed range will perform an
       optional failure action and then return null.  (Requests may
       also fail because a system is out of memory.)

  Thread-safety: thread-safe

  Compliance: I believe it is compliant with the 1997 Single Unix Specification
       Also SVID/XPG, ANSI C, and probably others as well.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below.  It has been tested most extensively on Solaris and Linux.
    People also report using it in stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  It is not
    at all modular.  (Sorry!)  It uses a lot of macros.  To be at all
    usable, this code should be compiled using an optimizing compiler
    (for example gcc -O3) that can simplify expressions and control
    paths.  (FAQ: some macros import variables as arguments rather than
    declare locals because people reported that some debuggers
    otherwise get confused.)

    OPTION                     DEFAULT VALUE

    Compilation Environment options:

    HAVE_MREMAP                0

    Changing default word sizes:

    INTERNAL_SIZE_T            size_t

    Configuration and functionality options:

    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
    USE_MALLOC_LOCK            NOT defined
    MALLOC_DEBUG               NOT defined
    REALLOC_ZERO_BYTES_FREES   1
    TRIM_FASTBINS              0

    Options for customizing MORECORE:

    MORECORE                   sbrk
    MORECORE_FAILURE           -1
    MORECORE_CONTIGUOUS        1
    MORECORE_CANNOT_TRIM       NOT defined
    MORECORE_CLEARS            1
    MMAP_AS_MORECORE_SIZE      (1024 * 1024)

    Tuning options that are also dynamically changeable via mallopt:

    DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
    DEFAULT_TRIM_THRESHOLD     128 * 1024
    DEFAULT_TOP_PAD            0
    DEFAULT_MMAP_THRESHOLD     128 * 1024
    DEFAULT_MMAP_MAX           65536

    There are several other #defined constants and macros that you
    probably don't want to touch unless you are extending or adapting malloc.  */

/*
  void* is the pointer type that malloc should say it returns
*/

#ifndef void
#define void void
#endif /*void*/

#include <stddef.h>   /* for size_t */
#include <stdlib.h>   /* for getenv(), abort() */
#include <unistd.h>   /* for __libc_enable_secure */

#include <atomic.h>
#include <_itoa.h>
#include <bits/wordsize.h>
#include <sys/sysinfo.h>

#include <ldsodefs.h>

#include <unistd.h>
#include <stdio.h>    /* needed for malloc_stats */
#include <errno.h>
#include <assert.h>

#include <shlib-compat.h>

/* For uintptr_t.  */
#include <stdint.h>

/* For va_arg, va_start, va_end.  */
#include <stdarg.h>

/* For MIN, MAX, powerof2.  */
#include <sys/param.h>

/* For ALIGN_UP et. al.  */
#include <libc-pointer-arith.h>

/* For DIAG_PUSH/POP_NEEDS_COMMENT et al.  */
#include <libc-diag.h>

/* For memory tagging.  */
#include <libc-mtag.h>

#include <malloc/malloc-internal.h>

/* For SINGLE_THREAD_P.  */
#include <sysdep-cancel.h>

#include <libc-internal.h>

/* For tcache double-free check.  */
#include <random-bits.h>
#include <sys/random.h>
#include <not-cancel.h>

/*
  Debugging:

  Because freed chunks may be overwritten with bookkeeping fields, this
  malloc will often die when freed memory is overwritten by user
  programs.  This can be very effective (albeit in an annoying way)
  in helping track down dangling pointers.

  If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  enabled that will catch more memory errors.  You probably won't be
  able to make much sense of the actual assertion errors, but they
  should help you locate incorrectly overwritten memory.  The checking
  is fairly extensive, and will slow down execution
  noticeably.  Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries.  (By nature, mmapped regions
  cannot be checked very much automatically.)

  Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  this code.  The assertions in the check routines spell out in more
  detail the assumptions and invariants underlying the algorithms.

  Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  checking that all accesses to malloced memory stay within their
  bounds.  However, there are several add-ons and adaptations of this
  or other mallocs available that do this.
*/

#ifndef MALLOC_DEBUG
#define MALLOC_DEBUG 0
#endif

#if USE_TCACHE
/* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
# define TCACHE_MAX_BINS        64
# define MAX_TCACHE_SIZE        tidx2usize (TCACHE_MAX_BINS-1)

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx)        (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))

/* With rounding and alignment, the bins are...
   idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
   idx 1   bytes 25..40 or 13..20
   idx 2   bytes 41..56 or 21..28
   etc.  */

/* This is another arbitrary limit, which tunables can change.  Each
   tcache bin will hold at most this number of chunks.  */
# define TCACHE_FILL_COUNT 7

/* Maximum chunks in tcache bins for tunables.  This value must fit the range
   of tcache->counts[] entries, else they may overflow.  */
# define MAX_TCACHE_COUNT UINT16_MAX
#endif
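
/* To illustrate the index arithmetic above (a standalone sketch, not
   part of the build; it assumes the common 64-bit values
   MALLOC_ALIGNMENT == 16, MINSIZE == 32 and SIZE_SZ == 8):

     tidx2usize (0)  == 0 * 16 + 32 - 8        == 24
     tidx2usize (1)  == 1 * 16 + 32 - 8        == 40
     csize2tidx (48) == (48 - 32 + 15) / 16    == 1

   so a chunk of size 48 (usable size 40) lands in idx 1, matching the
   bin table above.  */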

/* Safe-Linking:
   Use randomness from ASLR (mmap_base) to protect single-linked lists
   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
   lists' chunks, and also perform allocation alignment checks on them.
   This mechanism reduces the risk of pointer hijacking, as was done with
   Safe-Unlinking in the double-linked lists of Small-Bins.
   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
   larger pages provide less entropy, although the pointer mangling
   still works.  */
#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)
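
/* An illustrative round trip (a sketch, not compiled as part of this
   file; 'slot' and 'target' are hypothetical names): storing a mangled
   pointer and recovering it.

     int target;
     int *slot = PROTECT_PTR (&slot, &target);   // store mangled
     int *back = REVEAL_PTR (slot);              // back == &target

   The XOR with the shifted address of the storage slot is its own
   inverse, so REVEAL_PTR undoes PROTECT_PTR exactly.  */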

/*
  The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0)
  when p is nonnull.  If the macro is nonzero, the realloc call returns NULL;
  otherwise, the call returns what malloc (0) would.  In either case,
  p is freed.  Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which
  implements common historical practice.

  ISO C17 says the realloc call has implementation-defined behavior,
  and it might not even free p.
*/

#ifndef REALLOC_ZERO_BYTES_FREES
#define REALLOC_ZERO_BYTES_FREES 1
#endif
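
/* Concretely (an illustrative snippet, not part of this file): under
   glibc's nonzero REALLOC_ZERO_BYTES_FREES,

     void *p = malloc (16);
     void *q = realloc (p, 0);   // frees p, returns NULL

   so a NULL return here does not signal an allocation failure, and p
   must not be freed again.  */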

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming.  Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks.  You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option, that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/

#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif

/* Definition for getting more memory from the OS.  */
#include "morecore.c"

#define MORECORE         (*__glibc_morecore)
#define MORECORE_FAILURE 0

/* Memory tagging.  */

/* Some systems support the concept of tagging (sometimes known as
   coloring) memory locations on a fine grained basis.  Each memory
   location is given a color (normally allocated randomly) and
   pointers are also colored.  When the pointer is dereferenced, the
   pointer's color is checked against the memory's color and if they
   differ the access is faulted (sometimes lazily).

   We use this in glibc by maintaining a single color for the malloc
   data structures that are interleaved with the user data and then
   assigning separate colors for each block allocation handed out.  In
   this way simple buffer overruns will be rapidly detected.  When
   memory is freed, the memory is recolored back to the glibc default
   so that simple use-after-free errors can also be detected.

   If memory is reallocated the buffer is recolored even if the
   address remains the same.  This has a performance impact, but
   guarantees that the old pointer cannot mistakenly be reused (code
   that compares old against new will see a mismatch and will then
   need to behave as though realloc moved the data to a new location).

   Internal API for memory tagging support.

   The aim is to keep the code for memory tagging support as close to
   the normal APIs in glibc as possible, so that if tagging is not
   enabled in the library, or is disabled at runtime then standard
   operations can continue to be used.  Support macros are used to do
   this:

   void *tag_new_zero_region (void *ptr, size_t size)

   Allocates a new tag, colors the memory with that tag, zeros the
   memory and returns a pointer that is correctly colored for that
   location.  The non-tagging version will simply call memset with 0.

   void *tag_region (void *ptr, size_t size)

   Color the region of memory pointed to by PTR and size SIZE with
   the color of PTR.  Returns the original pointer.

   void *tag_new_usable (void *ptr)

   Allocate a new random color and use it to color the user region of
   a chunk; this may include data from the subsequent chunk's header
   if tagging is sufficiently fine grained.  Returns PTR suitably
   recolored for accessing the memory there.

   void *tag_at (void *ptr)

   Read the current color of the memory at the address pointed to by
   PTR (ignoring its current color) and return PTR recolored to that
   color.  PTR must be a valid address in all other respects.  When
   tagging is not enabled, it simply returns the original pointer.
*/

#ifdef USE_MTAG
static bool mtag_enabled = false;
static int mtag_mmap_flags = 0;
#else
# define mtag_enabled false
# define mtag_mmap_flags 0
#endif

static __always_inline void *
tag_region (void *ptr, size_t size)
{
  if (__glibc_unlikely (mtag_enabled))
    return __libc_mtag_tag_region (ptr, size);
  return ptr;
}

static __always_inline void *
tag_new_zero_region (void *ptr, size_t size)
{
  if (__glibc_unlikely (mtag_enabled))
    return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
  return memset (ptr, 0, size);
}

/* Defined later.  */
static void *
tag_new_usable (void *ptr);

static __always_inline void *
tag_at (void *ptr)
{
  if (__glibc_unlikely (mtag_enabled))
    return __libc_mtag_address_get_tag (ptr);
  return ptr;
}

#include <string.h>

/*
  MORECORE-related declarations.  By default, rely on sbrk
*/


/*
  MORECORE is the name of the routine to call to obtain more memory
  from the system.  See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/

#ifndef MORECORE
#define MORECORE sbrk
#endif

/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap.  Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif

/*
  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses.  This is true of unix sbrk.  Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls.  But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/

#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  Define MORECORE_CANNOT_TRIM if your version of MORECORE
  cannot release space back to the system when given negative
  arguments.  This is generally necessary only if you are using
  a hand-crafted MORECORE function that cannot handle negative arguments.
*/

/* #define MORECORE_CANNOT_TRIM */

/* MORECORE_CLEARS           (default 1)
     The degree to which the routine mapped to MORECORE zeroes out
     memory: never (0), only for newly allocated space (1) or always
     (2).  The distinction between (1) and (2) is necessary because on
     some systems, if the application first decrements and then
     increments the break value, the contents of the reallocated space
     are unspecified.
 */

#ifndef MORECORE_CLEARS
# define MORECORE_CLEARS 1
#endif


/*
   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
   sbrk fails, and mmap is used as a backup.  The value must be a
   multiple of page size.  This backup strategy generally applies only
   when systems have "holes" in address space, so sbrk cannot perform
   contiguous expansion, but there is still space available on system.
   On systems for which this is known to be useful (i.e. most linux
   kernels), this occurs only when programs allocate huge amounts of
   memory.  Between this, and the fact that mmap regions tend to be
   limited, the size should be large, to avoid too many mmap calls and
   thus avoid running out of kernel resources.  */

#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.
*/

#ifndef HAVE_MREMAP
#define HAVE_MREMAP 0
#endif

/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics.  It should work on any SVID/XPG compliant system that has
  a /usr/include/malloc.h defining struct mallinfo.  (If you'd like to
  install such a thing yourself, cut out the preliminary declarations
  as described above and below and save them in a malloc.h file.  But
  there's no compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc.  These fields are instead filled by mallinfo() with
  other numbers that might be of interest.
*/


/* ---------- description of public routines ------------ */

#if IS_IN (libc)
/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  if no space is available.  Additionally, on failure, errno is
  set to ENOMEM on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk.  (The minimum
  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  systems.)  On most systems, size_t is an unsigned type, so calls
  with negative arguments are interpreted as requests for huge amounts
  of space, which will often fail.  The maximum supported value of n
  differs across systems, but is in all cases less than the maximum
  representable value of a size_t.
*/
void*  __libc_malloc(size_t);
libc_hidden_proto (__libc_malloc)

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null.  It can have arbitrary (i.e., bad!)
  effects if p has already been freed.

  Unless disabled (using mallopt), freeing very large spaces will,
  when possible, automatically trigger operations that give
  back unused memory to the system, thus reducing program footprint.
*/
void     __libc_free(void*);
libc_hidden_proto (__libc_free)

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void*  __libc_calloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p.  The algorithm
  prefers extending p when possible, otherwise it employs the
  equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  Unless the #define
  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  zero (re)allocates a minimum-sized chunk.

  Large chunks that were internally obtained via mmap will always be
  grown using malloc-copy-free sequences unless the system supports
  MREMAP (currently only linux).

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void*  __libc_realloc(void*, size_t);
libc_hidden_proto (__libc_realloc)

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two.  If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void*  __libc_memalign(size_t, size_t);
libc_hidden_proto (__libc_memalign)

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system.  If the pagesize is unknown, 4096 is used.
*/
void*  __libc_valloc(size_t);



/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    the number of fastbin blocks (i.e., small chunks that
               have been freed but not reused or consolidated)
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   always 0
  fsmblks:   total bytes held in fastbin blocks
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim.  ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
struct mallinfo2 __libc_mallinfo2(void);
libc_hidden_proto (__libc_mallinfo2)

struct mallinfo __libc_mallinfo(void);

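/* A sketch of reading these statistics through the public interface
   (illustrative, not part of this file; mallinfo2 and its size_t
   fields are available from <malloc.h> since glibc 2.33):

     struct mallinfo2 mi = mallinfo2 ();
     printf ("arena: %zu, free: %zu\n", mi.arena, mi.fordblks);

   The older mallinfo() interface truncates the same numbers to int.  */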

/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
void*  __libc_pvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool.  You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program.  However, it cannot guarantee to reduce memory.  Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed.  If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less).  Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int      __malloc_trim(size_t);
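
/* For example (an illustrative snippet, not part of this file;
   malloc_trim is declared in <malloc.h>, and 'big_buffer' is a
   hypothetical name): after freeing a large block, ask the allocator
   to return unused top-of-heap memory, keeping no extra pad:

     free (big_buffer);
     if (malloc_trim (0))
       ... some memory was returned to the system ...
*/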

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects.  This is not a particularly great
  programming practice.  malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);

*/
size_t   __malloc_usable_size(void*);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed.  Note that this is the number of bytes allocated, not the
  number requested.  It will be larger than the number requested
  because of alignment and bookkeeping overhead.  Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

*/
void     __malloc_stats(void);

/*
  posix_memalign(void **memptr, size_t alignment, size_t size);

  POSIX wrapper like memalign(), checking for validity of size.
*/
int      __posix_memalign(void **, size_t, size_t);
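
/* Standard usage (an illustrative snippet, not part of this file):
   request 1 KiB aligned to a 64-byte boundary.  posix_memalign returns
   0 on success and an error number otherwise, without setting errno:

     void *buf = NULL;
     int err = posix_memalign (&buf, 64, 1024);
     if (err == 0)
       { ... use buf; free (buf) when done ... }
*/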
#endif /* IS_IN (libc) */

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  in this malloc.  The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  so setting them has no effect.  But this malloc also supports four
  other options in mallopt.  See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #   default    allowed param values
  M_MXFAST          1         64         0-80  (0 disables fastbins)
  M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
  M_TOP_PAD        -2         0          any
  M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
*/
int      __libc_mallopt(int, int);
#if IS_IN (libc)
libc_hidden_proto (__libc_mallopt)
#endif
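
/* Tuning from application code looks like this (an illustrative
   snippet, not part of this file; M_MXFAST and M_TRIM_THRESHOLD come
   from <malloc.h>, and mallopt returns 1 on success, 0 on failure):

     if (mallopt (M_MXFAST, 0) == 0)                // disable fastbins
       fprintf (stderr, "M_MXFAST rejected\n");
     if (mallopt (M_TRIM_THRESHOLD, 64 * 1024) == 0)
       fprintf (stderr, "M_TRIM_THRESHOLD rejected\n");
*/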

/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces.  This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 80.  You wouldn't want it any higher than this anyway.  Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 8 4byte fields, or small strings representing words, tokens,
  etc.  Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units.  It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
#endif


/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing this much memory.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another.  Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system.  Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum.  For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values.  As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system.  Releasing this much memory
  would allow such a process to run in memory.  Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all.  And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory.  Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The trim value must be greater than page size to have any useful
  effect.  To disable trimming completely, you can set to
  (unsigned long)(-1)

  Trim settings interact with fastbin (MXFAST) settings: Unless
  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  freeing a chunk with size less than or equal to MXFAST.  Trimming is
  instead delayed until subsequent freeing of larger chunks.  However,
  you can still force an attempted trim by calling malloc_trim.

  Also, trimming is not generally possible in cases where
  the main arena is obtained via mmap.

  Note that the trick some people use of mallocing a huge space and
  then freeing it at program startup, in an attempt to reserve system
  memory, doesn't have the intended effect under automatic trimming,
  since that memory will immediately be returned to the system.
*/

#define M_TRIM_THRESHOLD       -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called.  It is used in two ways internally:

  * When sbrk is called to extend the top of the arena to satisfy
    a new malloc request, this much padding is added to the sbrk
    request.

  * When malloc_trim is called automatically from free(),
    it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often.  Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0.  However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  the program needs.
*/

#define M_TOP_PAD              -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
  MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
  adjusted MMAP_THRESHOLD.
*/

#ifndef DEFAULT_MMAP_THRESHOLD_MIN
#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
#endif

#ifndef DEFAULT_MMAP_THRESHOLD_MAX
  /* For 32-bit platforms we cannot increase the maximum mmap
     threshold much because it is also the minimum value for the
     maximum heap size and its alignment.  Going above 512k (i.e., 1M
     for new heaps) wastes too much address space.  */
# if __WORDSIZE == 32
#  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
# else
#  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
# endif
#endif

/*
  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request.  Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system.  A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefits that:

  1. Mmapped space can ALWAYS be individually released back
     to the system, which helps keep the system level memory
     demands of a long-lived program low.
  2. Mapped memory can never become `locked' between
     other chunks, as can happen with normally allocated chunks, which
     means that even trimming via malloc_trim would not release them.
  3. On some systems with "holes" in address spaces, mmap can obtain
     memory that sbrk cannot.

  However, it has the disadvantages that:

  1. The space cannot be reclaimed, consolidated, and then
     used to service later requests, as happens with normal chunks.
  2. It can lead to more wastage because of mmap page alignment
     requirements
  3. It causes malloc performance to be more dependent on host
     system memory management support routines which may vary in
     implementation quality and may impose arbitrary
     limitations.  Generally, servicing a request via normal
     malloc steps is faster than going through a system's mmap.

  The advantages of mmap nearly always outweigh disadvantages for
  "large" chunks, but the value of "large" varies across systems.  The
  default is an empirically derived value that works well in most
  systems.


  Update in 2006:
  The above was written in 2001.  Since then the world has changed a lot.
  Memory got bigger.  Applications got bigger.  The virtual address space
  layout in 32 bit linux changed.

  In the new situation, brk() and mmap space is shared and there are no
  artificial limits on brk size imposed by the kernel.  What is more,
  applications have started using transient allocations larger than the
  128Kb as was imagined in 2001.

  The price for mmap is also high now; each time glibc mmaps from the
  kernel, the kernel is forced to zero out the memory it gives to the
  application.  Zeroing memory is expensive and eats a lot of cache and
  memory bandwidth.  This has nothing to do with the efficiency of the
  virtual memory system, by doing mmap the kernel just has no choice but
  to zero.

  In 2001, the kernel had a maximum size for brk() which was about 800
  megabytes on 32 bit x86, at that point brk() would hit the first
  mmapped shared libraries and couldn't expand anymore.  With current 2.6
  kernels, the VA space layout is different and brk() and mmap
  both can span the entire heap at will.

  Rather than using a static threshold for the brk/mmap tradeoff,
  we are now using a simple dynamic one.  The goal is still to avoid
  fragmentation.  The old goals we kept are
  1) try to get the long lived large allocations to use mmap()
  2) really large allocations should always use mmap()
  and we're adding now:
  3) transient allocations should use brk() to avoid forcing the kernel
     having to zero memory over and over again

  The implementation works with a sliding threshold, which is by default
  limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
  out at 128Kb as per the 2001 default.

  This allows us to satisfy requirement 1) under the assumption that long
  lived allocations are made early in the process' lifespan, before it has
  started doing dynamic allocations of the same size (which will
  increase the threshold).

  The upper bound on the threshold satisfies requirement 2)

  The threshold goes up in value when the application frees memory that was
  allocated with the mmap allocator.  The idea is that once the application
  starts freeing memory of a certain size, it's highly probable that this is
  a size the application uses for transient allocations.  This estimator
  is there to satisfy the new third requirement.

*/

#define M_MMAP_THRESHOLD      -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap.  This parameter exists because
  some systems have a limited number of internal tables for
  use by mmap, and using more than a few of them may degrade
  performance.

  The default is set to a value that serves only as a safeguard.
  Setting to 0 disables use of mmap for servicing large requests.
*/

#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif

#include <malloc.h>

#ifndef RETURN_ADDRESS
#define RETURN_ADDRESS(X_) (NULL)
#endif

/* Forward declarations.  */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;

/* Internal routines.  */

static void*  _int_malloc(mstate, size_t);
static void   _int_free(mstate, mchunkptr, int);
static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
                           INTERNAL_SIZE_T);
static void*  _int_memalign(mstate, size_t, size_t);
#if IS_IN (libc)
static void*  _mid_memalign(size_t, size_t, void *);
#endif

static void malloc_printerr(const char *str) __attribute__ ((noreturn));

static void munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
#endif

/* ------------------ MMAP support ------------------ */


#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif

#define MMAP(addr, size, prot, flags) \
 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)


/*
  -----------------------  Chunk representations -----------------------
*/


/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base.  See explanation below.
*/

struct malloc_chunk {

  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;                /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize;       /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};

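/* An illustrative sketch (not the definitions used below; the real
   chunk2mem/mem2chunk macros appear later in this file and also go
   through the memory-tagging hooks): the user pointer "mem" sits two
   size words past the start of the chunk header,

     mem   == (void *) ((char *) chunk + 2 * SIZE_SZ)
     chunk == (mchunkptr) ((char *) mem - 2 * SIZE_SZ)

   skipping the mchunk_prev_size and mchunk_size fields above.  */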

/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                     |A|M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             (size of chunk, but used for application data)    |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|1|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                     |A|0|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|0|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.  If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

ae9166f2 FW |
1213 | The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial, |
1214 | main arena, described by the main_arena variable. When additional | |
1215 | threads are spawned, each thread receives its own arena (up to a | |
1216 | configurable limit, after which arenas are reused for multiple | |
1217 | threads), and the chunks in these arenas have the A bit set. To | |
1218 | find the arena for a chunk on such a non-main arena, heap_for_ptr | |
1219 | performs a bit mask operation and indirection through the ar_ptr | |
1220 | member of the per-heap header heap_info (see arena.c). | |
1221 | ||
f65fd747 | 1222 | Note that the `foot' of the current chunk is actually represented |
fa8d436c UD |
1223 | as the prev_size of the NEXT chunk. This makes it easier to |
1224 | deal with alignments etc but can be very confusing when trying | |
1225 | to extend or adapt this code. | |
f65fd747 | 1226 | |
ae9166f2 | 1227 | The three exceptions to all this are: |
f65fd747 | 1228 | |
fa8d436c | 1229 | 1. The special chunk `top' doesn't bother using the |
72f90263 UD |
1230 | trailing size field since there is no next contiguous chunk |
1231 | that would have to index off it. After initialization, `top' | |
1232 | is forced to always exist. If it would become less than | |
1233 | MINSIZE bytes long, it is replenished. | |
f65fd747 UD |
1234 | |
1235 | 2. Chunks allocated via mmap, which have the second-lowest-order | |
72f90263 | 1236 | bit M (IS_MMAPPED) set in their size fields. Because they are |
ae9166f2 FW |
1237 | allocated one-by-one, each must contain its own trailing size |
1238 | field. If the M bit is set, the other bits are ignored | |
1239 | (because mmapped chunks are neither in an arena, nor adjacent | |
1240 | to a freed chunk). The M bit is also used for chunks which | |
1241 | originally came from a dumped heap via malloc_set_state in | |
1242 | hooks.c. | |
1243 | ||
1244 | 3. Chunks in fastbins are treated as allocated chunks from the | |
1245 | point of view of the chunk allocator. They are consolidated | |
1246 | with their neighbors only in bulk, in malloc_consolidate. | |
f65fd747 UD |
1247 | */ |
1248 | ||
1249 | /* | |
fa8d436c UD |
1250 | ---------- Size and alignment checks and conversions ---------- |
1251 | */ | |
f65fd747 | 1252 | |
3784dfc0 RE |
1253 | /* Conversion from malloc headers to user pointers, and back. When |
1254 | using memory tagging the user data and the malloc data structure | |
1255 | headers have distinct tags. Converting fully from one to the other | |
1256 | involves extracting the tag at the other address and creating a | |
1257 | suitable pointer using it. That can be quite expensive. There are | |
4eac0ab1 SN |
1258 | cases when the pointers are not dereferenced (for example, only used |
1259 | for an alignment check), so the tags are not relevant, and there are |
1260 | cases when user data is not tagged distinctly from malloc headers | |
1261 | (user data is untagged because tagging is done late in malloc and | |
1262 | early in free). User memory tagging across internal interfaces: | |
1263 | ||
1264 | sysmalloc: Returns untagged memory. | |
1265 | _int_malloc: Returns untagged memory. | |
1266 | _int_free: Takes untagged memory. | |
1267 | _int_memalign: Returns untagged memory. | |
1269 | _mid_memalign: Returns tagged memory. | |
1270 | _int_realloc: Takes and returns tagged memory. | |
1271 | */ | |
3784dfc0 RE |
1272 | |
1273 | /* The chunk header is two SIZE_SZ elements, but this is used widely, so | |
1274 | we define it here for clarity later. */ | |
1275 | #define CHUNK_HDR_SZ (2 * SIZE_SZ) | |
1276 | ||
4eac0ab1 | 1277 | /* Convert a chunk address to a user mem pointer without correcting |
3784dfc0 | 1278 | the tag. */ |
ca89f1c7 | 1279 | #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ)) |
f65fd747 | 1280 | |
ca89f1c7 SN |
1281 | /* Convert a chunk address to a user mem pointer and extract the right tag. */ |
1282 | #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ)) | |
1283 | ||
1284 | /* Convert a user mem pointer to a chunk address and extract the right tag. */ | |
0c719cf4 | 1285 | #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ))) |
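/* Illustrative sketch, not part of the original source (the name
   example_mem_offsets is hypothetical): the two conversions are
   inverses separated by the two-word chunk header, assuming memory
   tagging is disabled so that tag_at is the identity.  */
static void
example_mem_offsets (mchunkptr p)
{
  void *mem = chunk2mem (p);       /* past the size fields, to user data */
  assert ((char *) mem - (char *) p == CHUNK_HDR_SZ);
  assert (mem2chunk (mem) == p);   /* round trip back to the header */
}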
f65fd747 | 1286 | |
fa8d436c | 1287 | /* The smallest possible chunk */ |
7ecfbd38 | 1288 | #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize)) |
f65fd747 | 1289 | |
fa8d436c | 1290 | /* The smallest size we can malloc is an aligned minimal chunk */ |
f65fd747 | 1291 | |
fa8d436c UD |
1292 | #define MINSIZE \ |
1293 | (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)) | |
f65fd747 | 1294 | |
fa8d436c | 1295 | /* Check if m has acceptable alignment */ |
f65fd747 | 1296 | |
073f560e UD |
1297 | #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0) |
1298 | ||
1299 | #define misaligned_chunk(p) \ | |
ca89f1c7 | 1300 | ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \ |
073f560e | 1301 | & MALLOC_ALIGN_MASK) |
f65fd747 | 1302 | |
fa8d436c | 1303 | /* pad request bytes into a usable size -- internal version */ |
3784dfc0 RE |
1304 | /* Note: This must be a macro that evaluates to a compile time constant |
1305 | if passed a literal constant. */ | |
fa8d436c UD |
1306 | #define request2size(req) \ |
1307 | (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \ | |
1308 | MINSIZE : \ | |
1309 | ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK) | |
f65fd747 | 1310 | |
7519dee3 FW |
1311 | /* Check if REQ overflows when padded and aligned and if the resulting |
1312 | value is less than PTRDIFF_MAX. Returns the requested size or |
1313 | MINSIZE in case the value is less than MINSIZE, or 0 if any of the | |
1314 | previous checks fail. */ | |
1315 | static inline size_t | |
1316 | checked_request2size (size_t req) __nonnull (1) | |
9bf8e29c AZ |
1317 | { |
1318 | if (__glibc_unlikely (req > PTRDIFF_MAX)) | |
7519dee3 | 1319 | return 0; |
3784dfc0 | 1320 | |
3784dfc0 RE |
1321 | /* When using tagged memory, we cannot share the end of the user |
1322 | block with the header for the next chunk, so ensure that we | |
1323 | allocate blocks that are rounded up to the granule size. Take | |
1324 | care not to overflow from close to MAX_SIZE_T to a small | |
1325 | number. Ideally, this would be part of request2size(), but that | |
1326 | must be a macro that produces a compile time constant if passed | |
1327 | a constant literal. */ | |
63a20eb0 | 1328 | if (__glibc_unlikely (mtag_enabled)) |
850dbf24 SN |
1329 | { |
1330 | /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */ | |
1331 | asm (""); | |
1332 | ||
1333 | req = (req + (__MTAG_GRANULE_SIZE - 1)) & | |
1334 | ~(size_t)(__MTAG_GRANULE_SIZE - 1); | |
1335 | } | |
3784dfc0 | 1336 | |
7519dee3 | 1337 | return request2size (req); |
9bf8e29c | 1338 | } |
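/* Illustrative sketch, not part of the original source (the name
   example_request_sizes is hypothetical).  Assumes a typical 64-bit
   build (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, hence MINSIZE == 32)
   with memory tagging disabled.  */
static void
example_request_sizes (void)
{
  assert (checked_request2size (1) == MINSIZE);   /* tiny requests round up  */
  assert (checked_request2size (24) == 32);       /* 24 + SIZE_SZ fits in 32 */
  assert (checked_request2size (25) == 48);       /* next alignment step     */
  assert (checked_request2size (SIZE_MAX) == 0);  /* rejected: > PTRDIFF_MAX */
}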
f65fd747 UD |
1339 | |
1340 | /* | |
6c8dbf00 OB |
1341 | --------------- Physical chunk operations --------------- |
1342 | */ | |
f65fd747 | 1343 | |
10dc2a90 | 1344 | |
fa8d436c UD |
1345 | /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
1346 | #define PREV_INUSE 0x1 | |
f65fd747 | 1347 | |
fa8d436c | 1348 | /* extract inuse bit of previous chunk */ |
e9c4fe93 | 1349 | #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE) |
f65fd747 | 1350 | |
f65fd747 | 1351 | |
fa8d436c UD |
1352 | /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
1353 | #define IS_MMAPPED 0x2 | |
f65fd747 | 1354 | |
fa8d436c | 1355 | /* check for mmap()'ed chunk */ |
e9c4fe93 | 1356 | #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED) |
f65fd747 | 1357 | |
f65fd747 | 1358 | |
fa8d436c UD |
1359 | /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained |
1360 | from a non-main arena. This is only set immediately before handing | |
1361 | the chunk to the user, if necessary. */ | |
1362 | #define NON_MAIN_ARENA 0x4 | |
f65fd747 | 1363 | |
ae9166f2 | 1364 | /* Check for chunk from main arena. */ |
e9c4fe93 FW |
1365 | #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0) |
1366 | ||
1367 | /* Mark a chunk as not being on the main arena. */ | |
1368 | #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA) | |
f65fd747 UD |
1369 | |
1370 | ||
a9177ff5 | 1371 | /* |
6c8dbf00 | 1372 | Bits to mask off when extracting size |
f65fd747 | 1373 | |
6c8dbf00 OB |
1374 | Note: IS_MMAPPED is intentionally not masked off from size field in |
1375 | macros for which mmapped chunks should never be seen. This should | |
1376 | cause helpful core dumps to occur if it is tried by accident by | |
1377 | people extending or adapting this malloc. | |
1378 | */ | |
1379 | #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA) | |
f65fd747 | 1380 | |
fa8d436c | 1381 | /* Get size, ignoring use bits */ |
e9c4fe93 | 1382 | #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS)) |
f65fd747 | 1383 | |
e9c4fe93 FW |
1384 | /* Like chunksize, but do not mask SIZE_BITS. */ |
1385 | #define chunksize_nomask(p) ((p)->mchunk_size) | |
f65fd747 | 1386 | |
fa8d436c | 1387 | /* Ptr to next physical malloc_chunk. */ |
e9c4fe93 FW |
1388 | #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p))) |
1389 | ||
229855e5 | 1390 | /* Size of the chunk below P. Only valid if !prev_inuse (P). */ |
e9c4fe93 FW |
1391 | #define prev_size(p) ((p)->mchunk_prev_size) |
1392 | ||
229855e5 | 1393 | /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */ |
e9c4fe93 | 1394 | #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz)) |
f65fd747 | 1395 | |
229855e5 | 1396 | /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */ |
e9c4fe93 | 1397 | #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p))) |
f65fd747 | 1398 | |
fa8d436c | 1399 | /* Treat space at ptr + offset as a chunk */ |
6c8dbf00 | 1400 | #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s))) |
fa8d436c UD |
1401 | |
1402 | /* extract p's inuse bit */ | |
6c8dbf00 | 1403 | #define inuse(p) \ |
e9c4fe93 | 1404 | ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE) |
f65fd747 | 1405 | |
fa8d436c | 1406 | /* set/clear chunk as being inuse without otherwise disturbing */ |
6c8dbf00 | 1407 | #define set_inuse(p) \ |
e9c4fe93 | 1408 | ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE |
f65fd747 | 1409 | |
6c8dbf00 | 1410 | #define clear_inuse(p) \ |
e9c4fe93 | 1411 | ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE) |
f65fd747 UD |
1412 | |
1413 | ||
fa8d436c | 1414 | /* check/set/clear inuse bits in known places */ |
6c8dbf00 | 1415 | #define inuse_bit_at_offset(p, s) \ |
e9c4fe93 | 1416 | (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE) |
f65fd747 | 1417 | |
6c8dbf00 | 1418 | #define set_inuse_bit_at_offset(p, s) \ |
e9c4fe93 | 1419 | (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE) |
f65fd747 | 1420 | |
6c8dbf00 | 1421 | #define clear_inuse_bit_at_offset(p, s) \ |
e9c4fe93 | 1422 | (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE)) |
f65fd747 | 1423 | |
f65fd747 | 1424 | |
fa8d436c | 1425 | /* Set size at head, without disturbing its use bit */ |
e9c4fe93 | 1426 | #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s))) |
f65fd747 | 1427 | |
fa8d436c | 1428 | /* Set size/use field */ |
e9c4fe93 | 1429 | #define set_head(p, s) ((p)->mchunk_size = (s)) |
f65fd747 | 1430 | |
fa8d436c | 1431 | /* Set size at footer (only when chunk is not in use) */ |
e9c4fe93 | 1432 | #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s)) |
f65fd747 | 1433 | |
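/* Illustrative sketch, not part of the original source (the name
   example_walk_neighbors is hypothetical): boundary-tag navigation
   with the macros above, assuming P is a valid non-mmapped chunk.  */
static void
example_walk_neighbors (mchunkptr p)
{
  mchunkptr next = next_chunk (p);      /* forward: add P's own size */
  if (!prev_inuse (p))
    {
      /* The size word below P is valid only while P's P-bit is clear.  */
      mchunkptr prev = prev_chunk (p);
      assert (next_chunk (prev) == p);  /* footer and header agree */
    }
  if (!inuse (p))                       /* P's in-use bit lives in NEXT */
    assert (prev_size (next) == chunksize (p));
}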
e9c4fe93 FW |
1434 | #pragma GCC poison mchunk_size |
1435 | #pragma GCC poison mchunk_prev_size | |
1436 | ||
faf003ed SN |
1437 | /* This is the size of the real usable data in the chunk. Not valid for |
1438 | dumped heap chunks. */ | |
1439 | #define memsize(p) \ | |
1440 | (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \ | |
1441 | chunksize (p) - CHUNK_HDR_SZ : \ | |
1442 | chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ)) | |
1443 | ||
5295172e | 1444 | /* If memory tagging is enabled the layout changes to accommodate the granule |
faf003ed SN |
1445 | size; this is wasteful for small allocations, so it is not done by default. | |
1446 | Both the chunk header and the user data have to be granule aligned. */ | |
1447 | _Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ, | |
1448 | "memory tagging is not supported with large granule."); | |
1449 | ||
42bac88a SN |
1450 | static __always_inline void * |
1451 | tag_new_usable (void *ptr) | |
1452 | { | |
1453 | if (__glibc_unlikely (mtag_enabled) && ptr) | |
1454 | { | |
1455 | mchunkptr cp = mem2chunk(ptr); | |
faf003ed | 1456 | ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp)); |
42bac88a SN |
1457 | } |
1458 | return ptr; | |
1459 | } | |
1460 | ||
fa8d436c | 1461 | /* |
6c8dbf00 | 1462 | -------------------- Internal data structures -------------------- |
fa8d436c UD |
1463 | |
1464 | All internal state is held in an instance of malloc_state defined | |
1465 | below. There are no other static variables, except in two optional | |
a9177ff5 | 1466 | cases: |
6c8dbf00 OB |
1467 | * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. |
1468 | * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor | |
22a89187 | 1469 | for mmap. |
fa8d436c UD |
1470 | |
1471 | Beware of lots of tricks that minimize the total bookkeeping space | |
1472 | requirements. The result is a little over 1K bytes (for 4-byte | |
1473 | pointers and size_t). | |
6c8dbf00 | 1474 | */ |
f65fd747 UD |
1475 | |
1476 | /* | |
6c8dbf00 | 1477 | Bins |
fa8d436c UD |
1478 | |
1479 | An array of bin headers for free chunks. Each bin is doubly | |
1480 | linked. The bins are approximately proportionally (log) spaced. | |
1481 | There are a lot of these bins (128). This may look excessive, but | |
1482 | works very well in practice. Most bins hold sizes that are | |
1483 | unusual as malloc request sizes, but are more usual for fragments | |
1484 | and consolidated sets of chunks, which is what these bins hold, so | |
1485 | they can be found quickly. All procedures maintain the invariant | |
1486 | that no consolidated chunk physically borders another one, so each | |
1487 | chunk in a list is known to be preceded and followed by either | |
1488 | inuse chunks or the ends of memory. | |
1489 | ||
1490 | Chunks in bins are kept in size order, with ties going to the | |
1491 | approximately least recently used chunk. Ordering isn't needed | |
1492 | for the small bins, which all contain the same-sized chunks, but | |
1493 | facilitates best-fit allocation for larger chunks. These lists | |
1494 | are just sequential. Keeping them in order almost never requires | |
1495 | enough traversal to warrant using fancier ordered data | |
a9177ff5 | 1496 | structures. |
fa8d436c UD |
1497 | |
1498 | Chunks of the same size are linked with the most | |
1499 | recently freed at the front, and allocations are taken from the | |
1500 | back. This results in LRU (FIFO) allocation order, which tends | |
1501 | to give each chunk an equal opportunity to be consolidated with | |
1502 | adjacent freed chunks, resulting in larger free chunks and less | |
1503 | fragmentation. | |
1504 | ||
1505 | To simplify use in double-linked lists, each bin header acts | |
1506 | as a malloc_chunk. This avoids special-casing for headers. | |
1507 | But to conserve space and improve locality, we allocate | |
1508 | only the fd/bk pointers of bins, and then use repositioning tricks | |
a9177ff5 | 1509 | to treat these as the fields of a malloc_chunk*. |
6c8dbf00 | 1510 | */ |
f65fd747 | 1511 | |
6c8dbf00 | 1512 | typedef struct malloc_chunk *mbinptr; |
f65fd747 | 1513 | |
fa8d436c | 1514 | /* addressing -- note that bin_at(0) does not exist */ |
41999a1a UD |
1515 | #define bin_at(m, i) \ |
1516 | (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \ | |
6c8dbf00 | 1517 | - offsetof (struct malloc_chunk, fd)) |
f65fd747 | 1518 | |
fa8d436c | 1519 | /* analog of ++bin */ |
6c8dbf00 | 1520 | #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1))) |
f65fd747 | 1521 | |
fa8d436c UD |
1522 | /* Reminders about list directionality within bins */ |
1523 | #define first(b) ((b)->fd) | |
1524 | #define last(b) ((b)->bk) | |
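/* Illustrative sketch, not part of the original source (the name
   example_scan_bin is hypothetical): scanning a normal bin the way the
   debug checks do, from the least recently freed chunk (the `back',
   where allocations are taken) toward the front.  B would typically
   come from bin_at (av, idx).  */
static void
example_scan_bin (mbinptr b)
{
  for (mchunkptr p = last (b); p != b; p = p->bk)
    /* Each P is a free chunk; its list links must be intact.  */
    assert (p->fd->bk == p && p->bk->fd == p);
}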
f65fd747 | 1525 | |
fa8d436c | 1526 | /* |
6c8dbf00 | 1527 | Indexing |
fa8d436c UD |
1528 | |
1529 | Bins for sizes < 512 bytes contain chunks of all the same size, spaced | |
1530 | 8 bytes apart. Larger bins are approximately logarithmically spaced: | |
f65fd747 | 1531 | |
fa8d436c UD |
1532 | 64 bins of size 8 |
1533 | 32 bins of size 64 | |
1534 | 16 bins of size 512 | |
1535 | 8 bins of size 4096 | |
1536 | 4 bins of size 32768 | |
1537 | 2 bins of size 262144 | |
1538 | 1 bin of size what's left | |
f65fd747 | 1539 | |
fa8d436c UD |
1540 | There is actually a little bit of slop in the numbers in bin_index |
1541 | for the sake of speed. This makes no difference elsewhere. | |
f65fd747 | 1542 | |
fa8d436c UD |
1543 | The bins top out around 1MB because we expect to service large |
1544 | requests via mmap. | |
b5a2bbe6 L |
1545 | |
1546 | Bin 0 does not exist. Bin 1 is the unordered list; if that would be | |
1547 | a valid chunk size the small bins are bumped up one. | |
6c8dbf00 | 1548 | */ |
f65fd747 | 1549 | |
fa8d436c UD |
1550 | #define NBINS 128 |
1551 | #define NSMALLBINS 64 | |
1d47e92f | 1552 | #define SMALLBIN_WIDTH MALLOC_ALIGNMENT |
3784dfc0 | 1553 | #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ) |
b5a2bbe6 | 1554 | #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH) |
f65fd747 | 1555 | |
fa8d436c | 1556 | #define in_smallbin_range(sz) \ |
6c8dbf00 | 1557 | ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE) |
f65fd747 | 1558 | |
1d47e92f | 1559 | #define smallbin_index(sz) \ |
6c8dbf00 | 1560 | ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\ |
b5a2bbe6 | 1561 | + SMALLBIN_CORRECTION) |
f65fd747 | 1562 | |
1d47e92f | 1563 | #define largebin_index_32(sz) \ |
6c8dbf00 OB |
1564 | (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\ |
1565 | ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ | |
1566 | ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ | |
1567 | ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ | |
1568 | ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ | |
1569 | 126) | |
f65fd747 | 1570 | |
b5a2bbe6 | 1571 | #define largebin_index_32_big(sz) \ |
6c8dbf00 OB |
1572 | (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\ |
1573 | ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ | |
1574 | ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ | |
1575 | ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ | |
1576 | ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ | |
1577 | 126) | |
b5a2bbe6 | 1578 | |
1d47e92f UD |
1579 | // XXX It remains to be seen whether it is good to keep the widths of |
1580 | // XXX the buckets the same or whether it should be scaled by a factor | |
1581 | // XXX of two as well. | |
1582 | #define largebin_index_64(sz) \ | |
6c8dbf00 OB |
1583 | (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\ |
1584 | ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ | |
1585 | ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ | |
1586 | ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ | |
1587 | ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ | |
1588 | 126) | |
1d47e92f UD |
1589 | |
1590 | #define largebin_index(sz) \ | |
b5a2bbe6 L |
1591 | (SIZE_SZ == 8 ? largebin_index_64 (sz) \ |
1592 | : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \ | |
1593 | : largebin_index_32 (sz)) | |
1d47e92f | 1594 | |
fa8d436c | 1595 | #define bin_index(sz) \ |
6c8dbf00 | 1596 | ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz)) |
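/* Illustrative sketch, not part of the original source (the name
   example_bin_indices is hypothetical).  Assumes SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16, i.e. 16-byte-spaced small bins and
   MIN_LARGE_SIZE == 1024.  */
static void
example_bin_indices (void)
{
  assert (bin_index (32) == 2);      /* smallest chunk, first small bin */
  assert (bin_index (1008) == 63);   /* last small bin                  */
  assert (bin_index (1024) == 64);   /* first large bin, 64 bytes wide  */
}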
f65fd747 | 1597 | |
1ecba1fa FW |
1598 | /* Take a chunk off a bin list. */ |
1599 | static void | |
1600 | unlink_chunk (mstate av, mchunkptr p) | |
1601 | { | |
1602 | if (chunksize (p) != prev_size (next_chunk (p))) | |
1603 | malloc_printerr ("corrupted size vs. prev_size"); | |
1604 | ||
1605 | mchunkptr fd = p->fd; | |
1606 | mchunkptr bk = p->bk; | |
1607 | ||
1608 | if (__builtin_expect (fd->bk != p || bk->fd != p, 0)) | |
1609 | malloc_printerr ("corrupted double-linked list"); | |
1610 | ||
1611 | fd->bk = bk; | |
1612 | bk->fd = fd; | |
1613 | if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL) | |
1614 | { | |
1615 | if (p->fd_nextsize->bk_nextsize != p | |
1616 | || p->bk_nextsize->fd_nextsize != p) | |
1617 | malloc_printerr ("corrupted double-linked list (not small)"); | |
1618 | ||
1619 | if (fd->fd_nextsize == NULL) | |
1620 | { | |
1621 | if (p->fd_nextsize == p) | |
1622 | fd->fd_nextsize = fd->bk_nextsize = fd; | |
1623 | else | |
1624 | { | |
1625 | fd->fd_nextsize = p->fd_nextsize; | |
1626 | fd->bk_nextsize = p->bk_nextsize; | |
1627 | p->fd_nextsize->bk_nextsize = fd; | |
1628 | p->bk_nextsize->fd_nextsize = fd; | |
1629 | } | |
1630 | } | |
1631 | else | |
1632 | { | |
1633 | p->fd_nextsize->bk_nextsize = p->bk_nextsize; | |
1634 | p->bk_nextsize->fd_nextsize = p->fd_nextsize; | |
1635 | } | |
1636 | } | |
1637 | } | |
f65fd747 UD |
1638 | |
1639 | /* | |
6c8dbf00 | 1640 | Unsorted chunks |
fa8d436c UD |
1641 | |
1642 | All remainders from chunk splits, as well as all returned chunks, | |
1643 | are first placed in the "unsorted" bin. They are then placed | |
1644 | in regular bins after malloc gives them ONE chance to be used before | |
1645 | binning. So, basically, the unsorted_chunks list acts as a queue, | |
1646 | with chunks being placed on it in free (and malloc_consolidate), | |
1647 | and taken off (to be either used or placed in bins) in malloc. | |
1648 | ||
1649 | The NON_MAIN_ARENA flag is never set for unsorted chunks, so it | |
1650 | does not have to be taken into account in size comparisons. | |
6c8dbf00 | 1651 | */ |
f65fd747 | 1652 | |
fa8d436c | 1653 | /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */ |
6c8dbf00 | 1654 | #define unsorted_chunks(M) (bin_at (M, 1)) |
f65fd747 | 1655 | |
fa8d436c | 1656 | /* |
6c8dbf00 | 1657 | Top |
fa8d436c UD |
1658 | |
1659 | The top-most available chunk (i.e., the one bordering the end of | |
1660 | available memory) is treated specially. It is never included in | |
1661 | any bin, is used only if no other chunk is available, and is | |
1662 | released back to the system if it is very large (see | |
1663 | M_TRIM_THRESHOLD). Because top initially | |
1664 | points to its own bin with initial zero size, thus forcing | |
1665 | extension on the first malloc request, we avoid having any special | |
1666 | code in malloc to check whether it even exists yet. But we still | |
1667 | need to do so when getting memory from system, so we make | |
1668 | initial_top treat the bin as a legal but unusable chunk during the | |
1669 | interval between initialization and the first call to | |
3b49edc0 | 1670 | sysmalloc. (This is somewhat delicate, since it relies on |
fa8d436c | 1671 | the 2 preceding words to be zero during this interval as well.) |
6c8dbf00 | 1672 | */ |
f65fd747 | 1673 | |
fa8d436c | 1674 | /* Conveniently, the unsorted bin can be used as dummy top on first call */ |
6c8dbf00 | 1675 | #define initial_top(M) (unsorted_chunks (M)) |
f65fd747 | 1676 | |
fa8d436c | 1677 | /* |
6c8dbf00 | 1678 | Binmap |
f65fd747 | 1679 | |
fa8d436c UD |
1680 | To help compensate for the large number of bins, a one-level index |
1681 | structure is used for bin-by-bin searching. `binmap' is a | |
1682 | bitvector recording whether bins are definitely empty so they can | |
1683 | be skipped over during traversals. The bits are NOT always | |
1684 | cleared as soon as bins are empty, but instead only | |
1685 | when they are noticed to be empty during traversal in malloc. | |
6c8dbf00 | 1686 | */ |
f65fd747 | 1687 | |
fa8d436c UD |
1688 | /* Conservatively use 32 bits per map word, even if on 64bit system */ |
1689 | #define BINMAPSHIFT 5 | |
1690 | #define BITSPERMAP (1U << BINMAPSHIFT) | |
1691 | #define BINMAPSIZE (NBINS / BITSPERMAP) | |
f65fd747 | 1692 | |
fa8d436c | 1693 | #define idx2block(i) ((i) >> BINMAPSHIFT) |
6c8dbf00 | 1694 | #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1)))) |
f65fd747 | 1695 | |
6c8dbf00 OB |
1696 | #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i)) |
1697 | #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i))) | |
1698 | #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i)) | |
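/* Illustrative sketch, not part of the original source (the name
   example_next_nonempty_bin is hypothetical): a bin-by-bin scan that
   consults the binmap; the real scan in _int_malloc additionally skips
   whole 32-bin blocks whose map word is zero.  BINMAP is the binmap
   array of a malloc_state.  */
static int
example_next_nonempty_bin (const unsigned int *binmap, int i)
{
  while (i < NBINS && !(binmap[idx2block (i)] & idx2bit (i)))
    ++i;        /* bit clear => bin is definitely empty               */
  return i;     /* bit set   => bin *may* hold chunks (bits are lazy) */
}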
f65fd747 | 1699 | |
fa8d436c | 1700 | /* |
6c8dbf00 | 1701 | Fastbins |
fa8d436c UD |
1702 | |
1703 | An array of lists holding recently freed small chunks. Fastbins | |
1704 | are not doubly linked. It is faster to single-link them, and | |
1705 | since chunks are never removed from the middles of these lists, | |
1706 | double linking is not necessary. Also, unlike regular bins, they | |
1707 | are not even processed in FIFO order (they use faster LIFO) since | |
1708 | ordering doesn't much matter in the transient contexts in which | |
1709 | fastbins are normally used. | |
1710 | ||
1711 | Chunks in fastbins keep their inuse bit set, so they cannot | |
1712 | be consolidated with other free chunks. malloc_consolidate | |
1713 | releases all chunks in fastbins and consolidates them with | |
a9177ff5 | 1714 | other free chunks. |
6c8dbf00 | 1715 | */ |
f65fd747 | 1716 | |
6c8dbf00 | 1717 | typedef struct malloc_chunk *mfastbinptr; |
425ce2ed | 1718 | #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx]) |
f65fd747 | 1719 | |
fa8d436c | 1720 | /* offset 2 to use otherwise unindexable first 2 bins */ |
425ce2ed | 1721 | #define fastbin_index(sz) \ |
6c8dbf00 | 1722 | ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) |
425ce2ed | 1723 | |
f65fd747 | 1724 | |
fa8d436c | 1725 | /* The maximum fastbin request size we support */ |
425ce2ed | 1726 | #define MAX_FAST_SIZE (80 * SIZE_SZ / 4) |
f65fd747 | 1727 | |
6c8dbf00 | 1728 | #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1) |
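/* Illustrative sketch, not part of the original source (the name
   example_fastbin_indices is hypothetical).  Assumes SIZE_SZ == 8, so
   chunk sizes map onto fastbin slots in 16-byte steps starting at
   MINSIZE.  */
static void
example_fastbin_indices (void)
{
  assert (fastbin_index (32) == 0);   /* MINSIZE-sized chunks */
  assert (fastbin_index (48) == 1);
  assert (fastbin_index (request2size (MAX_FAST_SIZE)) == NFASTBINS - 1);
}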
f65fd747 UD |
1729 | |
1730 | /* | |
6c8dbf00 OB |
1731 | FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() |
1732 | that triggers automatic consolidation of possibly-surrounding | |
1733 | fastbin chunks. This is a heuristic, so the exact value should not | |
1734 | matter too much. It is defined at half the default trim threshold as a | |
1735 | compromise heuristic to only attempt consolidation if it is likely | |
1736 | to lead to trimming. However, it is not dynamically tunable, since | |
1737 | consolidation reduces fragmentation surrounding large chunks even | |
1738 | if trimming is not used. | |
1739 | */ | |
f65fd747 | 1740 | |
fa8d436c | 1741 | #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL) |
f65fd747 | 1742 | |
f65fd747 | 1743 | /* |
6c8dbf00 OB |
1744 | NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous |
1745 | regions. Otherwise, contiguity is exploited in merging together, | |
1746 | when possible, results from consecutive MORECORE calls. | |
f65fd747 | 1747 | |
6c8dbf00 OB |
1748 | The initial value comes from MORECORE_CONTIGUOUS, but is |
1749 | changed dynamically if mmap is ever used as an sbrk substitute. | |
1750 | */ | |
f65fd747 | 1751 | |
fa8d436c | 1752 | #define NONCONTIGUOUS_BIT (2U) |
f65fd747 | 1753 | |
6c8dbf00 OB |
1754 | #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0) |
1755 | #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0) | |
1756 | #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT) | |
9bf248c6 | 1757 | #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT) |
f65fd747 | 1758 | |
eac43cbb FW |
1759 | /* Maximum size of memory handled in fastbins. */ |
1760 | static INTERNAL_SIZE_T global_max_fast; | |
1761 | ||
a9177ff5 RM |
1762 | /* |
1763 | Set value of max_fast. | |
fa8d436c | 1764 | Use impossibly small value if 0. |
3381be5c WD |
1765 | Precondition: there are no existing fastbin chunks in the main arena. |
1766 | Since do_check_malloc_state () checks this, we call malloc_consolidate () | |
1767 | before changing max_fast. Note other arenas will leak their fast bin | |
1768 | entries if max_fast is reduced. | |
6c8dbf00 | 1769 | */ |
f65fd747 | 1770 | |
9bf248c6 | 1771 | #define set_max_fast(s) \ |
b9cde4e3 | 1772 | global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \ |
ff12e0fb | 1773 | ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK)) |
f65fd747 | 1774 | |
eac43cbb FW |
1775 | static inline INTERNAL_SIZE_T |
1776 | get_max_fast (void) | |
1777 | { | |
1778 | /* Tell the GCC optimizers that global_max_fast is never larger | |
1779 | than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in | |
1780 | _int_malloc after constant propagation of the size parameter. | |
1781 | (The code never executes because malloc preserves the | |
1782 | global_max_fast invariant, but the optimizers may not recognize | |
1783 | this.) */ | |
1784 | if (global_max_fast > MAX_FAST_SIZE) | |
1785 | __builtin_unreachable (); | |
1786 | return global_max_fast; | |
1787 | } | |
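/* Illustrative sketch, not part of the original source (the name
   example_fits_fastbin is hypothetical): the fast path in _int_malloc
   is gated on this bound, comparing the *padded* chunk size rather
   than the user's raw request.  */
static bool
example_fits_fastbin (INTERNAL_SIZE_T nb)
{
  return nb <= get_max_fast ();
}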
f65fd747 UD |
1788 | |
1789 | /* | |
fa8d436c | 1790 | ----------- Internal state representation and initialization ----------- |
6c8dbf00 | 1791 | */ |
f65fd747 | 1792 | |
e956075a WD |
1793 | /* |
1794 | have_fastchunks indicates that there are probably some fastbin chunks. | |
1795 | It is set true on entering a chunk into any fastbin, and cleared early in | |
1796 | malloc_consolidate. The value is approximate since it may be set when there | |
1797 | are no fastbin chunks, or it may be clear even if there are fastbin chunks | |
1798 | available. Given its sole purpose is to reduce the number of redundant calls | |
1799 | to malloc_consolidate, it does not affect correctness. As a result we can | |
1800 | safely use relaxed atomic accesses; see the sketch after the struct below. | |
1801 | */ | |
1802 | ||
1803 | ||
6c8dbf00 OB |
1804 | struct malloc_state |
1805 | { | |
fa8d436c | 1806 | /* Serialize access. */ |
cbb47fa1 | 1807 | __libc_lock_define (, mutex); |
9bf248c6 UD |
1808 | |
1809 | /* Flags (formerly in max_fast). */ | |
1810 | int flags; | |
f65fd747 | 1811 | |
e956075a | 1812 | /* Set if the fastbin chunks contain recently inserted free blocks. */ |
2c2245b9 WD |
1813 | /* Note this is a bool but not all targets support atomics on booleans. */ |
1814 | int have_fastchunks; | |
e956075a | 1815 | |
fa8d436c | 1816 | /* Fastbins */ |
6c8dbf00 | 1817 | mfastbinptr fastbinsY[NFASTBINS]; |
f65fd747 | 1818 | |
fa8d436c | 1819 | /* Base of the topmost chunk -- not otherwise kept in a bin */ |
6c8dbf00 | 1820 | mchunkptr top; |
f65fd747 | 1821 | |
fa8d436c | 1822 | /* The remainder from the most recent split of a small request */ |
6c8dbf00 | 1823 | mchunkptr last_remainder; |
f65fd747 | 1824 | |
fa8d436c | 1825 | /* Normal bins packed as described above */ |
6c8dbf00 | 1826 | mchunkptr bins[NBINS * 2 - 2]; |
f65fd747 | 1827 | |
fa8d436c | 1828 | /* Bitmap of bins */ |
6c8dbf00 | 1829 | unsigned int binmap[BINMAPSIZE]; |
f65fd747 | 1830 | |
fa8d436c UD |
1831 | /* Linked list */ |
1832 | struct malloc_state *next; | |
f65fd747 | 1833 | |
a62719ba | 1834 | /* Linked list for free arenas. Access to this field is serialized |
90c400bd | 1835 | by free_list_lock in arena.c. */ |
425ce2ed | 1836 | struct malloc_state *next_free; |
425ce2ed | 1837 | |
a62719ba | 1838 | /* Number of threads attached to this arena. 0 if the arena is on |
90c400bd FW |
1839 | the free list. Access to this field is serialized by |
1840 | free_list_lock in arena.c. */ | |
a62719ba FW |
1841 | INTERNAL_SIZE_T attached_threads; |
1842 | ||
fa8d436c UD |
1843 | /* Memory allocated from the system in this arena. */ |
1844 | INTERNAL_SIZE_T system_mem; | |
1845 | INTERNAL_SIZE_T max_system_mem; | |
1846 | }; | |
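/* Illustrative sketch of the have_fastchunks pattern described before
   the struct, not part of the original source (the name
   example_note_fastchunks is hypothetical; the real sites are
   _int_free and malloc_consolidate).  */
static void
example_note_fastchunks (mstate av)
{
  /* _int_free, after pushing a chunk onto a fastbin: */
  atomic_store_relaxed (&av->have_fastchunks, true);

  /* Consolidation paths test the flag first; a stale value costs at
     most a redundant or deferred consolidation, never correctness: */
  if (atomic_load_relaxed (&av->have_fastchunks))
    {
      /* ... malloc_consolidate (av) would run here ... */
    }
}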
f65fd747 | 1847 | |
6c8dbf00 OB |
1848 | struct malloc_par |
1849 | { | |
fa8d436c | 1850 | /* Tunable parameters */ |
6c8dbf00 OB |
1851 | unsigned long trim_threshold; |
1852 | INTERNAL_SIZE_T top_pad; | |
1853 | INTERNAL_SIZE_T mmap_threshold; | |
1854 | INTERNAL_SIZE_T arena_test; | |
1855 | INTERNAL_SIZE_T arena_max; | |
fa8d436c | 1856 | |
5f6d8d97 AZ |
1857 | #if HAVE_TUNABLES |
1858 | /* Transparent Large Page support. */ | |
1859 | INTERNAL_SIZE_T thp_pagesize; | |
98d5fcb8 AZ |
1860 | /* A value different from 0 means to align mmap allocations to hp_pagesize |
1861 | and to add hp_flags to the mmap flags. */ |
1862 | INTERNAL_SIZE_T hp_pagesize; | |
1863 | int hp_flags; | |
5f6d8d97 AZ |
1864 | #endif |
1865 | ||
fa8d436c | 1866 | /* Memory map support */ |
6c8dbf00 OB |
1867 | int n_mmaps; |
1868 | int n_mmaps_max; | |
1869 | int max_n_mmaps; | |
1d05c2fb UD |
1870 | /* the mmap_threshold is dynamic, until the user sets |
1871 | it manually, at which point we need to disable any | |
1872 | dynamic behavior. */ | |
6c8dbf00 | 1873 | int no_dyn_threshold; |
fa8d436c | 1874 | |
fa8d436c | 1875 | /* Statistics */ |
6c8dbf00 | 1876 | INTERNAL_SIZE_T mmapped_mem; |
6c8dbf00 | 1877 | INTERNAL_SIZE_T max_mmapped_mem; |
fa8d436c UD |
1878 | |
1879 | /* First address handed out by MORECORE/sbrk. */ | |
6c8dbf00 | 1880 | char *sbrk_base; |
d5c3fafc DD |
1881 | |
1882 | #if USE_TCACHE | |
1883 | /* Maximum number of buckets to use. */ | |
1884 | size_t tcache_bins; | |
1885 | size_t tcache_max_bytes; | |
1886 | /* Maximum number of chunks in each bucket. */ | |
1887 | size_t tcache_count; | |
1888 | /* Maximum number of chunks to remove from the unsorted list, which | |
1889 | aren't used to prefill the cache. */ | |
1890 | size_t tcache_unsorted_limit; | |
1891 | #endif | |
fa8d436c | 1892 | }; |
f65fd747 | 1893 | |
fa8d436c UD |
1894 | /* There are several instances of this struct ("arenas") in this |
1895 | malloc. If you are adapting this malloc in a way that does NOT use | |
1896 | a static or mmapped malloc_state, you MUST explicitly zero-fill it | |
1897 | before using. This malloc relies on the property that malloc_state | |
1898 | is initialized to all zeroes (as is true of C statics). */ | |
f65fd747 | 1899 | |
02d46fc4 | 1900 | static struct malloc_state main_arena = |
6c8dbf00 | 1901 | { |
400e1226 | 1902 | .mutex = _LIBC_LOCK_INITIALIZER, |
a62719ba FW |
1903 | .next = &main_arena, |
1904 | .attached_threads = 1 | |
6c8dbf00 | 1905 | }; |
f65fd747 | 1906 | |
fa8d436c | 1907 | /* There is only one instance of the malloc parameters. */ |
f65fd747 | 1908 | |
02d46fc4 | 1909 | static struct malloc_par mp_ = |
6c8dbf00 OB |
1910 | { |
1911 | .top_pad = DEFAULT_TOP_PAD, | |
1912 | .n_mmaps_max = DEFAULT_MMAP_MAX, | |
1913 | .mmap_threshold = DEFAULT_MMAP_THRESHOLD, | |
1914 | .trim_threshold = DEFAULT_TRIM_THRESHOLD, | |
1915 | #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8)) | |
1916 | .arena_test = NARENAS_FROM_NCORES (1) | |
d5c3fafc DD |
1917 | #if USE_TCACHE |
1918 | , | |
1919 | .tcache_count = TCACHE_FILL_COUNT, | |
1920 | .tcache_bins = TCACHE_MAX_BINS, | |
1921 | .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1), | |
1922 | .tcache_unsorted_limit = 0 /* No limit. */ | |
1923 | #endif | |
6c8dbf00 | 1924 | }; |
f65fd747 | 1925 | |
fa8d436c | 1926 | /* |
6c8dbf00 | 1927 | Initialize a malloc_state struct. |
f65fd747 | 1928 | |
3381be5c WD |
1929 | This is called from ptmalloc_init () or from _int_new_arena () |
1930 | when creating a new arena. | |
6c8dbf00 | 1931 | */ |
f65fd747 | 1932 | |
6c8dbf00 OB |
1933 | static void |
1934 | malloc_init_state (mstate av) | |
fa8d436c | 1935 | { |
6c8dbf00 | 1936 | int i; |
fa8d436c | 1937 | mbinptr bin; |
a9177ff5 | 1938 | |
fa8d436c | 1939 | /* Establish circular links for normal bins */ |
6c8dbf00 OB |
1940 | for (i = 1; i < NBINS; ++i) |
1941 | { | |
1942 | bin = bin_at (av, i); | |
1943 | bin->fd = bin->bk = bin; | |
1944 | } | |
f65fd747 | 1945 | |
fa8d436c UD |
1946 | #if MORECORE_CONTIGUOUS |
1947 | if (av != &main_arena) | |
1948 | #endif | |
6c8dbf00 | 1949 | set_noncontiguous (av); |
9bf248c6 | 1950 | if (av == &main_arena) |
6c8dbf00 | 1951 | set_max_fast (DEFAULT_MXFAST); |
e956075a | 1952 | atomic_store_relaxed (&av->have_fastchunks, false); |
f65fd747 | 1953 | |
6c8dbf00 | 1954 | av->top = initial_top (av); |
fa8d436c | 1955 | } |
e9b3e3c5 | 1956 | |
a9177ff5 | 1957 | /* |
fa8d436c | 1958 | Other internal utilities operating on mstates |
6c8dbf00 | 1959 | */ |
f65fd747 | 1960 | |
6c8dbf00 OB |
1961 | static void *sysmalloc (INTERNAL_SIZE_T, mstate); |
1962 | static int systrim (size_t, mstate); | |
1963 | static void malloc_consolidate (mstate); | |
7e3be507 | 1964 | |
404d4cef RM |
1965 | |
1966 | /* -------------- Early definitions for debugging hooks ---------------- */ | |
1967 | ||
0a947e06 FW |
1968 | /* This function is called from the arena shutdown hook, to free the |
1969 | thread cache (if it exists). */ | |
1970 | static void tcache_thread_shutdown (void); | |
404d4cef | 1971 | |
854278df UD |
1972 | /* ------------------ Testing support ----------------------------------*/ |
1973 | ||
1974 | static int perturb_byte; | |
1975 | ||
af102d95 | 1976 | static void |
e8349efd OB |
1977 | alloc_perturb (char *p, size_t n) |
1978 | { | |
1979 | if (__glibc_unlikely (perturb_byte)) | |
1980 | memset (p, perturb_byte ^ 0xff, n); | |
1981 | } | |
1982 | ||
af102d95 | 1983 | static void |
e8349efd OB |
1984 | free_perturb (char *p, size_t n) |
1985 | { | |
1986 | if (__glibc_unlikely (perturb_byte)) | |
1987 | memset (p, perturb_byte, n); | |
1988 | } | |
1989 | ||
854278df UD |
1990 | |
1991 | ||
3ea5be54 AO |
1992 | #include <stap-probe.h> |
1993 | ||
5f6d8d97 AZ |
1994 | /* ----------- Routines dealing with transparent huge pages ----------- */ |
1995 | ||
1996 | static inline void | |
1997 | madvise_thp (void *p, INTERNAL_SIZE_T size) | |
1998 | { | |
1999 | #if HAVE_TUNABLES && defined (MADV_HUGEPAGE) | |
2000 | /* Do not consider areas smaller than a huge page or if the tunable is | |
2001 | not active. */ | |
2002 | if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize) | |
2003 | return; | |
7478c995 AZ |
2004 | |
2005 | /* Linux requires the input address to be page-aligned, and unaligned |
2006 | inputs happen only for the initial data segment. */ |
2007 | if (__glibc_unlikely (!PTR_IS_ALIGNED (p, GLRO (dl_pagesize)))) | |
2008 | { | |
2009 | void *q = PTR_ALIGN_DOWN (p, GLRO (dl_pagesize)); | |
2010 | size += PTR_DIFF (p, q); | |
2011 | p = q; | |
2012 | } | |
2013 | ||
5f6d8d97 AZ |
2014 | __madvise (p, size, MADV_HUGEPAGE); |
2015 | #endif | |
2016 | } | |
2017 | ||
fa8d436c UD |
2018 | /* ------------------- Support for multiple arenas -------------------- */ |
2019 | #include "arena.c" | |
f65fd747 | 2020 | |
fa8d436c | 2021 | /* |
6c8dbf00 | 2022 | Debugging support |
f65fd747 | 2023 | |
6c8dbf00 OB |
2024 | These routines make a number of assertions about the states |
2025 | of data structures that should be true at all times. If any | |
2026 | are not true, it's very likely that a user program has somehow | |
2027 | trashed memory. (It's also possible that there is a coding error | |
2028 | in malloc. In which case, please report it!) | |
2029 | */ | |
ee74a442 | 2030 | |
6c8dbf00 | 2031 | #if !MALLOC_DEBUG |
d8f00d46 | 2032 | |
6c8dbf00 OB |
2033 | # define check_chunk(A, P) |
2034 | # define check_free_chunk(A, P) | |
2035 | # define check_inuse_chunk(A, P) | |
2036 | # define check_remalloced_chunk(A, P, N) | |
2037 | # define check_malloced_chunk(A, P, N) | |
2038 | # define check_malloc_state(A) | |
d8f00d46 | 2039 | |
fa8d436c | 2040 | #else |
ca34d7a7 | 2041 | |
6c8dbf00 OB |
2042 | # define check_chunk(A, P) do_check_chunk (A, P) |
2043 | # define check_free_chunk(A, P) do_check_free_chunk (A, P) | |
2044 | # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P) | |
2045 | # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N) | |
2046 | # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N) | |
2047 | # define check_malloc_state(A) do_check_malloc_state (A) | |
ca34d7a7 | 2048 | |
fa8d436c | 2049 | /* |
6c8dbf00 OB |
2050 | Properties of all chunks |
2051 | */ | |
ca34d7a7 | 2052 | |
6c8dbf00 OB |
2053 | static void |
2054 | do_check_chunk (mstate av, mchunkptr p) | |
ca34d7a7 | 2055 | { |
6c8dbf00 | 2056 | unsigned long sz = chunksize (p); |
fa8d436c | 2057 | /* min and max possible addresses assuming contiguous allocation */ |
6c8dbf00 OB |
2058 | char *max_address = (char *) (av->top) + chunksize (av->top); |
2059 | char *min_address = max_address - av->system_mem; | |
fa8d436c | 2060 | |
6c8dbf00 OB |
2061 | if (!chunk_is_mmapped (p)) |
2062 | { | |
2063 | /* Has legal address ... */ | |
2064 | if (p != av->top) | |
2065 | { | |
2066 | if (contiguous (av)) | |
2067 | { | |
2068 | assert (((char *) p) >= min_address); | |
2069 | assert (((char *) p + sz) <= ((char *) (av->top))); | |
2070 | } | |
2071 | } | |
2072 | else | |
2073 | { | |
2074 | /* top size is always at least MINSIZE */ | |
2075 | assert ((unsigned long) (sz) >= MINSIZE); | |
2076 | /* top predecessor always marked inuse */ | |
2077 | assert (prev_inuse (p)); | |
2078 | } | |
fa8d436c | 2079 | } |
0552fd2c | 2080 | else |
6c8dbf00 OB |
2081 | { |
2082 | /* address is outside main heap */ | |
2083 | if (contiguous (av) && av->top != initial_top (av)) | |
2084 | { | |
2085 | assert (((char *) p) < min_address || ((char *) p) >= max_address); | |
2086 | } | |
2087 | /* chunk is page-aligned */ | |
e9c4fe93 | 2088 | assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0); |
6c8dbf00 | 2089 | /* mem is aligned */ |
ca89f1c7 | 2090 | assert (aligned_OK (chunk2mem (p))); |
fa8d436c | 2091 | } |
eb406346 UD |
2092 | } |
2093 | ||
fa8d436c | 2094 | /* |
6c8dbf00 OB |
2095 | Properties of free chunks |
2096 | */ | |
ee74a442 | 2097 | |
6c8dbf00 OB |
2098 | static void |
2099 | do_check_free_chunk (mstate av, mchunkptr p) | |
67c94753 | 2100 | { |
3381be5c | 2101 | INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA); |
6c8dbf00 | 2102 | mchunkptr next = chunk_at_offset (p, sz); |
67c94753 | 2103 | |
6c8dbf00 | 2104 | do_check_chunk (av, p); |
67c94753 | 2105 | |
fa8d436c | 2106 | /* Chunk must claim to be free ... */ |
6c8dbf00 OB |
2107 | assert (!inuse (p)); |
2108 | assert (!chunk_is_mmapped (p)); | |
67c94753 | 2109 | |
fa8d436c | 2110 | /* Unless a special marker, must have OK fields */ |
6c8dbf00 OB |
2111 | if ((unsigned long) (sz) >= MINSIZE) |
2112 | { | |
2113 | assert ((sz & MALLOC_ALIGN_MASK) == 0); | |
ca89f1c7 | 2114 | assert (aligned_OK (chunk2mem (p))); |
6c8dbf00 | 2115 | /* ... matching footer field */ |
3381be5c | 2116 | assert (prev_size (next_chunk (p)) == sz); |
6c8dbf00 OB |
2117 | /* ... and is fully consolidated */ |
2118 | assert (prev_inuse (p)); | |
2119 | assert (next == av->top || inuse (next)); | |
2120 | ||
2121 | /* ... and has minimally sane links */ | |
2122 | assert (p->fd->bk == p); | |
2123 | assert (p->bk->fd == p); | |
2124 | } | |
fa8d436c | 2125 | else /* markers are always of size SIZE_SZ */ |
6c8dbf00 | 2126 | assert (sz == SIZE_SZ); |
67c94753 | 2127 | } |
67c94753 | 2128 | |
fa8d436c | 2129 | /* |
6c8dbf00 OB |
2130 | Properties of inuse chunks |
2131 | */ | |
fa8d436c | 2132 | |
6c8dbf00 OB |
2133 | static void |
2134 | do_check_inuse_chunk (mstate av, mchunkptr p) | |
f65fd747 | 2135 | { |
fa8d436c | 2136 | mchunkptr next; |
f65fd747 | 2137 | |
6c8dbf00 | 2138 | do_check_chunk (av, p); |
f65fd747 | 2139 | |
6c8dbf00 | 2140 | if (chunk_is_mmapped (p)) |
fa8d436c | 2141 | return; /* mmapped chunks have no next/prev */ |
ca34d7a7 | 2142 | |
fa8d436c | 2143 | /* Check whether it claims to be in use ... */ |
6c8dbf00 | 2144 | assert (inuse (p)); |
10dc2a90 | 2145 | |
6c8dbf00 | 2146 | next = next_chunk (p); |
10dc2a90 | 2147 | |
fa8d436c | 2148 | /* ... and is surrounded by OK chunks. |
6c8dbf00 OB |
2149 | Since more things can be checked with free chunks than inuse ones, |
2150 | if an inuse chunk borders them and debug is on, it's worth doing them. | |
2151 | */ | |
2152 | if (!prev_inuse (p)) | |
2153 | { | |
2154 | /* Note that we cannot even look at prev unless it is not inuse */ | |
2155 | mchunkptr prv = prev_chunk (p); | |
2156 | assert (next_chunk (prv) == p); | |
2157 | do_check_free_chunk (av, prv); | |
2158 | } | |
fa8d436c | 2159 | |
6c8dbf00 OB |
2160 | if (next == av->top) |
2161 | { | |
2162 | assert (prev_inuse (next)); | |
2163 | assert (chunksize (next) >= MINSIZE); | |
2164 | } | |
2165 | else if (!inuse (next)) | |
2166 | do_check_free_chunk (av, next); | |
10dc2a90 UD |
2167 | } |
2168 | ||
fa8d436c | 2169 | /* |
6c8dbf00 OB |
2170 | Properties of chunks recycled from fastbins |
2171 | */ | |
fa8d436c | 2172 | |
6c8dbf00 OB |
2173 | static void |
2174 | do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s) | |
10dc2a90 | 2175 | { |
3381be5c | 2176 | INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA); |
fa8d436c | 2177 | |
6c8dbf00 OB |
2178 | if (!chunk_is_mmapped (p)) |
2179 | { | |
2180 | assert (av == arena_for_chunk (p)); | |
e9c4fe93 | 2181 | if (chunk_main_arena (p)) |
6c8dbf00 | 2182 | assert (av == &main_arena); |
e9c4fe93 FW |
2183 | else |
2184 | assert (av != &main_arena); | |
6c8dbf00 | 2185 | } |
fa8d436c | 2186 | |
6c8dbf00 | 2187 | do_check_inuse_chunk (av, p); |
fa8d436c UD |
2188 | |
2189 | /* Legal size ... */ | |
6c8dbf00 OB |
2190 | assert ((sz & MALLOC_ALIGN_MASK) == 0); |
2191 | assert ((unsigned long) (sz) >= MINSIZE); | |
fa8d436c | 2192 | /* ... and alignment */ |
ca89f1c7 | 2193 | assert (aligned_OK (chunk2mem (p))); |
fa8d436c | 2194 | /* chunk is less than MINSIZE more than request */ |
6c8dbf00 OB |
2195 | assert ((long) (sz) - (long) (s) >= 0); |
2196 | assert ((long) (sz) - (long) (s + MINSIZE) < 0); | |
10dc2a90 UD |
2197 | } |
2198 | ||
fa8d436c | 2199 | /* |
6c8dbf00 OB |
2200 | Properties of nonrecycled chunks at the point they are malloced |
2201 | */ | |
fa8d436c | 2202 | |
6c8dbf00 OB |
2203 | static void |
2204 | do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s) | |
10dc2a90 | 2205 | { |
fa8d436c | 2206 | /* same as recycled case ... */ |
6c8dbf00 | 2207 | do_check_remalloced_chunk (av, p, s); |
10dc2a90 | 2208 | |
fa8d436c | 2209 | /* |
6c8dbf00 OB |
2210 | ... plus, must obey implementation invariant that prev_inuse is |
2211 | always true of any allocated chunk; i.e., that each allocated | |
2212 | chunk borders either a previously allocated and still in-use | |
2213 | chunk, or the base of its memory arena. This is ensured | |
2214 | by making all allocations from the `lowest' part of any found | |
2215 | chunk. This does not necessarily hold however for chunks | |
2216 | recycled via fastbins. | |
2217 | */ | |
2218 | ||
2219 | assert (prev_inuse (p)); | |
fa8d436c | 2220 | } |
10dc2a90 | 2221 | |
f65fd747 | 2222 | |
fa8d436c | 2223 | /* |
6c8dbf00 | 2224 | Properties of malloc_state. |
f65fd747 | 2225 | |
6c8dbf00 OB |
2226 | This may be useful for debugging malloc, as well as detecting user |
2227 | programmer errors that somehow write into malloc_state. | |
f65fd747 | 2228 | |
6c8dbf00 OB |
2229 | If you are extending or experimenting with this malloc, you can |
2230 | probably figure out how to hack this routine to print out or | |
2231 | display chunk addresses, sizes, bins, and other instrumentation. | |
2232 | */ | |
f65fd747 | 2233 | |
6c8dbf00 OB |
2234 | static void |
2235 | do_check_malloc_state (mstate av) | |
fa8d436c UD |
2236 | { |
2237 | int i; | |
2238 | mchunkptr p; | |
2239 | mchunkptr q; | |
2240 | mbinptr b; | |
fa8d436c UD |
2241 | unsigned int idx; |
2242 | INTERNAL_SIZE_T size; | |
2243 | unsigned long total = 0; | |
2244 | int max_fast_bin; | |
f65fd747 | 2245 | |
fa8d436c | 2246 | /* internal size_t must be no wider than pointer type */ |
6c8dbf00 | 2247 | assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *)); |
f65fd747 | 2248 | |
fa8d436c | 2249 | /* alignment is a power of 2 */ |
6c8dbf00 | 2250 | assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0); |
f65fd747 | 2251 | |
3381be5c WD |
2252 | /* Check the arena is initialized. */ |
2253 | assert (av->top != 0); | |
2254 | ||
2255 | /* No memory has been allocated yet, so doing more tests is not possible. */ | |
2256 | if (av->top == initial_top (av)) | |
fa8d436c | 2257 | return; |
f65fd747 | 2258 | |
fa8d436c | 2259 | /* pagesize is a power of 2 */ |
8a35c3fe | 2260 | assert (powerof2(GLRO (dl_pagesize))); |
f65fd747 | 2261 | |
fa8d436c | 2262 | /* A contiguous main_arena is consistent with sbrk_base. */ |
6c8dbf00 OB |
2263 | if (av == &main_arena && contiguous (av)) |
2264 | assert ((char *) mp_.sbrk_base + av->system_mem == | |
2265 | (char *) av->top + chunksize (av->top)); | |
fa8d436c UD |
2266 | |
2267 | /* properties of fastbins */ | |
2268 | ||
2269 | /* max_fast is in allowed range */ | |
6c8dbf00 OB |
2270 | assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE)); |
2271 | ||
2272 | max_fast_bin = fastbin_index (get_max_fast ()); | |
2273 | ||
2274 | for (i = 0; i < NFASTBINS; ++i) | |
2275 | { | |
2276 | p = fastbin (av, i); | |
2277 | ||
2278 | /* The following test can only be performed for the main arena. | |
2279 | While mallopt calls malloc_consolidate to get rid of all fast | |
2280 | bins (especially those larger than the new maximum) this does | |
2281 | only happen for the main arena. Trying to do this for any | |
2282 | other arena would mean those arenas have to be locked and | |
2283 | malloc_consolidate be called for them. This is excessive. And | |
2284 | even if this is acceptable to somebody it still cannot solve | |
2285 | the problem completely since if the arena is locked a | |
2286 | concurrent malloc call might create a new arena which then | |
2287 | could use the newly invalid fast bins. */ | |
2288 | ||
2289 | /* all bins past max_fast are empty */ | |
2290 | if (av == &main_arena && i > max_fast_bin) | |
2291 | assert (p == 0); | |
2292 | ||
2293 | while (p != 0) | |
2294 | { | |
49c3c376 | 2295 | if (__glibc_unlikely (misaligned_chunk (p))) |
768358b6 | 2296 | malloc_printerr ("do_check_malloc_state(): " |
a1a486d7 | 2297 | "unaligned fastbin chunk detected"); |
6c8dbf00 OB |
2298 | /* each chunk claims to be inuse */ |
2299 | do_check_inuse_chunk (av, p); | |
2300 | total += chunksize (p); | |
2301 | /* chunk belongs in this bin */ | |
2302 | assert (fastbin_index (chunksize (p)) == i); | |
a1a486d7 | 2303 | p = REVEAL_PTR (p->fd); |
6c8dbf00 | 2304 | } |
fa8d436c | 2305 | } |
fa8d436c | 2306 | |
fa8d436c | 2307 | /* check normal bins */ |
6c8dbf00 OB |
2308 | for (i = 1; i < NBINS; ++i) |
2309 | { | |
2310 | b = bin_at (av, i); | |
2311 | ||
2312 | /* binmap is accurate (except for bin 1 == unsorted_chunks) */ | |
2313 | if (i >= 2) | |
2314 | { | |
2315 | unsigned int binbit = get_binmap (av, i); | |
2316 | int empty = last (b) == b; | |
2317 | if (!binbit) | |
2318 | assert (empty); | |
2319 | else if (!empty) | |
2320 | assert (binbit); | |
2321 | } | |
2322 | ||
2323 | for (p = last (b); p != b; p = p->bk) | |
2324 | { | |
2325 | /* each chunk claims to be free */ | |
2326 | do_check_free_chunk (av, p); | |
2327 | size = chunksize (p); | |
2328 | total += size; | |
2329 | if (i >= 2) | |
2330 | { | |
2331 | /* chunk belongs in bin */ | |
2332 | idx = bin_index (size); | |
2333 | assert (idx == i); | |
2334 | /* lists are sorted */ | |
2335 | assert (p->bk == b || | |
2336 | (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p)); | |
2337 | ||
2338 | if (!in_smallbin_range (size)) | |
2339 | { | |
2340 | if (p->fd_nextsize != NULL) | |
2341 | { | |
2342 | if (p->fd_nextsize == p) | |
2343 | assert (p->bk_nextsize == p); | |
2344 | else | |
2345 | { | |
2346 | if (p->fd_nextsize == first (b)) | |
2347 | assert (chunksize (p) < chunksize (p->fd_nextsize)); | |
2348 | else | |
2349 | assert (chunksize (p) > chunksize (p->fd_nextsize)); | |
2350 | ||
2351 | if (p == first (b)) | |
2352 | assert (chunksize (p) > chunksize (p->bk_nextsize)); | |
2353 | else | |
2354 | assert (chunksize (p) < chunksize (p->bk_nextsize)); | |
2355 | } | |
2356 | } | |
2357 | else | |
2358 | assert (p->bk_nextsize == NULL); | |
2359 | } | |
2360 | } | |
2361 | else if (!in_smallbin_range (size)) | |
2362 | assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL); | |
2363 | /* chunk is followed by a legal chain of inuse chunks */ | |
2364 | for (q = next_chunk (p); | |
2365 | (q != av->top && inuse (q) && | |
2366 | (unsigned long) (chunksize (q)) >= MINSIZE); | |
2367 | q = next_chunk (q)) | |
2368 | do_check_inuse_chunk (av, q); | |
2369 | } | |
fa8d436c | 2370 | } |
f65fd747 | 2371 | |
fa8d436c | 2372 | /* top chunk is OK */ |
6c8dbf00 | 2373 | check_chunk (av, av->top); |
fa8d436c UD |
2374 | } |
2375 | #endif | |
2376 | ||
2377 | ||
2378 | /* ----------------- Support for debugging hooks -------------------- */ | |
b5bd5bfe | 2379 | #if IS_IN (libc) |
fa8d436c | 2380 | #include "hooks.c" |
b5bd5bfe | 2381 | #endif |
fa8d436c UD |
2382 | |
2383 | ||
2384 | /* ----------- Routines dealing with system allocation -------------- */ | |
2385 | ||
2386 | /* | |
6c8dbf00 OB |
2387 | sysmalloc handles malloc cases requiring more memory from the system. |
2388 | On entry, it is assumed that av->top does not have enough | |
2389 | space to service request for nb bytes, thus requiring that av->top | |
2390 | be extended or replaced. | |
2391 | */ | |
fa8d436c | 2392 | |
6cc3ccc6 AZ |
2393 | static void * |
2394 | sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av) | |
2395 | { | |
2396 | long int size; | |
2397 | ||
2398 | /* | |
2399 | Round up size to nearest page. For mmapped chunks, the overhead is one | |
2400 | SIZE_SZ unit larger than for normal chunks, because there is no | |
2401 | following chunk whose prev_size field could be used. | |
2402 | ||
2403 | See the front_misalign handling below, for glibc there is no need for | |
2404 | further alignments unless we have high alignment. | |
2405 | */ | |
2406 | if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) | |
2407 | size = ALIGN_UP (nb + SIZE_SZ, pagesize); | |
2408 | else | |
2409 | size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize); | |
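/* Worked example (assuming a 64-bit build with SIZE_SZ == 8,
   MALLOC_ALIGNMENT == CHUNK_HDR_SZ and 4 KiB pages): a padded request
   nb == 131072 becomes size == ALIGN_UP (131080, 4096) == 135168,
   i.e. 33 whole pages, the extra SIZE_SZ covering the prev_size field
   that no following chunk can supply.  */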
2410 | ||
2411 | /* Don't try if size wraps around 0. */ | |
2412 | if ((unsigned long) (size) <= (unsigned long) (nb)) | |
2413 | return MAP_FAILED; | |
2414 | ||
2415 | char *mm = (char *) MMAP (0, size, | |
2416 | mtag_mmap_flags | PROT_READ | PROT_WRITE, | |
2417 | extra_flags); | |
2418 | if (mm == MAP_FAILED) | |
2419 | return mm; | |
2420 | ||
98d5fcb8 AZ |
2421 | #ifdef MAP_HUGETLB |
2422 | if (!(extra_flags & MAP_HUGETLB)) | |
2423 | madvise_thp (mm, size); | |
2424 | #endif | |
6cc3ccc6 AZ |
2425 | |
2426 | /* | |
2427 | The offset to the start of the mmapped region is stored in the prev_size | |
2428 | field of the chunk. This allows us to adjust returned start address to | |
2429 | meet alignment requirements here and in memalign(), and still be able to | |
2430 | compute proper address argument for later munmap in free() and realloc(). | |
2431 | */ | |
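/* Illustrative sketch (not part of this function): because the offset
   is kept in prev_size, free () can later recover the mapping base and
   length no matter how the start address was adjusted here; compare
   munmap_chunk () below:

     uintptr_t block = (uintptr_t) p - prev_size (p);
     size_t total_size = prev_size (p) + chunksize (p);
     __munmap ((char *) block, total_size);
*/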
2432 | ||
2433 | INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ | |
2434 | ||
2435 | if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) | |
2436 | { | |
2437 | /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and | |
2438 | MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page | |
2439 | aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */ | |
2440 | assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0); | |
2441 | front_misalign = 0; | |
2442 | } | |
2443 | else | |
2444 | front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK; | |
2445 | ||
2446 | mchunkptr p; /* the allocated/returned chunk */ | |
2447 | ||
2448 | if (front_misalign > 0) | |
2449 | { | |
2450 | ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign; | |
2451 | p = (mchunkptr) (mm + correction); | |
2452 | set_prev_size (p, correction); | |
2453 | set_head (p, (size - correction) | IS_MMAPPED); | |
2454 | } | |
2455 | else | |
2456 | { | |
2457 | p = (mchunkptr) mm; | |
2458 | set_prev_size (p, 0); | |
2459 | set_head (p, size | IS_MMAPPED); | |
2460 | } | |
2461 | ||
2462 | /* update statistics */ | |
89d40cac | 2463 | int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1; |
6cc3ccc6 AZ |
2464 | atomic_max (&mp_.max_n_mmaps, new); |
2465 | ||
2466 | unsigned long sum; | |
89d40cac | 2467 | sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size; |
6cc3ccc6 AZ |
2468 | atomic_max (&mp_.max_mmapped_mem, sum); |
2469 | ||
2470 | check_chunk (av, p); | |
2471 | ||
2472 | return chunk2mem (p); | |
2473 | } | |
2474 | ||
0849eed4 AZ |
2475 | /* |
2476 | Allocate memory using mmap() based on S and the requested size NB, | |
2477 | aligning to PAGESIZE if required. EXTRA_FLAGS is passed to the mmap() | |
2478 | call. If the call succeeds, S is updated with the allocated size. This | |
2479 | is used as a fallback if MORECORE fails. | |
2480 | */ | |
2481 | static void * | |
2482 | sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb, | |
2483 | INTERNAL_SIZE_T old_size, size_t minsize, | |
2484 | size_t pagesize, int extra_flags, mstate av) | |
2485 | { | |
2486 | long int size = *s; | |
2487 | ||
2488 | /* Cannot merge with old top, so add its size back in */ | |
2489 | if (contiguous (av)) | |
2490 | size = ALIGN_UP (size + old_size, pagesize); | |
2491 | ||
2492 | /* If we are relying on mmap as backup, then use larger units */ | |
2493 | if ((unsigned long) (size) < minsize) | |
2494 | size = minsize; | |
2495 | ||
2496 | /* Don't try if size wraps around 0 */ | |
2497 | if ((unsigned long) (size) <= (unsigned long) (nb)) | |
2498 | return MORECORE_FAILURE; | |
2499 | ||
2500 | char *mbrk = (char *) (MMAP (0, size, | |
2501 | mtag_mmap_flags | PROT_READ | PROT_WRITE, | |
2502 | extra_flags)); | |
2503 | if (mbrk == MAP_FAILED) | |
2504 | return MAP_FAILED; | |
2505 | ||
2506 | #ifdef MAP_HUGETLB | |
2507 | if (!(extra_flags & MAP_HUGETLB)) | |
2508 | madvise_thp (mbrk, size); | |
2509 | #endif | |
2510 | ||
2511 | /* Record that we no longer have a contiguous sbrk region. After the first | |
2512 | time mmap is used as backup, we do not ever rely on contiguous space | |
2513 | since this could incorrectly bridge regions. */ | |
2514 | set_noncontiguous (av); | |
2515 | ||
2516 | *s = size; | |
2517 | return mbrk; | |
2518 | } | |
2519 | ||
6c8dbf00 OB |
2520 | static void * |
2521 | sysmalloc (INTERNAL_SIZE_T nb, mstate av) | |
f65fd747 | 2522 | { |
6c8dbf00 | 2523 | mchunkptr old_top; /* incoming value of av->top */ |
fa8d436c | 2524 | INTERNAL_SIZE_T old_size; /* its size */ |
6c8dbf00 | 2525 | char *old_end; /* its end address */ |
f65fd747 | 2526 | |
6c8dbf00 OB |
2527 | long size; /* arg to first MORECORE or mmap call */ |
2528 | char *brk; /* return value from MORECORE */ | |
f65fd747 | 2529 | |
6c8dbf00 OB |
2530 | long correction; /* arg to 2nd MORECORE call */ |
2531 | char *snd_brk; /* 2nd return val */ | |
f65fd747 | 2532 | |
fa8d436c UD |
2533 | INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ |
2534 | INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ | |
6c8dbf00 | 2535 | char *aligned_brk; /* aligned offset into brk */ |
f65fd747 | 2536 | |
6c8dbf00 OB |
2537 | mchunkptr p; /* the allocated/returned chunk */ |
2538 | mchunkptr remainder; /* remainder from allocation */ | |
2539 | unsigned long remainder_size; /* its size */ | |
fa8d436c | 2540 | |
fa8d436c | 2541 | |
8a35c3fe | 2542 | size_t pagesize = GLRO (dl_pagesize); |
6c8dbf00 | 2543 | bool tried_mmap = false; |
fa8d436c UD |
2544 | |
2545 | ||
fa8d436c | 2546 | /* |
6c8dbf00 OB |
2547 | If have mmap, and the request size meets the mmap threshold, and |
2548 | the system supports mmap, and there are few enough currently | |
2549 | allocated mmapped regions, try to directly map this request | |
2550 | rather than expanding top. | |
2551 | */ | |
2552 | ||
fff94fa2 SP |
2553 | if (av == NULL |
2554 | || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) | |
2555 | && (mp_.n_mmaps < mp_.n_mmaps_max))) | |
6c8dbf00 | 2556 | { |
98d5fcb8 AZ |
2557 | char *mm; |
2558 | #if HAVE_TUNABLES | |
2559 | if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize) | |
2560 | { | |
2561 | /* There is no need to issue the THP madvise call if Huge Pages are | |
2562 | used directly. */ | |
2563 | mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av); | |
2564 | if (mm != MAP_FAILED) | |
2565 | return mm; | |
2566 | } | |
2567 | #endif | |
2568 | mm = sysmalloc_mmap (nb, pagesize, 0, av); | |
6cc3ccc6 AZ |
2569 | if (mm != MAP_FAILED) |
2570 | return mm; | |
6c8dbf00 | 2571 | tried_mmap = true; |
fa8d436c | 2572 | } |
fa8d436c | 2573 | |
fff94fa2 SP |
2574 | /* There are no usable arenas and mmap also failed. */ |
2575 | if (av == NULL) | |
2576 | return 0; | |
2577 | ||
fa8d436c UD |
2578 | /* Record incoming configuration of top */ |
2579 | ||
6c8dbf00 OB |
2580 | old_top = av->top; |
2581 | old_size = chunksize (old_top); | |
2582 | old_end = (char *) (chunk_at_offset (old_top, old_size)); | |
fa8d436c | 2583 | |
6c8dbf00 | 2584 | brk = snd_brk = (char *) (MORECORE_FAILURE); |
fa8d436c | 2585 | |
a9177ff5 | 2586 | /* |
fa8d436c UD |
2587 | If not the first time through, we require old_size to be |
2588 | at least MINSIZE and to have prev_inuse set. | |
6c8dbf00 | 2589 | */ |
fa8d436c | 2590 | |
6c8dbf00 OB |
2591 | assert ((old_top == initial_top (av) && old_size == 0) || |
2592 | ((unsigned long) (old_size) >= MINSIZE && | |
2593 | prev_inuse (old_top) && | |
8a35c3fe | 2594 | ((unsigned long) old_end & (pagesize - 1)) == 0)); |
fa8d436c UD |
2595 | |
2596 | /* Precondition: not enough current space to satisfy nb request */ | |
6c8dbf00 | 2597 | assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE)); |
a9177ff5 | 2598 | |
72f90263 | 2599 | |
6c8dbf00 OB |
2600 | if (av != &main_arena) |
2601 | { | |
2602 | heap_info *old_heap, *heap; | |
2603 | size_t old_heap_size; | |
2604 | ||
2605 | /* First try to extend the current heap. */ | |
2606 | old_heap = heap_for_ptr (old_top); | |
2607 | old_heap_size = old_heap->size; | |
2608 | if ((long) (MINSIZE + nb - old_size) > 0 | |
2609 | && grow_heap (old_heap, MINSIZE + nb - old_size) == 0) | |
2610 | { | |
2611 | av->system_mem += old_heap->size - old_heap_size; | |
6c8dbf00 OB |
2612 | set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top) |
2613 | | PREV_INUSE); | |
2614 | } | |
2615 | else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad))) | |
2616 | { | |
2617 | /* Use a newly allocated heap. */ | |
2618 | heap->ar_ptr = av; | |
2619 | heap->prev = old_heap; | |
2620 | av->system_mem += heap->size; | |
6c8dbf00 OB |
2621 | /* Set up the new top. */ |
2622 | top (av) = chunk_at_offset (heap, sizeof (*heap)); | |
2623 | set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE); | |
2624 | ||
2625 | /* Setup fencepost and free the old top chunk with a multiple of | |
2626 | MALLOC_ALIGNMENT in size. */ | |
2627 | /* The fencepost takes at least MINSIZE bytes, because it might | |
2628 | become the top chunk again later. Note that a footer is set | |
2629 | up, too, although the chunk is marked in use. */ | |
2630 | old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK; | |
3784dfc0 RE |
2631 | set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ), |
2632 | 0 | PREV_INUSE); | |
6c8dbf00 OB |
2633 | if (old_size >= MINSIZE) |
2634 | { | |
3784dfc0 RE |
2635 | set_head (chunk_at_offset (old_top, old_size), |
2636 | CHUNK_HDR_SZ | PREV_INUSE); | |
2637 | set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ); | |
6c8dbf00 OB |
2638 | set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA); |
2639 | _int_free (av, old_top, 1); | |
2640 | } | |
2641 | else | |
2642 | { | |
3784dfc0 RE |
2643 | set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE); |
2644 | set_foot (old_top, (old_size + CHUNK_HDR_SZ)); | |
6c8dbf00 OB |
2645 | } |
2646 | } | |
2647 | else if (!tried_mmap) | |
6cc3ccc6 | 2648 | { |
98d5fcb8 AZ |
2649 | /* We can at least try to use mmap to get memory. If new_heap fails | |
2650 | it is unlikely that trying to allocate huge pages will | |
2651 | succeed. */ | |
6cc3ccc6 AZ |
2652 | char *mm = sysmalloc_mmap (nb, pagesize, 0, av); |
2653 | if (mm != MAP_FAILED) | |
2654 | return mm; | |
2655 | } | |
fa8d436c | 2656 | } |
6c8dbf00 | 2657 | else /* av == main_arena */ |
fa8d436c | 2658 | |
fa8d436c | 2659 | |
6c8dbf00 OB |
2660 | { /* Request enough space for nb + pad + overhead */ |
2661 | size = nb + mp_.top_pad + MINSIZE; | |
a9177ff5 | 2662 | |
6c8dbf00 OB |
2663 | /* |
2664 | If contiguous, we can subtract out existing space that we hope to | |
2665 | combine with new space. We add it back later only if | |
2666 | we don't actually get contiguous space. | |
2667 | */ | |
a9177ff5 | 2668 | |
6c8dbf00 OB |
2669 | if (contiguous (av)) |
2670 | size -= old_size; | |
fa8d436c | 2671 | |
6c8dbf00 | 2672 | /* |
7478c995 | 2673 | Round to a multiple of page size or huge page size. |
6c8dbf00 OB |
2674 | If MORECORE is not contiguous, this ensures that we only call it |
2675 | with whole-page arguments. And if MORECORE is contiguous and | |
2676 | this is not first time through, this preserves page-alignment of | |
2677 | previous calls. Otherwise, we correct to page-align below. | |
2678 | */ | |
fa8d436c | 2679 | |
7478c995 AZ |
2680 | #if HAVE_TUNABLES && defined (MADV_HUGEPAGE) |
2681 | /* Defined in brk.c. */ | |
2682 | extern void *__curbrk; | |
2683 | if (__glibc_unlikely (mp_.thp_pagesize != 0)) | |
2684 | { | |
2685 | uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size, | |
2686 | mp_.thp_pagesize); | |
2687 | size = top - (uintptr_t) __curbrk; | |
2688 | } | |
2689 | else | |
2690 | #endif | |
2691 | size = ALIGN_UP (size, GLRO(dl_pagesize)); | |
fa8d436c | 2692 | |
6c8dbf00 OB |
2693 | /* |
2694 | Don't try to call MORECORE if argument is so big as to appear | |
2695 | negative. Note that since mmap takes size_t arg, it may succeed | |
2696 | below even if we cannot call MORECORE. | |
2697 | */ | |
2698 | ||
2699 | if (size > 0) | |
2700 | { | |
2701 | brk = (char *) (MORECORE (size)); | |
5f6d8d97 AZ |
2702 | if (brk != (char *) (MORECORE_FAILURE)) |
2703 | madvise_thp (brk, size); | |
6c8dbf00 OB |
2704 | LIBC_PROBE (memory_sbrk_more, 2, brk, size); |
2705 | } | |
2706 | ||
57b07bed | 2707 | if (brk == (char *) (MORECORE_FAILURE)) |
6c8dbf00 OB |
2708 | { |
2709 | /* | |
2710 | If have mmap, try using it as a backup when MORECORE fails or | |
2711 | cannot be used. This is worth doing on systems that have "holes" in | |
2712 | address space, so sbrk cannot extend to give contiguous space, but | |
2713 | space is available elsewhere. Note that we ignore mmap max count | |
2714 | and threshold limits, since the space will not be used as a | |
2715 | segregated mmap region. | |
2716 | */ | |
2717 | ||
0f982c18 AZ |
2718 | char *mbrk = MAP_FAILED; |
2719 | #if HAVE_TUNABLES | |
2720 | if (mp_.hp_pagesize > 0) | |
2721 | mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, | |
2722 | mp_.hp_pagesize, mp_.hp_pagesize, | |
2723 | mp_.hp_flags, av); | |
2724 | #endif | |
2725 | if (mbrk == MAP_FAILED) | |
2726 | mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize, | |
2727 | MMAP_AS_MORECORE_SIZE, 0, av); | |
0849eed4 AZ |
2728 | if (mbrk != MAP_FAILED) |
2729 | { | |
2730 | /* We do not need, and cannot use, another sbrk call to find end */ | |
2731 | brk = mbrk; | |
2732 | snd_brk = brk + size; | |
2733 | } | |
6c8dbf00 OB |
2734 | } |
2735 | ||
2736 | if (brk != (char *) (MORECORE_FAILURE)) | |
2737 | { | |
2738 | if (mp_.sbrk_base == 0) | |
2739 | mp_.sbrk_base = brk; | |
2740 | av->system_mem += size; | |
2741 | ||
2742 | /* | |
2743 | If MORECORE extends previous space, we can likewise extend top size. | |
2744 | */ | |
2745 | ||
2746 | if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE)) | |
2747 | set_head (old_top, (size + old_size) | PREV_INUSE); | |
2748 | ||
2749 | else if (contiguous (av) && old_size && brk < old_end) | |
ac3ed168 FW |
2750 | /* Oops! Someone else killed our space. Can't touch anything. */ |
2751 | malloc_printerr ("break adjusted to free malloc space"); | |
6c8dbf00 OB |
2752 | |
2753 | /* | |
2754 | Otherwise, make adjustments: | |
2755 | ||
2756 | * If the first time through or noncontiguous, we need to call sbrk | |
2757 | just to find out where the end of memory lies. | |
2758 | ||
2759 | * We need to ensure that all returned chunks from malloc will meet | |
2760 | MALLOC_ALIGNMENT | |
2761 | ||
2762 | * If there was an intervening foreign sbrk, we need to adjust sbrk | |
2763 | request size to account for fact that we will not be able to | |
2764 | combine new space with existing space in old_top. | |
2765 | ||
2766 | * Almost all systems internally allocate whole pages at a time, in | |
2767 | which case we might as well use the whole last page of request. | |
2768 | So we allocate enough more memory to hit a page boundary now, | |
2769 | which in turn causes future contiguous calls to page-align. | |
2770 | */ | |
2771 | ||
2772 | else | |
2773 | { | |
2774 | front_misalign = 0; | |
2775 | end_misalign = 0; | |
2776 | correction = 0; | |
2777 | aligned_brk = brk; | |
2778 | ||
2779 | /* handle contiguous cases */ | |
2780 | if (contiguous (av)) | |
2781 | { | |
2782 | /* Count foreign sbrk as system_mem. */ | |
2783 | if (old_size) | |
2784 | av->system_mem += brk - old_end; | |
2785 | ||
2786 | /* Guarantee alignment of first new chunk made from this space */ | |
2787 | ||
ca89f1c7 | 2788 | front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK; |
6c8dbf00 OB |
2789 | if (front_misalign > 0) |
2790 | { | |
2791 | /* | |
2792 | Skip over some bytes to arrive at an aligned position. | |
2793 | We don't need to specially mark these wasted front bytes. | |
2794 | They will never be accessed anyway because | |
2795 | prev_inuse of av->top (and any chunk created from its start) | |
2796 | is always true after initialization. | |
2797 | */ | |
2798 | ||
2799 | correction = MALLOC_ALIGNMENT - front_misalign; | |
2800 | aligned_brk += correction; | |
2801 | } | |
2802 | ||
2803 | /* | |
2804 | If this isn't adjacent to existing space, then we will not | |
2805 | be able to merge with old_top space, so must add to 2nd request. | |
2806 | */ | |
2807 | ||
2808 | correction += old_size; | |
2809 | ||
2810 | /* Extend the end address to hit a page boundary */ | |
2811 | end_misalign = (INTERNAL_SIZE_T) (brk + size + correction); | |
8a35c3fe | 2812 | correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign; |
6c8dbf00 OB |
2813 | |
2814 | assert (correction >= 0); | |
2815 | snd_brk = (char *) (MORECORE (correction)); | |
2816 | ||
2817 | /* | |
2818 | If can't allocate correction, try to at least find out current | |
2819 | brk. It might be enough to proceed without failing. | |
2820 | ||
2821 | Note that if second sbrk did NOT fail, we assume that space | |
2822 | is contiguous with first sbrk. This is a safe assumption unless | |
2823 | program is multithreaded but doesn't use locks and a foreign sbrk | |
2824 | occurred between our first and second calls. | |
2825 | */ | |
2826 | ||
2827 | if (snd_brk == (char *) (MORECORE_FAILURE)) | |
2828 | { | |
2829 | correction = 0; | |
2830 | snd_brk = (char *) (MORECORE (0)); | |
2831 | } | |
5f6d8d97 AZ |
2832 | else |
2833 | madvise_thp (snd_brk, correction); | |
6c8dbf00 OB |
2834 | } |
2835 | ||
2836 | /* handle non-contiguous cases */ | |
2837 | else | |
2838 | { | |
3784dfc0 | 2839 | if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) |
6c8dbf00 | 2840 | /* MORECORE/mmap must correctly align */ |
ca89f1c7 | 2841 | assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0); |
6c8dbf00 OB |
2842 | else |
2843 | { | |
ca89f1c7 | 2844 | front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK; |
6c8dbf00 OB |
2845 | if (front_misalign > 0) |
2846 | { | |
2847 | /* | |
2848 | Skip over some bytes to arrive at an aligned position. | |
2849 | We don't need to specially mark these wasted front bytes. | |
2850 | They will never be accessed anyway because | |
2851 | prev_inuse of av->top (and any chunk created from its start) | |
2852 | is always true after initialization. | |
2853 | */ | |
2854 | ||
2855 | aligned_brk += MALLOC_ALIGNMENT - front_misalign; | |
2856 | } | |
2857 | } | |
2858 | ||
2859 | /* Find out current end of memory */ | |
2860 | if (snd_brk == (char *) (MORECORE_FAILURE)) | |
2861 | { | |
2862 | snd_brk = (char *) (MORECORE (0)); | |
2863 | } | |
2864 | } | |
2865 | ||
2866 | /* Adjust top based on results of second sbrk */ | |
2867 | if (snd_brk != (char *) (MORECORE_FAILURE)) | |
2868 | { | |
2869 | av->top = (mchunkptr) aligned_brk; | |
2870 | set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); | |
2871 | av->system_mem += correction; | |
2872 | ||
2873 | /* | |
2874 | If not the first time through, we either have a | |
2875 | gap due to foreign sbrk or a non-contiguous region. Insert a | |
2876 | double fencepost at old_top to prevent consolidation with space | |
2877 | we don't own. These fenceposts are artificial chunks that are | |
2878 | marked as inuse and are in any case too small to use. We need | |
2879 | two to make sizes and alignments work out. | |
2880 | */ | |
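/* Illustrative layout after the fenceposts are installed:

     old_top ... (old_size bytes) ... | fence 1 | fence 2 | foreign space
                                        two CHUNK_HDR_SZ-sized inuse heads

   Nothing ever coalesces across an inuse chunk, so free chunks carved
   out of old_top can never merge into memory we do not own.  */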
2881 | ||
2882 | if (old_size != 0) | |
2883 | { | |
2884 | /* | |
2885 | Shrink old_top to insert fenceposts, keeping size a | |
2886 | multiple of MALLOC_ALIGNMENT. We know there is at least | |
2887 | enough space in old_top to do this. | |
2888 | */ | |
3784dfc0 | 2889 | old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK; |
6c8dbf00 OB |
2890 | set_head (old_top, old_size | PREV_INUSE); |
2891 | ||
2892 | /* | |
2893 | Note that the following assignments completely overwrite | |
2894 | old_top when old_size was previously MINSIZE. This is | |
2895 | intentional. We need the fencepost, even if old_top otherwise gets | |
2896 | lost. | |
2897 | */ | |
e9c4fe93 | 2898 | set_head (chunk_at_offset (old_top, old_size), |
3784dfc0 RE |
2899 | CHUNK_HDR_SZ | PREV_INUSE); |
2900 | set_head (chunk_at_offset (old_top, | |
2901 | old_size + CHUNK_HDR_SZ), | |
2902 | CHUNK_HDR_SZ | PREV_INUSE); | |
6c8dbf00 OB |
2903 | |
2904 | /* If possible, release the rest. */ | |
2905 | if (old_size >= MINSIZE) | |
2906 | { | |
2907 | _int_free (av, old_top, 1); | |
2908 | } | |
2909 | } | |
2910 | } | |
2911 | } | |
2912 | } | |
2913 | } /* if (av != &main_arena) */ | |
2914 | ||
2915 | if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem)) | |
fa8d436c | 2916 | av->max_system_mem = av->system_mem; |
6c8dbf00 | 2917 | check_malloc_state (av); |
a9177ff5 | 2918 | |
fa8d436c UD |
2919 | /* finally, do the allocation */ |
2920 | p = av->top; | |
6c8dbf00 | 2921 | size = chunksize (p); |
fa8d436c UD |
2922 | |
2923 | /* check that one of the above allocation paths succeeded */ | |
6c8dbf00 OB |
2924 | if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE)) |
2925 | { | |
2926 | remainder_size = size - nb; | |
2927 | remainder = chunk_at_offset (p, nb); | |
2928 | av->top = remainder; | |
2929 | set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
2930 | set_head (remainder, remainder_size | PREV_INUSE); | |
2931 | check_malloced_chunk (av, p, nb); | |
ca89f1c7 | 2932 | return chunk2mem (p); |
6c8dbf00 | 2933 | } |
fa8d436c UD |
2934 | |
2935 | /* catch all failure paths */ | |
8e58439c | 2936 | __set_errno (ENOMEM); |
fa8d436c UD |
2937 | return 0; |
2938 | } | |
2939 | ||
2940 | ||
2941 | /* | |
6c8dbf00 OB |
2942 | systrim is an inverse of sorts to sysmalloc. It gives memory back |
2943 | to the system (via negative arguments to sbrk) if there is unused | |
2944 | memory at the `high' end of the malloc pool. It is called | |
2945 | automatically by free() when top space exceeds the trim | |
2946 | threshold. It is also called by the public malloc_trim routine. It | |
2947 | returns 1 if it actually released any memory, else 0. | |
2948 | */ | |
fa8d436c | 2949 | |
6c8dbf00 OB |
2950 | static int |
2951 | systrim (size_t pad, mstate av) | |
fa8d436c | 2952 | { |
6c8dbf00 OB |
2953 | long top_size; /* Amount of top-most memory */ |
2954 | long extra; /* Amount to release */ | |
2955 | long released; /* Amount actually released */ | |
2956 | char *current_brk; /* address returned by pre-check sbrk call */ | |
2957 | char *new_brk; /* address returned by post-check sbrk call */ | |
6c8dbf00 | 2958 | long top_area; |
fa8d436c | 2959 | |
6c8dbf00 | 2960 | top_size = chunksize (av->top); |
a9177ff5 | 2961 | |
4b5b548c FS |
2962 | top_area = top_size - MINSIZE - 1; |
2963 | if (top_area <= pad) | |
2964 | return 0; | |
2965 | ||
ca6be165 | 2966 | /* Release in pagesize units and round down to the nearest page. */ |
7478c995 AZ |
2967 | #if HAVE_TUNABLES && defined (MADV_HUGEPAGE) |
2968 | if (__glibc_unlikely (mp_.thp_pagesize != 0)) | |
2969 | extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize); | |
2970 | else | |
2971 | #endif | |
2972 | extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize)); | |
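/* Worked example (assuming a 64-bit build, MINSIZE == 32, 4 KiB pages):
   with top_size == 270336 and pad == 131072,
     top_area == 270336 - 32 - 1 == 270303
     extra    == ALIGN_DOWN (270303 - 131072, 4096) == 135168
   so at most 33 whole pages can be handed back via MORECORE (-extra).  */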
a9177ff5 | 2973 | |
51a7380b WN |
2974 | if (extra == 0) |
2975 | return 0; | |
2976 | ||
4b5b548c | 2977 | /* |
6c8dbf00 OB |
2978 | Only proceed if end of memory is where we last set it. |
2979 | This avoids problems if there were foreign sbrk calls. | |
2980 | */ | |
2981 | current_brk = (char *) (MORECORE (0)); | |
2982 | if (current_brk == (char *) (av->top) + top_size) | |
2983 | { | |
2984 | /* | |
2985 | Attempt to release memory. We ignore MORECORE return value, | |
2986 | and instead call again to find out where new end of memory is. | |
2987 | This avoids problems if first call releases less than we asked, | |
2989 | or if failure somehow altered the brk value. (We could still | |
2989 | encounter problems if it altered brk in some very bad way, | |
2990 | but the only thing we can do is adjust anyway, which will cause | |
2991 | some downstream failure.) | |
2992 | */ | |
2993 | ||
2994 | MORECORE (-extra); | |
6c8dbf00 OB |
2995 | new_brk = (char *) (MORECORE (0)); |
2996 | ||
2997 | LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra); | |
2998 | ||
2999 | if (new_brk != (char *) MORECORE_FAILURE) | |
3000 | { | |
3001 | released = (long) (current_brk - new_brk); | |
3002 | ||
3003 | if (released != 0) | |
3004 | { | |
3005 | /* Success. Adjust top. */ | |
3006 | av->system_mem -= released; | |
3007 | set_head (av->top, (top_size - released) | PREV_INUSE); | |
3008 | check_malloc_state (av); | |
3009 | return 1; | |
3010 | } | |
3011 | } | |
fa8d436c | 3012 | } |
fa8d436c | 3013 | return 0; |
f65fd747 UD |
3014 | } |
3015 | ||
431c33c0 | 3016 | static void |
6c8dbf00 | 3017 | munmap_chunk (mchunkptr p) |
f65fd747 | 3018 | { |
c0e82f11 | 3019 | size_t pagesize = GLRO (dl_pagesize); |
6c8dbf00 | 3020 | INTERNAL_SIZE_T size = chunksize (p); |
f65fd747 | 3021 | |
6c8dbf00 | 3022 | assert (chunk_is_mmapped (p)); |
8e635611 | 3023 | |
ca89f1c7 | 3024 | uintptr_t mem = (uintptr_t) chunk2mem (p); |
e9c4fe93 FW |
3025 | uintptr_t block = (uintptr_t) p - prev_size (p); |
3026 | size_t total_size = prev_size (p) + size; | |
8e635611 UD |
3027 | /* Unfortunately we have to do the compiler's job by hand here. Normally |
3028 | we would test BLOCK and TOTAL-SIZE separately for compliance with the | |
3029 | page size. But gcc does not recognize the optimization possibility | |
3030 | (at the moment, at least) so we combine the two values into one before | |
3031 | the bit test. */ | |
c0e82f11 IK |
3032 | if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0 |
3033 | || __glibc_unlikely (!powerof2 (mem & (pagesize - 1)))) | |
ac3ed168 | 3034 | malloc_printerr ("munmap_chunk(): invalid pointer"); |
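/* (block | total_size) & (pagesize - 1) is zero exactly when both
   values are page aligned: OR-ing the two cannot clear a low bit, so a
   misalignment in either operand survives the single mask.  E.g. block
   == 0x7f0000001000 with total_size == 0x3000 passes, while total_size
   == 0x2f00 leaves bits below the page size set and aborts.  */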
f65fd747 | 3035 | |
a364a3a7 | 3036 | atomic_fetch_add_relaxed (&mp_.n_mmaps, -1); |
53b251c9 | 3037 | atomic_fetch_add_relaxed (&mp_.mmapped_mem, -total_size); |
f65fd747 | 3038 | |
6ef76f3b UD |
3039 | /* If munmap failed the process virtual memory address space is in a |
3040 | bad shape. Just leave the block hanging around, the process will | |
3041 | terminate shortly anyway since not much can be done. */ | |
6c8dbf00 | 3042 | __munmap ((char *) block, total_size); |
f65fd747 UD |
3043 | } |
3044 | ||
3045 | #if HAVE_MREMAP | |
3046 | ||
431c33c0 | 3047 | static mchunkptr |
6c8dbf00 | 3048 | mremap_chunk (mchunkptr p, size_t new_size) |
f65fd747 | 3049 | { |
8a35c3fe | 3050 | size_t pagesize = GLRO (dl_pagesize); |
e9c4fe93 | 3051 | INTERNAL_SIZE_T offset = prev_size (p); |
6c8dbf00 | 3052 | INTERNAL_SIZE_T size = chunksize (p); |
f65fd747 UD |
3053 | char *cp; |
3054 | ||
6c8dbf00 | 3055 | assert (chunk_is_mmapped (p)); |
ebe544bf IK |
3056 | |
3057 | uintptr_t block = (uintptr_t) p - offset; | |
ca89f1c7 | 3058 | uintptr_t mem = (uintptr_t) chunk2mem(p); |
ebe544bf IK |
3059 | size_t total_size = offset + size; |
3060 | if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0 | |
3061 | || __glibc_unlikely (!powerof2 (mem & (pagesize - 1)))) | |
3062 | malloc_printerr("mremap_chunk(): invalid pointer"); | |
f65fd747 UD |
3063 | |
3064 | /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ | |
8a35c3fe | 3065 | new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize); |
f65fd747 | 3066 | |
68f3802d | 3067 | /* No need to remap if the number of pages does not change. */ |
ebe544bf | 3068 | if (total_size == new_size) |
68f3802d UD |
3069 | return p; |
3070 | ||
ebe544bf | 3071 | cp = (char *) __mremap ((char *) block, total_size, new_size, |
6c8dbf00 | 3072 | MREMAP_MAYMOVE); |
f65fd747 | 3073 | |
6c8dbf00 OB |
3074 | if (cp == MAP_FAILED) |
3075 | return 0; | |
f65fd747 | 3076 | |
5f6d8d97 AZ |
3077 | madvise_thp (cp, new_size); |
3078 | ||
6c8dbf00 | 3079 | p = (mchunkptr) (cp + offset); |
f65fd747 | 3080 | |
ca89f1c7 | 3081 | assert (aligned_OK (chunk2mem (p))); |
f65fd747 | 3082 | |
e9c4fe93 | 3083 | assert (prev_size (p) == offset); |
6c8dbf00 | 3084 | set_head (p, (new_size - offset) | IS_MMAPPED); |
f65fd747 | 3085 | |
c6e4925d | 3086 | INTERNAL_SIZE_T new; |
89d40cac | 3087 | new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset) |
6c8dbf00 | 3088 | + new_size - size - offset; |
c6e4925d | 3089 | atomic_max (&mp_.max_mmapped_mem, new); |
f65fd747 UD |
3090 | return p; |
3091 | } | |
f65fd747 UD |
3092 | #endif /* HAVE_MREMAP */ |
3093 | ||
fa8d436c | 3094 | /*------------------------ Public wrappers. --------------------------------*/ |
f65fd747 | 3095 | |
d5c3fafc DD |
3096 | #if USE_TCACHE |
3097 | ||
3098 | /* We overlay this structure on the user-data portion of a chunk when | |
3099 | the chunk is stored in the per-thread cache. */ | |
3100 | typedef struct tcache_entry | |
3101 | { | |
3102 | struct tcache_entry *next; | |
bcdaad21 | 3103 | /* This field exists to detect double frees. */ |
fc859c30 | 3104 | uintptr_t key; |
d5c3fafc DD |
3105 | } tcache_entry; |
3106 | ||
3107 | /* There is one of these for each thread, which contains the | |
3108 | per-thread cache (hence "tcache_perthread_struct"). Keeping | |
3109 | overall size low is mildly important. Note that COUNTS and ENTRIES | |
3110 | are redundant (we could have just counted the linked list each | |
3111 | time); this is for performance reasons. */ | |
3112 | typedef struct tcache_perthread_struct | |
3113 | { | |
1f50f2ad | 3114 | uint16_t counts[TCACHE_MAX_BINS]; |
d5c3fafc DD |
3115 | tcache_entry *entries[TCACHE_MAX_BINS]; |
3116 | } tcache_perthread_struct; | |
3117 | ||
1e26d351 | 3118 | static __thread bool tcache_shutting_down = false; |
d5c3fafc DD |
3119 | static __thread tcache_perthread_struct *tcache = NULL; |
3120 | ||
fc859c30 SP |
3121 | /* Process-wide key to try and catch a double-free in the same thread. */ |
3122 | static uintptr_t tcache_key; | |
3123 | ||
3124 | /* The value of tcache_key does not really have to be a cryptographically | |
3125 | secure random number. It only needs to be arbitrary enough so that it does | |
3126 | not collide with values present in applications. If a collision does happen | |
3127 | consistently enough, it could cause a degradation in performance since the | |
3128 | entire list is walked to check whether the block has indeed been freed a | |
3129 | second time. The odds of this happening are exceedingly low though, about 1 | |
3130 | in 2^wordsize. There is probably a higher chance of the performance | |
3131 | degradation being due to a double free where the first free happened in a | |
3132 | different thread; that's a case this check does not cover. */ | |
3133 | static void | |
3134 | tcache_key_initialize (void) | |
3135 | { | |
7187efd0 | 3136 | if (__getrandom_nocancel (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK) |
fc859c30 SP |
3137 | != sizeof (tcache_key)) |
3138 | { | |
3139 | tcache_key = random_bits (); | |
3140 | #if __WORDSIZE == 64 | |
3141 | tcache_key = (tcache_key << 32) | random_bits (); | |
3142 | #endif | |
3143 | } | |
3144 | } | |
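/* Sketch of how the key is consumed (assumption: this mirrors the
   check in _int_free elsewhere in this file): on free of a
   tcache-sized chunk, e->key == tcache_key is a cheap hint that the
   block may already sit in the tcache; only then is the bin walked to
   confirm a double free:

     if (__glibc_unlikely (e->key == tcache_key))
       for (tmp = tcache->entries[tc_idx]; tmp; tmp = REVEAL_PTR (tmp->next))
         if (tmp == e)
           malloc_printerr ("free(): double free detected in tcache 2");
*/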
3145 | ||
d5c3fafc DD |
3146 | /* Caller must ensure that we know tc_idx is valid and there's room |
3147 | for more chunks. */ | |
e4dd4ace | 3148 | static __always_inline void |
d5c3fafc DD |
3149 | tcache_put (mchunkptr chunk, size_t tc_idx) |
3150 | { | |
ca89f1c7 | 3151 | tcache_entry *e = (tcache_entry *) chunk2mem (chunk); |
bcdaad21 DD |
3152 | |
3153 | /* Mark this chunk as "in the tcache" so the test in _int_free will | |
3154 | detect a double free. */ | |
fc859c30 | 3155 | e->key = tcache_key; |
bcdaad21 | 3156 | |
a1a486d7 | 3157 | e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]); |
d5c3fafc DD |
3158 | tcache->entries[tc_idx] = e; |
3159 | ++(tcache->counts[tc_idx]); | |
3160 | } | |
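/* The fd pointers stored above are mangled.  Assuming the safe-linking
   macros defined earlier in this file, the scheme is an XOR with the
   storage address shifted by the page bits:

     PROTECT_PTR (pos, ptr) == (((size_t) pos) >> 12) ^ (size_t) ptr
     REVEAL_PTR (ptr)       == PROTECT_PTR (&ptr, ptr)

   XOR-ing twice with the same key is the identity, which is why
   tcache_get () below recovers e->next with REVEAL_PTR; an attacker
   overwriting a freed chunk must know its address to forge a link.  */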
3161 | ||
3162 | /* Caller must ensure that we know tc_idx is valid and there's | |
3163 | available chunks to remove. */ | |
e4dd4ace | 3164 | static __always_inline void * |
d5c3fafc DD |
3165 | tcache_get (size_t tc_idx) |
3166 | { | |
3167 | tcache_entry *e = tcache->entries[tc_idx]; | |
49c3c376 EI |
3168 | if (__glibc_unlikely (!aligned_OK (e))) |
3169 | malloc_printerr ("malloc(): unaligned tcache chunk detected"); | |
a1a486d7 | 3170 | tcache->entries[tc_idx] = REVEAL_PTR (e->next); |
d5c3fafc | 3171 | --(tcache->counts[tc_idx]); |
fc859c30 | 3172 | e->key = 0; |
d5c3fafc DD |
3173 | return (void *) e; |
3174 | } | |
3175 | ||
0a947e06 FW |
3176 | static void |
3177 | tcache_thread_shutdown (void) | |
d5c3fafc DD |
3178 | { |
3179 | int i; | |
3180 | tcache_perthread_struct *tcache_tmp = tcache; | |
3181 | ||
dfec225e J |
3182 | tcache_shutting_down = true; |
3183 | ||
d5c3fafc DD |
3184 | if (!tcache) |
3185 | return; | |
3186 | ||
1e26d351 | 3187 | /* Disable the tcache and prevent it from being reinitialized. */ |
d5c3fafc DD |
3188 | tcache = NULL; |
3189 | ||
1e26d351 CD |
3190 | /* Free all of the entries and the tcache itself back to the arena |
3191 | heap for coalescing. */ | |
d5c3fafc DD |
3192 | for (i = 0; i < TCACHE_MAX_BINS; ++i) |
3193 | { | |
3194 | while (tcache_tmp->entries[i]) | |
3195 | { | |
3196 | tcache_entry *e = tcache_tmp->entries[i]; | |
768358b6 EI |
3197 | if (__glibc_unlikely (!aligned_OK (e))) |
3198 | malloc_printerr ("tcache_thread_shutdown(): " | |
3199 | "unaligned tcache chunk detected"); | |
a1a486d7 | 3200 | tcache_tmp->entries[i] = REVEAL_PTR (e->next); |
d5c3fafc DD |
3201 | __libc_free (e); |
3202 | } | |
3203 | } | |
3204 | ||
3205 | __libc_free (tcache_tmp); | |
d5c3fafc | 3206 | } |
d5c3fafc DD |
3207 | |
3208 | static void | |
3209 | tcache_init(void) | |
3210 | { | |
3211 | mstate ar_ptr; | |
3212 | void *victim = 0; | |
3213 | const size_t bytes = sizeof (tcache_perthread_struct); | |
3214 | ||
3215 | if (tcache_shutting_down) | |
3216 | return; | |
3217 | ||
3218 | arena_get (ar_ptr, bytes); | |
3219 | victim = _int_malloc (ar_ptr, bytes); | |
3220 | if (!victim && ar_ptr != NULL) | |
3221 | { | |
3222 | ar_ptr = arena_get_retry (ar_ptr, bytes); | |
3223 | victim = _int_malloc (ar_ptr, bytes); | |
3224 | } | |
3225 | ||
3226 | ||
3227 | if (ar_ptr != NULL) | |
3228 | __libc_lock_unlock (ar_ptr->mutex); | |
3229 | ||
3230 | /* In a low memory situation, we may not be able to allocate memory | |
3231 | - in which case, we just keep trying later. However, we | |
3232 | typically do this very early, so either there is sufficient | |
3233 | memory, or there isn't enough memory to do non-trivial | |
3234 | allocations anyway. */ | |
3235 | if (victim) | |
3236 | { | |
3237 | tcache = (tcache_perthread_struct *) victim; | |
3238 | memset (tcache, 0, sizeof (tcache_perthread_struct)); | |
3239 | } | |
3240 | ||
3241 | } | |
3242 | ||
0a947e06 | 3243 | # define MAYBE_INIT_TCACHE() \ |
d5c3fafc DD |
3244 | if (__glibc_unlikely (tcache == NULL)) \ |
3245 | tcache_init(); | |
3246 | ||
0a947e06 FW |
3247 | #else /* !USE_TCACHE */ |
3248 | # define MAYBE_INIT_TCACHE() | |
3249 | ||
3250 | static void | |
3251 | tcache_thread_shutdown (void) | |
3252 | { | |
3253 | /* Nothing to do if there is no thread cache. */ | |
3254 | } | |
3255 | ||
3256 | #endif /* !USE_TCACHE */ | |
d5c3fafc | 3257 | |
b5bd5bfe | 3258 | #if IS_IN (libc) |
6c8dbf00 OB |
3259 | void * |
3260 | __libc_malloc (size_t bytes) | |
fa8d436c UD |
3261 | { |
3262 | mstate ar_ptr; | |
22a89187 | 3263 | void *victim; |
f65fd747 | 3264 | |
9bf8e29c AZ |
3265 | _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2, |
3266 | "PTRDIFF_MAX is not more than half of SIZE_MAX"); | |
3267 | ||
cc35896e | 3268 | if (!__malloc_initialized) |
2d2d9f2b | 3269 | ptmalloc_init (); |
d5c3fafc DD |
3270 | #if USE_TCACHE |
3271 | /* int_free also calls request2size, be careful not to pad twice. */ | |
7519dee3 FW |
3272 | size_t tbytes = checked_request2size (bytes); |
3273 | if (tbytes == 0) | |
9bf8e29c AZ |
3274 | { |
3275 | __set_errno (ENOMEM); | |
3276 | return NULL; | |
3277 | } | |
d5c3fafc DD |
3278 | size_t tc_idx = csize2tidx (tbytes); |
3279 | ||
3280 | MAYBE_INIT_TCACHE (); | |
3281 | ||
3282 | DIAG_PUSH_NEEDS_COMMENT; | |
3283 | if (tc_idx < mp_.tcache_bins | |
d5c3fafc | 3284 | && tcache |
1f50f2ad | 3285 | && tcache->counts[tc_idx] > 0) |
d5c3fafc | 3286 | { |
3784dfc0 | 3287 | victim = tcache_get (tc_idx); |
0c719cf4 | 3288 | return tag_new_usable (victim); |
d5c3fafc DD |
3289 | } |
3290 | DIAG_POP_NEEDS_COMMENT; | |
3291 | #endif | |
f65fd747 | 3292 | |
3f6bb8a3 WD |
3293 | if (SINGLE_THREAD_P) |
3294 | { | |
0c719cf4 | 3295 | victim = tag_new_usable (_int_malloc (&main_arena, bytes)); |
3f6bb8a3 WD |
3296 | assert (!victim || chunk_is_mmapped (mem2chunk (victim)) || |
3297 | &main_arena == arena_for_chunk (mem2chunk (victim))); | |
3298 | return victim; | |
3299 | } | |
3300 | ||
94c5a52a | 3301 | arena_get (ar_ptr, bytes); |
425ce2ed | 3302 | |
6c8dbf00 | 3303 | victim = _int_malloc (ar_ptr, bytes); |
fff94fa2 SP |
3304 | /* Retry with another arena only if we were able to find a usable arena |
3305 | before. */ | |
3306 | if (!victim && ar_ptr != NULL) | |
6c8dbf00 OB |
3307 | { |
3308 | LIBC_PROBE (memory_malloc_retry, 1, bytes); | |
3309 | ar_ptr = arena_get_retry (ar_ptr, bytes); | |
fff94fa2 | 3310 | victim = _int_malloc (ar_ptr, bytes); |
60f0e64b | 3311 | } |
fff94fa2 SP |
3312 | |
3313 | if (ar_ptr != NULL) | |
4bf5f222 | 3314 | __libc_lock_unlock (ar_ptr->mutex); |
fff94fa2 | 3315 | |
0c719cf4 | 3316 | victim = tag_new_usable (victim); |
3784dfc0 | 3317 | |
6c8dbf00 OB |
3318 | assert (!victim || chunk_is_mmapped (mem2chunk (victim)) || |
3319 | ar_ptr == arena_for_chunk (mem2chunk (victim))); | |
fa8d436c | 3320 | return victim; |
f65fd747 | 3321 | } |
6c8dbf00 | 3322 | libc_hidden_def (__libc_malloc) |
f65fd747 | 3323 | |
fa8d436c | 3324 | void |
6c8dbf00 | 3325 | __libc_free (void *mem) |
f65fd747 | 3326 | { |
fa8d436c UD |
3327 | mstate ar_ptr; |
3328 | mchunkptr p; /* chunk corresponding to mem */ | |
3329 | ||
fa8d436c UD |
3330 | if (mem == 0) /* free(0) has no effect */ |
3331 | return; | |
f65fd747 | 3332 | |
3784dfc0 RE |
3333 | /* Quickly check that the freed pointer matches the tag for the memory. |
3334 | This gives a useful double-free detection. */ | |
d3262480 SN |
3335 | if (__glibc_unlikely (mtag_enabled)) |
3336 | *(volatile char *)mem; | |
3784dfc0 | 3337 | |
69fda43b PE |
3338 | int err = errno; |
3339 | ||
6c8dbf00 | 3340 | p = mem2chunk (mem); |
f65fd747 | 3341 | |
6c8dbf00 OB |
3342 | if (chunk_is_mmapped (p)) /* release mmapped memory. */ |
3343 | { | |
4cf6c72f FW |
3344 | /* See if the dynamic brk/mmap threshold needs adjusting. |
3345 | Dumped fake mmapped chunks do not affect the threshold. */ | |
6c8dbf00 | 3346 | if (!mp_.no_dyn_threshold |
e9c4fe93 | 3347 | && chunksize_nomask (p) > mp_.mmap_threshold |
0552fd2c | 3348 | && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX) |
6c8dbf00 OB |
3349 | { |
3350 | mp_.mmap_threshold = chunksize (p); | |
3351 | mp_.trim_threshold = 2 * mp_.mmap_threshold; | |
3352 | LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2, | |
3353 | mp_.mmap_threshold, mp_.trim_threshold); | |
3354 | } | |
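/* Example: freeing a 1 MiB mmapped chunk while mmap_threshold still
   holds the 128 KiB default raises mmap_threshold to ~1 MiB and
   trim_threshold to ~2 MiB, so future requests of that size are served
   from the sbrk heap instead of a fresh mmap/munmap round trip.  */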
3355 | munmap_chunk (p); | |
6c8dbf00 | 3356 | } |
69fda43b PE |
3357 | else |
3358 | { | |
3359 | MAYBE_INIT_TCACHE (); | |
f65fd747 | 3360 | |
b9b85be6 | 3361 | /* Mark the chunk as belonging to the library again. */ |
ca89f1c7 | 3362 | (void)tag_region (chunk2mem (p), memsize (p)); |
b9b85be6 | 3363 | |
69fda43b PE |
3364 | ar_ptr = arena_for_chunk (p); |
3365 | _int_free (ar_ptr, p, 0); | |
3366 | } | |
d5c3fafc | 3367 | |
69fda43b | 3368 | __set_errno (err); |
f65fd747 | 3369 | } |
3b49edc0 | 3370 | libc_hidden_def (__libc_free) |
f65fd747 | 3371 | |
6c8dbf00 OB |
3372 | void * |
3373 | __libc_realloc (void *oldmem, size_t bytes) | |
f65fd747 | 3374 | { |
fa8d436c | 3375 | mstate ar_ptr; |
6c8dbf00 | 3376 | INTERNAL_SIZE_T nb; /* padded request size */ |
f65fd747 | 3377 | |
6c8dbf00 | 3378 | void *newp; /* chunk to return */ |
f65fd747 | 3379 | |
cc35896e | 3380 | if (!__malloc_initialized) |
2d2d9f2b | 3381 | ptmalloc_init (); |
f65fd747 | 3382 | |
fa8d436c | 3383 | #if REALLOC_ZERO_BYTES_FREES |
6c8dbf00 OB |
3384 | if (bytes == 0 && oldmem != NULL) |
3385 | { | |
3386 | __libc_free (oldmem); return 0; | |
3387 | } | |
f65fd747 | 3388 | #endif |
f65fd747 | 3389 | |
fa8d436c | 3390 | /* realloc of null is supposed to be same as malloc */ |
6c8dbf00 OB |
3391 | if (oldmem == 0) |
3392 | return __libc_malloc (bytes); | |
f65fd747 | 3393 | |
3784dfc0 RE |
3394 | /* Perform a quick check to ensure that the pointer's tag matches the |
3395 | memory's tag. */ | |
d3262480 SN |
3396 | if (__glibc_unlikely (mtag_enabled)) |
3397 | *(volatile char*) oldmem; | |
3784dfc0 | 3398 | |
78ac92ad | 3399 | /* chunk corresponding to oldmem */ |
6c8dbf00 | 3400 | const mchunkptr oldp = mem2chunk (oldmem); |
78ac92ad | 3401 | /* its size */ |
6c8dbf00 | 3402 | const INTERNAL_SIZE_T oldsize = chunksize (oldp); |
f65fd747 | 3403 | |
fff94fa2 SP |
3404 | if (chunk_is_mmapped (oldp)) |
3405 | ar_ptr = NULL; | |
3406 | else | |
d5c3fafc DD |
3407 | { |
3408 | MAYBE_INIT_TCACHE (); | |
3409 | ar_ptr = arena_for_chunk (oldp); | |
3410 | } | |
fff94fa2 | 3411 | |
4cf6c72f FW |
3412 | /* Little security check which won't hurt performance: the allocator |
3413 | never wrapps around at the end of the address space. Therefore | |
3414 | we can exclude some size values which might appear here by | |
0552fd2c | 3415 | accident or by "design" from some intruder. */ |
4cf6c72f | 3416 | if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0) |
0552fd2c | 3417 | || __builtin_expect (misaligned_chunk (oldp), 0))) |
ac3ed168 | 3418 | malloc_printerr ("realloc(): invalid pointer"); |
dc165f7b | 3419 | |
7519dee3 FW |
3420 | nb = checked_request2size (bytes); |
3421 | if (nb == 0) | |
9bf8e29c AZ |
3422 | { |
3423 | __set_errno (ENOMEM); | |
3424 | return NULL; | |
3425 | } | |
f65fd747 | 3426 | |
6c8dbf00 OB |
3427 | if (chunk_is_mmapped (oldp)) |
3428 | { | |
3429 | void *newmem; | |
f65fd747 | 3430 | |
fa8d436c | 3431 | #if HAVE_MREMAP |
6c8dbf00 OB |
3432 | newp = mremap_chunk (oldp, nb); |
3433 | if (newp) | |
3784dfc0 | 3434 | { |
ca89f1c7 | 3435 | void *newmem = chunk2mem_tag (newp); |
3784dfc0 RE |
3436 | /* Give the new block a different tag. This helps to ensure |
3437 | that stale handles to the previous mapping are not | |
3438 | reused. There's a performance hit for both us and the | |
3439 | caller for doing this, so we might want to | |
3440 | reconsider. */ | |
0c719cf4 | 3441 | return tag_new_usable (newmem); |
3784dfc0 | 3442 | } |
f65fd747 | 3443 | #endif |
6c8dbf00 OB |
3444 | /* Note the extra SIZE_SZ overhead. */ |
3445 | if (oldsize - SIZE_SZ >= nb) | |
3446 | return oldmem; /* do nothing */ | |
3447 | ||
3448 | /* Must alloc, copy, free. */ | |
3449 | newmem = __libc_malloc (bytes); | |
3450 | if (newmem == 0) | |
3451 | return 0; /* propagate failure */ | |
fa8d436c | 3452 | |
3784dfc0 | 3453 | memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ); |
6c8dbf00 OB |
3454 | munmap_chunk (oldp); |
3455 | return newmem; | |
3456 | } | |
3457 | ||
3f6bb8a3 WD |
3458 | if (SINGLE_THREAD_P) |
3459 | { | |
3460 | newp = _int_realloc (ar_ptr, oldp, oldsize, nb); | |
3461 | assert (!newp || chunk_is_mmapped (mem2chunk (newp)) || | |
3462 | ar_ptr == arena_for_chunk (mem2chunk (newp))); | |
3463 | ||
3464 | return newp; | |
3465 | } | |
3466 | ||
4bf5f222 | 3467 | __libc_lock_lock (ar_ptr->mutex); |
f65fd747 | 3468 | |
6c8dbf00 | 3469 | newp = _int_realloc (ar_ptr, oldp, oldsize, nb); |
f65fd747 | 3470 | |
4bf5f222 | 3471 | __libc_lock_unlock (ar_ptr->mutex); |
6c8dbf00 OB |
3472 | assert (!newp || chunk_is_mmapped (mem2chunk (newp)) || |
3473 | ar_ptr == arena_for_chunk (mem2chunk (newp))); | |
07014fca UD |
3474 | |
3475 | if (newp == NULL) | |
3476 | { | |
3477 | /* Try harder to allocate memory in other arenas. */ | |
35fed6f1 | 3478 | LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem); |
6c8dbf00 | 3479 | newp = __libc_malloc (bytes); |
07014fca | 3480 | if (newp != NULL) |
6c8dbf00 | 3481 | { |
faf003ed | 3482 | size_t sz = memsize (oldp); |
42cc9606 | 3483 | memcpy (newp, oldmem, sz); |
ca89f1c7 | 3484 | (void) tag_region (chunk2mem (oldp), sz); |
6c8dbf00 OB |
3485 | _int_free (ar_ptr, oldp, 0); |
3486 | } | |
07014fca UD |
3487 | } |
3488 | ||
fa8d436c UD |
3489 | return newp; |
3490 | } | |
3b49edc0 | 3491 | libc_hidden_def (__libc_realloc) |
f65fd747 | 3492 | |
6c8dbf00 OB |
3493 | void * |
3494 | __libc_memalign (size_t alignment, size_t bytes) | |
10ad46bc | 3495 | { |
cc35896e | 3496 | if (!__malloc_initialized) |
2d2d9f2b SP |
3497 | ptmalloc_init (); |
3498 | ||
10ad46bc OB |
3499 | void *address = RETURN_ADDRESS (0); |
3500 | return _mid_memalign (alignment, bytes, address); | |
3501 | } | |
3502 | ||
3503 | static void * | |
3504 | _mid_memalign (size_t alignment, size_t bytes, void *address) | |
fa8d436c UD |
3505 | { |
3506 | mstate ar_ptr; | |
22a89187 | 3507 | void *p; |
f65fd747 | 3508 | |
10ad46bc | 3509 | /* If we need less alignment than we give anyway, just relay to malloc. */ |
6c8dbf00 OB |
3510 | if (alignment <= MALLOC_ALIGNMENT) |
3511 | return __libc_malloc (bytes); | |
1228ed5c | 3512 | |
fa8d436c | 3513 | /* Otherwise, ensure that it is at least a minimum chunk size */ |
6c8dbf00 OB |
3514 | if (alignment < MINSIZE) |
3515 | alignment = MINSIZE; | |
f65fd747 | 3516 | |
a56ee40b WN |
3517 | /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a |
3518 | power of 2 and will cause overflow in the check below. */ | |
3519 | if (alignment > SIZE_MAX / 2 + 1) | |
3520 | { | |
3521 | __set_errno (EINVAL); | |
3522 | return 0; | |
3523 | } | |
3524 | ||
10ad46bc OB |
3525 | |
3526 | /* Make sure alignment is power of 2. */ | |
6c8dbf00 OB |
3527 | if (!powerof2 (alignment)) |
3528 | { | |
3529 | size_t a = MALLOC_ALIGNMENT * 2; | |
3530 | while (a < alignment) | |
3531 | a <<= 1; | |
3532 | alignment = a; | |
3533 | } | |
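/* E.g. a request for alignment 24 is raised to 32: starting from
   MALLOC_ALIGNMENT * 2 the candidate doubles until it is a power of
   two at least as large as the request.  */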
10ad46bc | 3534 | |
3f6bb8a3 WD |
3535 | if (SINGLE_THREAD_P) |
3536 | { | |
3537 | p = _int_memalign (&main_arena, alignment, bytes); | |
3538 | assert (!p || chunk_is_mmapped (mem2chunk (p)) || | |
3539 | &main_arena == arena_for_chunk (mem2chunk (p))); | |
0c719cf4 | 3540 | return tag_new_usable (p); |
3f6bb8a3 WD |
3541 | } |
3542 | ||
6c8dbf00 | 3543 | arena_get (ar_ptr, bytes + alignment + MINSIZE); |
6c8dbf00 OB |
3544 | |
3545 | p = _int_memalign (ar_ptr, alignment, bytes); | |
fff94fa2 | 3546 | if (!p && ar_ptr != NULL) |
6c8dbf00 OB |
3547 | { |
3548 | LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment); | |
3549 | ar_ptr = arena_get_retry (ar_ptr, bytes); | |
fff94fa2 | 3550 | p = _int_memalign (ar_ptr, alignment, bytes); |
f65fd747 | 3551 | } |
fff94fa2 SP |
3552 | |
3553 | if (ar_ptr != NULL) | |
4bf5f222 | 3554 | __libc_lock_unlock (ar_ptr->mutex); |
fff94fa2 | 3555 | |
6c8dbf00 OB |
3556 | assert (!p || chunk_is_mmapped (mem2chunk (p)) || |
3557 | ar_ptr == arena_for_chunk (mem2chunk (p))); | |
0c719cf4 | 3558 | return tag_new_usable (p); |
f65fd747 | 3559 | } |
380d7e87 | 3560 | /* For ISO C11. */ |
3b49edc0 UD |
3561 | weak_alias (__libc_memalign, aligned_alloc) |
3562 | libc_hidden_def (__libc_memalign) | |
f65fd747 | 3563 | |
6c8dbf00 OB |
3564 | void * |
3565 | __libc_valloc (size_t bytes) | |
fa8d436c | 3566 | { |
cc35896e | 3567 | if (!__malloc_initialized) |
fa8d436c | 3568 | ptmalloc_init (); |
8088488d | 3569 | |
10ad46bc | 3570 | void *address = RETURN_ADDRESS (0); |
8a35c3fe | 3571 | size_t pagesize = GLRO (dl_pagesize); |
05f878c5 | 3572 | return _mid_memalign (pagesize, bytes, address); |
fa8d436c | 3573 | } |
f65fd747 | 3574 | |
6c8dbf00 OB |
3575 | void * |
3576 | __libc_pvalloc (size_t bytes) | |
fa8d436c | 3577 | { |
cc35896e | 3578 | if (!__malloc_initialized) |
fa8d436c | 3579 | ptmalloc_init (); |
8088488d | 3580 | |
10ad46bc | 3581 | void *address = RETURN_ADDRESS (0); |
8a35c3fe | 3582 | size_t pagesize = GLRO (dl_pagesize); |
9bf8e29c AZ |
3583 | size_t rounded_bytes; |
3584 | /* ALIGN_UP with overflow check. */ | |
3585 | if (__glibc_unlikely (__builtin_add_overflow (bytes, | |
3586 | pagesize - 1, | |
3587 | &rounded_bytes))) | |
1159a193 WN |
3588 | { |
3589 | __set_errno (ENOMEM); | |
3590 | return 0; | |
3591 | } | |
9bf8e29c | 3592 | rounded_bytes = rounded_bytes & -pagesize; | |
1159a193 | 3593 | |
05f878c5 | 3594 | return _mid_memalign (pagesize, rounded_bytes, address); |
fa8d436c | 3595 | } |
f65fd747 | 3596 | |
6c8dbf00 OB |
3597 | void * |
3598 | __libc_calloc (size_t n, size_t elem_size) | |
f65fd747 | 3599 | { |
d6285c9f | 3600 | mstate av; |
3784dfc0 RE |
3601 | mchunkptr oldtop; |
3602 | INTERNAL_SIZE_T sz, oldtopsize; | |
6c8dbf00 | 3603 | void *mem; |
d6285c9f CD |
3604 | unsigned long clearsize; |
3605 | unsigned long nclears; | |
3606 | INTERNAL_SIZE_T *d; | |
9bf8e29c | 3607 | ptrdiff_t bytes; |
0950889b | 3608 | |
9bf8e29c | 3609 | if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes))) |
6c8dbf00 | 3610 | { |
9bf8e29c AZ |
3611 | __set_errno (ENOMEM); |
3612 | return NULL; | |
d9af917d | 3613 | } |
3784dfc0 | 3614 | |
9bf8e29c | 3615 | sz = bytes; |
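/* E.g. calloc (SIZE_MAX / 2, 4) overflows the multiplication and fails
   cleanly with ENOMEM instead of allocating a short buffer.  */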
0950889b | 3616 | |
cc35896e | 3617 | if (!__malloc_initialized) |
2d2d9f2b | 3618 | ptmalloc_init (); |
f65fd747 | 3619 | |
d5c3fafc DD |
3620 | MAYBE_INIT_TCACHE (); |
3621 | ||
3f6bb8a3 WD |
3622 | if (SINGLE_THREAD_P) |
3623 | av = &main_arena; | |
3624 | else | |
3625 | arena_get (av, sz); | |
3626 | ||
fff94fa2 SP |
3627 | if (av) |
3628 | { | |
3629 | /* Check if we hand out the top chunk, in which case there may be no | |
3630 | need to clear. */ | |
d6285c9f | 3631 | #if MORECORE_CLEARS |
fff94fa2 SP |
3632 | oldtop = top (av); |
3633 | oldtopsize = chunksize (top (av)); | |
d6285c9f | 3634 | # if MORECORE_CLEARS < 2 |
fff94fa2 SP |
3635 | /* Only newly allocated memory is guaranteed to be cleared. */ |
3636 | if (av == &main_arena && | |
3637 | oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop) | |
3638 | oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop); | |
d6285c9f | 3639 | # endif |
fff94fa2 SP |
3640 | if (av != &main_arena) |
3641 | { | |
3642 | heap_info *heap = heap_for_ptr (oldtop); | |
3643 | if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop) | |
3644 | oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop; | |
3645 | } | |
3646 | #endif | |
3647 | } | |
3648 | else | |
d6285c9f | 3649 | { |
fff94fa2 SP |
3650 | /* No usable arenas. */ |
3651 | oldtop = 0; | |
3652 | oldtopsize = 0; | |
d6285c9f | 3653 | } |
d6285c9f CD |
3654 | mem = _int_malloc (av, sz); |
3655 | ||
d6285c9f CD |
3656 | assert (!mem || chunk_is_mmapped (mem2chunk (mem)) || |
3657 | av == arena_for_chunk (mem2chunk (mem))); | |
3658 | ||
3f6bb8a3 | 3659 | if (!SINGLE_THREAD_P) |
d6285c9f | 3660 | { |
3f6bb8a3 WD |
3661 | if (mem == 0 && av != NULL) |
3662 | { | |
3663 | LIBC_PROBE (memory_calloc_retry, 1, sz); | |
3664 | av = arena_get_retry (av, sz); | |
3665 | mem = _int_malloc (av, sz); | |
3666 | } | |
fff94fa2 | 3667 | |
3f6bb8a3 WD |
3668 | if (av != NULL) |
3669 | __libc_lock_unlock (av->mutex); | |
3670 | } | |
fff94fa2 SP |
3671 | |
3672 | /* Allocation failed even after a retry. */ | |
3673 | if (mem == 0) | |
3674 | return 0; | |
3675 | ||
3784dfc0 | 3676 | mchunkptr p = mem2chunk (mem); |
9d61722b | 3677 | |
3784dfc0 RE |
3678 | /* If we are using memory tagging, then we need to set the tags |
3679 | regardless of MORECORE_CLEARS, so we zero the whole block while | |
3680 | doing so. */ | |
9d61722b | 3681 | if (__glibc_unlikely (mtag_enabled)) |
faf003ed | 3682 | return tag_new_zero_region (mem, memsize (p)); |
9d61722b | 3683 | |
3784dfc0 | 3684 | INTERNAL_SIZE_T csz = chunksize (p); |
d6285c9f CD |
3685 | |
3686 | /* Two optional cases in which clearing not necessary */ | |
3687 | if (chunk_is_mmapped (p)) | |
3688 | { | |
3689 | if (__builtin_expect (perturb_byte, 0)) | |
3690 | return memset (mem, 0, sz); | |
3691 | ||
3692 | return mem; | |
3693 | } | |
3694 | ||
d6285c9f CD |
3695 | #if MORECORE_CLEARS |
3696 | if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) | |
3697 | { | |
3698 | /* clear only the bytes from non-freshly-sbrked memory */ | |
3699 | csz = oldtopsize; | |
3700 | } | |
3701 | #endif | |
3702 | ||
3703 | /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that | |
3704 | contents have an odd number of INTERNAL_SIZE_T-sized words; | |
3705 | minimally 3. */ | |
3706 | d = (INTERNAL_SIZE_T *) mem; | |
3707 | clearsize = csz - SIZE_SZ; | |
3708 | nclears = clearsize / sizeof (INTERNAL_SIZE_T); | |
3709 | assert (nclears >= 3); | |
3710 | ||
3711 | if (nclears > 9) | |
3712 | return memset (d, 0, clearsize); | |
3713 | ||
3714 | else | |
3715 | { | |
3716 | *(d + 0) = 0; | |
3717 | *(d + 1) = 0; | |
3718 | *(d + 2) = 0; | |
3719 | if (nclears > 4) | |
3720 | { | |
3721 | *(d + 3) = 0; | |
3722 | *(d + 4) = 0; | |
3723 | if (nclears > 6) | |
3724 | { | |
3725 | *(d + 5) = 0; | |
3726 | *(d + 6) = 0; | |
3727 | if (nclears > 8) | |
3728 | { | |
3729 | *(d + 7) = 0; | |
3730 | *(d + 8) = 0; | |
3731 | } | |
3732 | } | |
3733 | } | |
3734 | } | |
3735 | ||
3736 | return mem; | |
fa8d436c | 3737 | } |
b5bd5bfe | 3738 | #endif /* IS_IN (libc) */ |
f65fd747 | 3739 | |
f65fd747 | 3740 | /* |
6c8dbf00 OB |
3741 | ------------------------------ malloc ------------------------------ |
3742 | */ | |
f65fd747 | 3743 | |
6c8dbf00 OB |
3744 | static void * |
3745 | _int_malloc (mstate av, size_t bytes) | |
f65fd747 | 3746 | { |
fa8d436c | 3747 | INTERNAL_SIZE_T nb; /* normalized request size */ |
6c8dbf00 OB |
3748 | unsigned int idx; /* associated bin index */ |
3749 | mbinptr bin; /* associated bin */ | |
f65fd747 | 3750 | |
6c8dbf00 | 3751 | mchunkptr victim; /* inspected/selected chunk */ |
fa8d436c | 3752 | INTERNAL_SIZE_T size; /* its size */ |
6c8dbf00 | 3753 | int victim_index; /* its bin index */ |
f65fd747 | 3754 | |
6c8dbf00 OB |
3755 | mchunkptr remainder; /* remainder from a split */ |
3756 | unsigned long remainder_size; /* its size */ | |
8a4b65b4 | 3757 | |
6c8dbf00 OB |
3758 | unsigned int block; /* bit map traverser */ |
3759 | unsigned int bit; /* bit map traverser */ | |
3760 | unsigned int map; /* current word of binmap */ | |
8a4b65b4 | 3761 | |
6c8dbf00 OB |
3762 | mchunkptr fwd; /* misc temp for linking */ |
3763 | mchunkptr bck; /* misc temp for linking */ | |
8a4b65b4 | 3764 | |
d5c3fafc DD |
3765 | #if USE_TCACHE |
3766 | size_t tcache_unsorted_count; /* count of unsorted chunks processed */ | |
3767 | #endif | |
3768 | ||
fa8d436c | 3769 | /* |
6c8dbf00 OB |
3770 | Convert request size to internal form by adding SIZE_SZ bytes |
3771 | overhead plus possibly more to obtain necessary alignment and/or | |
3772 | to obtain a size of at least MINSIZE, the smallest allocatable | |
9bf8e29c | 3773 | size. Also, checked_request2size returns false for request sizes |
6c8dbf00 OB |
3774 | that are so large that they wrap around zero when padded and |
3775 | aligned. | |
3776 | */ | |
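/* Sketch of that conversion (assumption: mirrors the request2size
   computation defined earlier in this file):

     request2size (req) ==
       (req + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)
         ? MINSIZE
         : (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;

   On a 64-bit build (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, MINSIZE ==
   32) a request of 24 maps to nb == 32 and a request of 25 to nb == 48.  */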
f65fd747 | 3777 | |
7519dee3 FW |
3778 | nb = checked_request2size (bytes); |
3779 | if (nb == 0) | |
9bf8e29c AZ |
3780 | { |
3781 | __set_errno (ENOMEM); | |
3782 | return NULL; | |
3783 | } | |
f65fd747 | 3784 | |
fff94fa2 SP |
3785 | /* There are no usable arenas. Fall back to sysmalloc to get a chunk from |
3786 | mmap. */ | |
3787 | if (__glibc_unlikely (av == NULL)) | |
3788 | { | |
3789 | void *p = sysmalloc (nb, av); | |
3790 | if (p != NULL) | |
3791 | alloc_perturb (p, bytes); | |
3792 | return p; | |
3793 | } | |
3794 | ||
fa8d436c | 3795 | /* |
6c8dbf00 OB |
3796 | If the size qualifies as a fastbin, first check corresponding bin. |
3797 | This code is safe to execute even if av is not yet initialized, so we | |
3798 | can try it without checking, which saves some time on this fast path. | |
3799 | */ | |
f65fd747 | 3800 | |
71effcea FW |
3801 | #define REMOVE_FB(fb, victim, pp) \ |
3802 | do \ | |
3803 | { \ | |
3804 | victim = pp; \ | |
3805 | if (victim == NULL) \ | |
3806 | break; \ | |
a1a486d7 | 3807 | pp = REVEAL_PTR (victim->fd); \ |
49c3c376 | 3808 | if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \ |
a1a486d7 | 3809 | malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \ |
71effcea | 3810 | } \ |
a1a486d7 | 3811 | while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \ |
71effcea FW |
3812 | != victim); \ |
3813 | ||
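/* REMOVE_FB is a lock-free pop of the fastbin LIFO: pp tracks the
   current head, and the compare-and-swap installs the next link only
   if the head is still `victim', retrying after any concurrent push or
   pop.  A minimal sketch of the same pattern with C11 atomics
   (hypothetical names; glibc uses its internal catomic_* helpers):

     _Atomic (mchunkptr) *fb = ...;
     mchunkptr head = atomic_load (fb), next;
     do
       {
         if (head == NULL)
           break;                          // bin is empty
         next = REVEAL_PTR (head->fd);     // unmangle the link
       }
     while (!atomic_compare_exchange_weak (fb, &head, next));
*/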
6c8dbf00 OB |
3814 | if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ())) |
3815 | { | |
3816 | idx = fastbin_index (nb); | |
3817 | mfastbinptr *fb = &fastbin (av, idx); | |
71effcea FW |
3818 | mchunkptr pp; |
3819 | victim = *fb; | |
3820 | ||
905a7725 WD |
3821 | if (victim != NULL) |
3822 | { | |
49c3c376 EI |
3823 | if (__glibc_unlikely (misaligned_chunk (victim))) |
3824 | malloc_printerr ("malloc(): unaligned fastbin chunk detected 2"); | |
a1a486d7 | 3825 | |
71effcea | 3826 | if (SINGLE_THREAD_P) |
a1a486d7 | 3827 | *fb = REVEAL_PTR (victim->fd); |
71effcea FW |
3828 | else |
3829 | REMOVE_FB (fb, pp, victim); | |
3830 | if (__glibc_likely (victim != NULL)) | |
6923f6db | 3831 | { |
71effcea FW |
3832 | size_t victim_idx = fastbin_index (chunksize (victim)); |
3833 | if (__builtin_expect (victim_idx != idx, 0)) | |
3834 | malloc_printerr ("malloc(): memory corruption (fast)"); | |
3835 | check_remalloced_chunk (av, victim, nb); | |
3836 | #if USE_TCACHE | |
3837 | /* While we're here, if we see other chunks of the same size, | |
3838 | stash them in the tcache. */ | |
3839 | size_t tc_idx = csize2tidx (nb); | |
3840 | if (tcache && tc_idx < mp_.tcache_bins) | |
d5c3fafc | 3841 | { |
71effcea FW |
3842 | mchunkptr tc_victim; |
3843 | ||
3844 | /* While bin not empty and tcache not full, copy chunks. */ | |
3845 | while (tcache->counts[tc_idx] < mp_.tcache_count | |
3846 | && (tc_victim = *fb) != NULL) | |
3847 | { | |
49c3c376 EI |
3848 | if (__glibc_unlikely (misaligned_chunk (tc_victim))) |
3849 | malloc_printerr ("malloc(): unaligned fastbin chunk detected 3"); | |
71effcea | 3850 | if (SINGLE_THREAD_P) |
a1a486d7 | 3851 | *fb = REVEAL_PTR (tc_victim->fd); |
71effcea FW |
3852 | else |
3853 | { | |
3854 | REMOVE_FB (fb, pp, tc_victim); | |
3855 | if (__glibc_unlikely (tc_victim == NULL)) | |
3856 | break; | |
3857 | } | |
3858 | tcache_put (tc_victim, tc_idx); | |
3859 | } | |
d5c3fafc | 3860 | } |
6923f6db | 3861 | #endif |
ca89f1c7 | 3862 | void *p = chunk2mem (victim); |
71effcea FW |
3863 | alloc_perturb (p, bytes); |
3864 | return p; | |
3865 | } | |
905a7725 | 3866 | } |
fa8d436c | 3867 | } |
f65fd747 | 3868 | |
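/* Illustrative sketch of the link mangling used on the fastbin path
   above ("safe-linking"; the real macros are defined earlier in this
   file, shown here assuming PAGE_SHIFT == 12):

     PROTECT_PTR (pos, ptr) == ((size_t) (pos) >> 12) ^ (size_t) (ptr)
     REVEAL_PTR (ptr)       == PROTECT_PTR (&(ptr), (ptr))

   Each chunk's ->fd is stored XORed with the page-shifted address of
   the field that holds it, so a forged link must be built from a
   leaked heap address before it can survive REVEAL_PTR and the
   misaligned_chunk () checks.  */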
fa8d436c | 3869 | /* |
6c8dbf00 OB |
3870 | If a small request, check regular bin. Since these "smallbins" |
3871 | hold one size each, no searching within bins is necessary. | |
3872 | (For a large request, we need to wait until unsorted chunks are | |
3873 | processed to find best fit. But for small ones, fits are exact | |
3874 | anyway, so we can check now, which is faster.) | |
3875 | */ | |
3876 | ||
3877 | if (in_smallbin_range (nb)) | |
3878 | { | |
3879 | idx = smallbin_index (nb); | |
3880 | bin = bin_at (av, idx); | |
3881 | ||
3882 | if ((victim = last (bin)) != bin) | |
3883 | { | |
3381be5c WD |
3884 | bck = victim->bk; |
3885 | if (__glibc_unlikely (bck->fd != victim)) | |
3886 | malloc_printerr ("malloc(): smallbin double linked list corrupted"); | |
3887 | set_inuse_bit_at_offset (victim, nb); | |
3888 | bin->bk = bck; | |
3889 | bck->fd = bin; | |
3890 | ||
3891 | if (av != &main_arena) | |
3892 | set_non_main_arena (victim); | |
3893 | check_malloced_chunk (av, victim, nb); | |
d5c3fafc DD |
3894 | #if USE_TCACHE |
3895 | /* While we're here, if we see other chunks of the same size, | |
3896 | stash them in the tcache. */ | |
3897 | size_t tc_idx = csize2tidx (nb); | |
3898 | if (tcache && tc_idx < mp_.tcache_bins) | |
3899 | { | |
3900 | mchunkptr tc_victim; | |
3901 | ||
3902 | /* While bin not empty and tcache not full, copy chunks over. */ | |
3903 | while (tcache->counts[tc_idx] < mp_.tcache_count | |
3904 | && (tc_victim = last (bin)) != bin) | |
3905 | { | |
3906 | if (tc_victim != 0) | |
3907 | { | |
3908 | bck = tc_victim->bk; | |
3909 | set_inuse_bit_at_offset (tc_victim, nb); | |
3910 | if (av != &main_arena) | |
3911 | set_non_main_arena (tc_victim); | |
3912 | bin->bk = bck; | |
3913 | bck->fd = bin; | |
3914 | ||
3915 | tcache_put (tc_victim, tc_idx); | |
3916 | } | |
3917 | } | |
3918 | } | |
3919 | #endif | |
ca89f1c7 | 3920 | void *p = chunk2mem (victim); |
3381be5c WD |
3921 | alloc_perturb (p, bytes); |
3922 | return p; | |
6c8dbf00 | 3923 | } |
fa8d436c | 3924 | } |
f65fd747 | 3925 | |
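/* Illustrative: with SMALLBIN_WIDTH == 16 (64-bit), the lookup above
   reduces to roughly

     smallbin_index (sz) == (sz >> 4) + SMALLBIN_CORRECTION

   so each small bin holds exactly one chunk size (32, 48, 64, ...)
   and "searching" it is a single pop from the bin's bk end.  */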
a9177ff5 | 3926 | /* |
fa8d436c UD |
3927 | If this is a large request, consolidate fastbins before continuing. |
3928 | While it might look excessive to kill all fastbins before | |
3929 | even seeing if there is space available, this avoids | |
3930 | fragmentation problems normally associated with fastbins. | |
3931 | Also, in practice, programs tend to have runs of either small or | |
a9177ff5 | 3932 | large requests, but less often mixtures, so consolidation is not |
fa8d436c UD |
3933 | invoked all that often in most programs. And the programs in which | |
3934 | it is called frequently tend to fragment anyway. | |
6c8dbf00 | 3935 | */ |
7799b7b3 | 3936 | |
6c8dbf00 OB |
3937 | else |
3938 | { | |
3939 | idx = largebin_index (nb); | |
e956075a | 3940 | if (atomic_load_relaxed (&av->have_fastchunks)) |
6c8dbf00 OB |
3941 | malloc_consolidate (av); |
3942 | } | |
f65fd747 | 3943 | |
fa8d436c | 3944 | /* |
6c8dbf00 OB |
3945 | Process recently freed or remaindered chunks, taking one only if |
3946 | it is an exact fit, or, if this is a small request, the chunk is the remainder from | |
3947 | the most recent non-exact fit. Place other traversed chunks in | |
3948 | bins. Note that this step is the only place in any routine where | |
3949 | chunks are placed in bins. | |
3950 | ||
3951 | The outer loop here is needed because we might not realize until | |
3952 | near the end of malloc that we should have consolidated, so must | |
3953 | do so and retry. This happens at most once, and only when we would | |
3954 | otherwise need to expand memory to service a "small" request. | |
3955 | */ | |
3956 | ||
d5c3fafc DD |
3957 | #if USE_TCACHE |
3958 | INTERNAL_SIZE_T tcache_nb = 0; | |
3959 | size_t tc_idx = csize2tidx (nb); | |
3960 | if (tcache && tc_idx < mp_.tcache_bins) | |
3961 | tcache_nb = nb; | |
3962 | int return_cached = 0; | |
3963 | ||
3964 | tcache_unsorted_count = 0; | |
3965 | #endif | |
3966 | ||
6c8dbf00 OB |
3967 | for (;; ) |
3968 | { | |
3969 | int iters = 0; | |
3970 | while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av)) | |
3971 | { | |
3972 | bck = victim->bk; | |
6c8dbf00 | 3973 | size = chunksize (victim); |
b90ddd08 IK |
3974 | mchunkptr next = chunk_at_offset (victim, size); |
3975 | ||
3784dfc0 | 3976 | if (__glibc_unlikely (size <= CHUNK_HDR_SZ) |
b90ddd08 IK |
3977 | || __glibc_unlikely (size > av->system_mem)) |
3978 | malloc_printerr ("malloc(): invalid size (unsorted)"); | |
3784dfc0 | 3979 | if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ) |
b90ddd08 IK |
3980 | || __glibc_unlikely (chunksize_nomask (next) > av->system_mem)) |
3981 | malloc_printerr ("malloc(): invalid next size (unsorted)"); | |
3982 | if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size)) | |
3983 | malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)"); | |
3984 | if (__glibc_unlikely (bck->fd != victim) | |
3985 | || __glibc_unlikely (victim->fd != unsorted_chunks (av))) | |
3986 | malloc_printerr ("malloc(): unsorted double linked list corrupted"); | |
35cfefd9 | 3987 | if (__glibc_unlikely (prev_inuse (next))) |
b90ddd08 | 3988 | malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)"); |
6c8dbf00 OB |
3989 | |
3990 | /* | |
3991 | If a small request, try to use last remainder if it is the | |
3992 | only chunk in unsorted bin. This helps promote locality for | |
3993 | runs of consecutive small requests. This is the only | |
3994 | exception to best-fit, and applies only when there is | |
3995 | no exact fit for a small chunk. | |
3996 | */ | |
3997 | ||
3998 | if (in_smallbin_range (nb) && | |
3999 | bck == unsorted_chunks (av) && | |
4000 | victim == av->last_remainder && | |
4001 | (unsigned long) (size) > (unsigned long) (nb + MINSIZE)) | |
4002 | { | |
4003 | /* split and reattach remainder */ | |
4004 | remainder_size = size - nb; | |
4005 | remainder = chunk_at_offset (victim, nb); | |
4006 | unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder; | |
4007 | av->last_remainder = remainder; | |
4008 | remainder->bk = remainder->fd = unsorted_chunks (av); | |
4009 | if (!in_smallbin_range (remainder_size)) | |
4010 | { | |
4011 | remainder->fd_nextsize = NULL; | |
4012 | remainder->bk_nextsize = NULL; | |
4013 | } | |
4014 | ||
4015 | set_head (victim, nb | PREV_INUSE | | |
4016 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4017 | set_head (remainder, remainder_size | PREV_INUSE); | |
4018 | set_foot (remainder, remainder_size); | |
4019 | ||
4020 | check_malloced_chunk (av, victim, nb); | |
ca89f1c7 | 4021 | void *p = chunk2mem (victim); |
6c8dbf00 OB |
4022 | alloc_perturb (p, bytes); |
4023 | return p; | |
4024 | } | |
4025 | ||
4026 | /* remove from unsorted list */ | |
bdc3009b FG |
4027 | if (__glibc_unlikely (bck->fd != victim)) |
4028 | malloc_printerr ("malloc(): corrupted unsorted chunks 3"); | |
6c8dbf00 OB |
4029 | unsorted_chunks (av)->bk = bck; |
4030 | bck->fd = unsorted_chunks (av); | |
4031 | ||
4032 | /* Take now instead of binning if exact fit */ | |
4033 | ||
4034 | if (size == nb) | |
4035 | { | |
4036 | set_inuse_bit_at_offset (victim, size); | |
4037 | if (av != &main_arena) | |
e9c4fe93 | 4038 | set_non_main_arena (victim); |
d5c3fafc DD |
4039 | #if USE_TCACHE |
4040 | /* Fill cache first, return to user only if cache fills. | |
4041 | We may return one of these chunks later. */ | |
4042 | if (tcache_nb | |
4043 | && tcache->counts[tc_idx] < mp_.tcache_count) | |
4044 | { | |
4045 | tcache_put (victim, tc_idx); | |
4046 | return_cached = 1; | |
4047 | continue; | |
4048 | } | |
4049 | else | |
4050 | { | |
4051 | #endif | |
6c8dbf00 | 4052 | check_malloced_chunk (av, victim, nb); |
ca89f1c7 | 4053 | void *p = chunk2mem (victim); |
6c8dbf00 OB |
4054 | alloc_perturb (p, bytes); |
4055 | return p; | |
d5c3fafc DD |
4056 | #if USE_TCACHE |
4057 | } | |
4058 | #endif | |
6c8dbf00 OB |
4059 | } |
4060 | ||
4061 | /* place chunk in bin */ | |
4062 | ||
4063 | if (in_smallbin_range (size)) | |
4064 | { | |
4065 | victim_index = smallbin_index (size); | |
4066 | bck = bin_at (av, victim_index); | |
4067 | fwd = bck->fd; | |
4068 | } | |
4069 | else | |
4070 | { | |
4071 | victim_index = largebin_index (size); | |
4072 | bck = bin_at (av, victim_index); | |
4073 | fwd = bck->fd; | |
4074 | ||
4075 | /* maintain large bins in sorted order */ | |
4076 | if (fwd != bck) | |
4077 | { | |
4078 | /* Or with inuse bit to speed comparisons */ | |
4079 | size |= PREV_INUSE; | |
4080 | /* if smaller than smallest, bypass loop below */ | |
e9c4fe93 FW |
4081 | assert (chunk_main_arena (bck->bk)); |
4082 | if ((unsigned long) (size) | |
4083 | < (unsigned long) chunksize_nomask (bck->bk)) | |
6c8dbf00 OB |
4084 | { |
4085 | fwd = bck; | |
4086 | bck = bck->bk; | |
4087 | ||
4088 | victim->fd_nextsize = fwd->fd; | |
4089 | victim->bk_nextsize = fwd->fd->bk_nextsize; | |
4090 | fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; | |
4091 | } | |
4092 | else | |
4093 | { | |
e9c4fe93 FW |
4094 | assert (chunk_main_arena (fwd)); |
4095 | while ((unsigned long) size < chunksize_nomask (fwd)) | |
6c8dbf00 OB |
4096 | { |
4097 | fwd = fwd->fd_nextsize; | |
e9c4fe93 | 4098 | assert (chunk_main_arena (fwd)); |
6c8dbf00 OB |
4099 | } |
4100 | ||
e9c4fe93 FW |
4101 | if ((unsigned long) size |
4102 | == (unsigned long) chunksize_nomask (fwd)) | |
6c8dbf00 OB |
4103 | /* Always insert in the second position. */ |
4104 | fwd = fwd->fd; | |
4105 | else | |
4106 | { | |
4107 | victim->fd_nextsize = fwd; | |
4108 | victim->bk_nextsize = fwd->bk_nextsize; | |
5b06f538 AM |
4109 | if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd)) |
4110 | malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)"); | |
6c8dbf00 OB |
4111 | fwd->bk_nextsize = victim; |
4112 | victim->bk_nextsize->fd_nextsize = victim; | |
4113 | } | |
4114 | bck = fwd->bk; | |
5b06f538 AM |
4115 | if (bck->fd != fwd) |
4116 | malloc_printerr ("malloc(): largebin double linked list corrupted (bk)"); | |
6c8dbf00 OB |
4117 | } |
4118 | } | |
4119 | else | |
4120 | victim->fd_nextsize = victim->bk_nextsize = victim; | |
4121 | } | |
4122 | ||
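/* Illustrative picture of the invariant maintained above: a large bin
   is kept sorted by decreasing size on the fd/bk links, while the
   fd_nextsize/bk_nextsize links connect only the first chunk of each
   distinct size (0x500 -> 0x480 -> 0x420 -> ...), so the best-fit
   scan can step between sizes without visiting every chunk of a
   given size.  */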
4123 | mark_bin (av, victim_index); | |
4124 | victim->bk = bck; | |
4125 | victim->fd = fwd; | |
4126 | fwd->bk = victim; | |
4127 | bck->fd = victim; | |
4128 | ||
d5c3fafc DD |
4129 | #if USE_TCACHE |
4130 | /* If we've processed as many chunks as we're allowed while | |
4131 | filling the cache, return one of the cached ones. */ | |
4132 | ++tcache_unsorted_count; | |
4133 | if (return_cached | |
4134 | && mp_.tcache_unsorted_limit > 0 | |
4135 | && tcache_unsorted_count > mp_.tcache_unsorted_limit) | |
4136 | { | |
4137 | return tcache_get (tc_idx); | |
4138 | } | |
4139 | #endif | |
4140 | ||
6c8dbf00 OB |
4141 | #define MAX_ITERS 10000 |
4142 | if (++iters >= MAX_ITERS) | |
4143 | break; | |
4144 | } | |
fa8d436c | 4145 | |
d5c3fafc DD |
4146 | #if USE_TCACHE |
4147 | /* If all the small chunks we found ended up cached, return one now. */ | |
4148 | if (return_cached) | |
4149 | { | |
4150 | return tcache_get (tc_idx); | |
4151 | } | |
4152 | #endif | |
4153 | ||
a9177ff5 | 4154 | /* |
6c8dbf00 OB |
4155 | If a large request, scan through the chunks of current bin in |
4156 | sorted order to find smallest that fits. Use the skip list for this. | |
4157 | */ | |
4158 | ||
4159 | if (!in_smallbin_range (nb)) | |
4160 | { | |
4161 | bin = bin_at (av, idx); | |
4162 | ||
4163 | /* skip scan if empty or largest chunk is too small */ | |
e9c4fe93 FW |
4164 | if ((victim = first (bin)) != bin |
4165 | && (unsigned long) chunksize_nomask (victim) | |
4166 | >= (unsigned long) (nb)) | |
6c8dbf00 OB |
4167 | { |
4168 | victim = victim->bk_nextsize; | |
4169 | while (((unsigned long) (size = chunksize (victim)) < | |
4170 | (unsigned long) (nb))) | |
4171 | victim = victim->bk_nextsize; | |
4172 | ||
4173 | /* Avoid removing the first entry for a size so that the skip | |
4174 | list does not have to be rerouted. */ | |
e9c4fe93 FW |
4175 | if (victim != last (bin) |
4176 | && chunksize_nomask (victim) | |
4177 | == chunksize_nomask (victim->fd)) | |
6c8dbf00 OB |
4178 | victim = victim->fd; |
4179 | ||
4180 | remainder_size = size - nb; | |
1ecba1fa | 4181 | unlink_chunk (av, victim); |
6c8dbf00 OB |
4182 | |
4183 | /* Exhaust */ | |
4184 | if (remainder_size < MINSIZE) | |
4185 | { | |
4186 | set_inuse_bit_at_offset (victim, size); | |
4187 | if (av != &main_arena) | |
e9c4fe93 | 4188 | set_non_main_arena (victim); |
6c8dbf00 OB |
4189 | } |
4190 | /* Split */ | |
4191 | else | |
4192 | { | |
4193 | remainder = chunk_at_offset (victim, nb); | |
4194 | /* We cannot assume the unsorted list is empty and therefore | |
4195 | have to perform a complete insert here. */ | |
4196 | bck = unsorted_chunks (av); | |
4197 | fwd = bck->fd; | |
ac3ed168 FW |
4198 | if (__glibc_unlikely (fwd->bk != bck)) |
4199 | malloc_printerr ("malloc(): corrupted unsorted chunks"); | |
6c8dbf00 OB |
4200 | remainder->bk = bck; |
4201 | remainder->fd = fwd; | |
4202 | bck->fd = remainder; | |
4203 | fwd->bk = remainder; | |
4204 | if (!in_smallbin_range (remainder_size)) | |
4205 | { | |
4206 | remainder->fd_nextsize = NULL; | |
4207 | remainder->bk_nextsize = NULL; | |
4208 | } | |
4209 | set_head (victim, nb | PREV_INUSE | | |
4210 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4211 | set_head (remainder, remainder_size | PREV_INUSE); | |
4212 | set_foot (remainder, remainder_size); | |
4213 | } | |
4214 | check_malloced_chunk (av, victim, nb); | |
ca89f1c7 | 4215 | void *p = chunk2mem (victim); |
6c8dbf00 OB |
4216 | alloc_perturb (p, bytes); |
4217 | return p; | |
4218 | } | |
4219 | } | |
f65fd747 | 4220 | |
6c8dbf00 OB |
4221 | /* |
4222 | Search for a chunk by scanning bins, starting with next largest | |
4223 | bin. This search is strictly by best-fit; i.e., the smallest | |
4224 | (with ties going to approximately the least recently used) chunk | |
4225 | that fits is selected. | |
4226 | ||
4227 | The bitmap avoids needing to check that most blocks are nonempty. | |
4228 | The particular case of skipping all bins during warm-up phases | |
4229 | when no chunks have been returned yet is faster than it might look. | |
4230 | */ | |
4231 | ||
4232 | ++idx; | |
4233 | bin = bin_at (av, idx); | |
4234 | block = idx2block (idx); | |
4235 | map = av->binmap[block]; | |
4236 | bit = idx2bit (idx); | |
4237 | ||
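/* Illustrative: with BINMAPSHIFT == 5 the binmap helpers used here
   reduce to

     idx2block (i) == (i) >> 5            -- word holding bin i's bit
     idx2bit (i)   == 1U << ((i) & 0x1f)  -- bin i's bit in that word

   so a run of 32 empty bins costs a single word load to skip.  */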
4238 | for (;; ) | |
4239 | { | |
4240 | /* Skip rest of block if there are no more set bits in this block. */ | |
4241 | if (bit > map || bit == 0) | |
4242 | { | |
4243 | do | |
4244 | { | |
4245 | if (++block >= BINMAPSIZE) /* out of bins */ | |
4246 | goto use_top; | |
4247 | } | |
4248 | while ((map = av->binmap[block]) == 0); | |
4249 | ||
4250 | bin = bin_at (av, (block << BINMAPSHIFT)); | |
4251 | bit = 1; | |
4252 | } | |
4253 | ||
4254 | /* Advance to bin with set bit. There must be one. */ | |
4255 | while ((bit & map) == 0) | |
4256 | { | |
4257 | bin = next_bin (bin); | |
4258 | bit <<= 1; | |
4259 | assert (bit != 0); | |
4260 | } | |
4261 | ||
4262 | /* Inspect the bin. It is likely to be non-empty */ | |
4263 | victim = last (bin); | |
4264 | ||
4265 | /* If a false alarm (empty bin), clear the bit. */ | |
4266 | if (victim == bin) | |
4267 | { | |
4268 | av->binmap[block] = map &= ~bit; /* Write through */ | |
4269 | bin = next_bin (bin); | |
4270 | bit <<= 1; | |
4271 | } | |
4272 | ||
4273 | else | |
4274 | { | |
4275 | size = chunksize (victim); | |
4276 | ||
4277 | /* We know the first chunk in this bin is big enough to use. */ | |
4278 | assert ((unsigned long) (size) >= (unsigned long) (nb)); | |
4279 | ||
4280 | remainder_size = size - nb; | |
4281 | ||
4282 | /* unlink */ | |
1ecba1fa | 4283 | unlink_chunk (av, victim); |
6c8dbf00 OB |
4284 | |
4285 | /* Exhaust */ | |
4286 | if (remainder_size < MINSIZE) | |
4287 | { | |
4288 | set_inuse_bit_at_offset (victim, size); | |
4289 | if (av != &main_arena) | |
e9c4fe93 | 4290 | set_non_main_arena (victim); |
6c8dbf00 OB |
4291 | } |
4292 | ||
4293 | /* Split */ | |
4294 | else | |
4295 | { | |
4296 | remainder = chunk_at_offset (victim, nb); | |
4297 | ||
4298 | /* We cannot assume the unsorted list is empty and therefore | |
4299 | have to perform a complete insert here. */ | |
4300 | bck = unsorted_chunks (av); | |
4301 | fwd = bck->fd; | |
ac3ed168 FW |
4302 | if (__glibc_unlikely (fwd->bk != bck)) |
4303 | malloc_printerr ("malloc(): corrupted unsorted chunks 2"); | |
6c8dbf00 OB |
4304 | remainder->bk = bck; |
4305 | remainder->fd = fwd; | |
4306 | bck->fd = remainder; | |
4307 | fwd->bk = remainder; | |
4308 | ||
4309 | /* advertise as last remainder */ | |
4310 | if (in_smallbin_range (nb)) | |
4311 | av->last_remainder = remainder; | |
4312 | if (!in_smallbin_range (remainder_size)) | |
4313 | { | |
4314 | remainder->fd_nextsize = NULL; | |
4315 | remainder->bk_nextsize = NULL; | |
4316 | } | |
4317 | set_head (victim, nb | PREV_INUSE | | |
4318 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4319 | set_head (remainder, remainder_size | PREV_INUSE); | |
4320 | set_foot (remainder, remainder_size); | |
4321 | } | |
4322 | check_malloced_chunk (av, victim, nb); | |
ca89f1c7 | 4323 | void *p = chunk2mem (victim); |
6c8dbf00 OB |
4324 | alloc_perturb (p, bytes); |
4325 | return p; | |
4326 | } | |
4327 | } | |
4328 | ||
4329 | use_top: | |
4330 | /* | |
4331 | If large enough, split off the chunk bordering the end of memory | |
4332 | (held in av->top). Note that this is in accord with the best-fit | |
4333 | search rule. In effect, av->top is treated as larger (and thus | |
4334 | less well fitting) than any other available chunk since it can | |
4335 | be extended to be as large as necessary (up to system | |
4336 | limitations). | |
4337 | ||
4338 | We require that av->top always exists (i.e., has size >= | |
4339 | MINSIZE) after initialization, so if it would otherwise be | |
4340 | exhausted by current request, it is replenished. (The main | |
4341 | reason for ensuring it exists is that we may need MINSIZE space | |
4342 | to put in fenceposts in sysmalloc.) | |
4343 | */ | |
4344 | ||
4345 | victim = av->top; | |
4346 | size = chunksize (victim); | |
4347 | ||
30a17d8c PC |
4348 | if (__glibc_unlikely (size > av->system_mem)) |
4349 | malloc_printerr ("malloc(): corrupted top size"); | |
4350 | ||
6c8dbf00 OB |
4351 | if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE)) |
4352 | { | |
4353 | remainder_size = size - nb; | |
4354 | remainder = chunk_at_offset (victim, nb); | |
4355 | av->top = remainder; | |
4356 | set_head (victim, nb | PREV_INUSE | | |
4357 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4358 | set_head (remainder, remainder_size | PREV_INUSE); | |
4359 | ||
4360 | check_malloced_chunk (av, victim, nb); | |
ca89f1c7 | 4361 | void *p = chunk2mem (victim); |
6c8dbf00 OB |
4362 | alloc_perturb (p, bytes); |
4363 | return p; | |
4364 | } | |
4365 | ||
4366 | /* When we are using atomic ops to free fast chunks we can get | |
4367 | here for all block sizes. */ | |
e956075a | 4368 | else if (atomic_load_relaxed (&av->have_fastchunks)) |
6c8dbf00 OB |
4369 | { |
4370 | malloc_consolidate (av); | |
4371 | /* restore original bin index */ | |
4372 | if (in_smallbin_range (nb)) | |
4373 | idx = smallbin_index (nb); | |
4374 | else | |
4375 | idx = largebin_index (nb); | |
4376 | } | |
f65fd747 | 4377 | |
6c8dbf00 OB |
4378 | /* |
4379 | Otherwise, relay to handle system-dependent cases | |
4380 | */ | |
425ce2ed | 4381 | else |
6c8dbf00 OB |
4382 | { |
4383 | void *p = sysmalloc (nb, av); | |
4384 | if (p != NULL) | |
4385 | alloc_perturb (p, bytes); | |
4386 | return p; | |
4387 | } | |
425ce2ed | 4388 | } |
fa8d436c | 4389 | } |
f65fd747 | 4390 | |
fa8d436c | 4391 | /* |
6c8dbf00 OB |
4392 | ------------------------------ free ------------------------------ |
4393 | */ | |
f65fd747 | 4394 | |
78ac92ad | 4395 | static void |
6c8dbf00 | 4396 | _int_free (mstate av, mchunkptr p, int have_lock) |
f65fd747 | 4397 | { |
fa8d436c | 4398 | INTERNAL_SIZE_T size; /* its size */ |
6c8dbf00 OB |
4399 | mfastbinptr *fb; /* associated fastbin */ |
4400 | mchunkptr nextchunk; /* next contiguous chunk */ | |
fa8d436c | 4401 | INTERNAL_SIZE_T nextsize; /* its size */ |
6c8dbf00 | 4402 | int nextinuse; /* true if nextchunk is used */ |
fa8d436c | 4403 | INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */ |
6c8dbf00 OB |
4404 | mchunkptr bck; /* misc temp for linking */ |
4405 | mchunkptr fwd; /* misc temp for linking */ | |
fa8d436c | 4406 | |
6c8dbf00 | 4407 | size = chunksize (p); |
f65fd747 | 4408 | |
37fa1953 UD |
4409 | /* Little security check which won't hurt performance: the |
4410 | allocator never wraps around at the end of the address space. | |
4411 | Therefore we can exclude some size values which might appear | |
4412 | here by accident or by "design" from some intruder. */ | |
dc165f7b | 4413 | if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0) |
073f560e | 4414 | || __builtin_expect (misaligned_chunk (p), 0)) |
ac3ed168 | 4415 | malloc_printerr ("free(): invalid pointer"); |
347c92e9 L |
4416 | /* We know that each chunk is at least MINSIZE bytes in size and a | |
4417 | multiple of MALLOC_ALIGNMENT. */ | |
a1ffb40e | 4418 | if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size))) |
ac3ed168 | 4419 | malloc_printerr ("free(): invalid size"); |
f65fd747 | 4420 | |
37fa1953 | 4421 | check_inuse_chunk(av, p); |
f65fd747 | 4422 | |
d5c3fafc DD |
4423 | #if USE_TCACHE |
4424 | { | |
4425 | size_t tc_idx = csize2tidx (size); | |
affec03b | 4426 | if (tcache != NULL && tc_idx < mp_.tcache_bins) |
d5c3fafc | 4427 | { |
affec03b | 4428 | /* Check to see if it's already in the tcache. */ |
ca89f1c7 | 4429 | tcache_entry *e = (tcache_entry *) chunk2mem (p); |
affec03b FW |
4430 | |
4431 | /* This test succeeds on double free. However, we don't 100% | |
4432 | trust it (it also matches random payload data at a 1 in | |
4433 | 2^<size_t> chance), so verify it's not an unlikely | |
4434 | coincidence before aborting. */ | |
fc859c30 | 4435 | if (__glibc_unlikely (e->key == tcache_key)) |
affec03b FW |
4436 | { |
4437 | tcache_entry *tmp; | |
0e00b357 | 4438 | size_t cnt = 0; |
affec03b FW |
4439 | LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx); |
4440 | for (tmp = tcache->entries[tc_idx]; | |
4441 | tmp; | |
0e00b357 | 4442 | tmp = REVEAL_PTR (tmp->next), ++cnt) |
768358b6 | 4443 | { |
0e00b357 H |
4444 | if (cnt >= mp_.tcache_count) |
4445 | malloc_printerr ("free(): too many chunks detected in tcache"); | |
768358b6 EI |
4446 | if (__glibc_unlikely (!aligned_OK (tmp))) |
4447 | malloc_printerr ("free(): unaligned chunk detected in tcache 2"); | |
4448 | if (tmp == e) | |
4449 | malloc_printerr ("free(): double free detected in tcache 2"); | |
4450 | /* If we get here, it was a coincidence. We've wasted a | |
4451 | few cycles, but don't abort. */ | |
4452 | } | |
affec03b FW |
4453 | } |
4454 | ||
4455 | if (tcache->counts[tc_idx] < mp_.tcache_count) | |
4456 | { | |
4457 | tcache_put (p, tc_idx); | |
4458 | return; | |
4459 | } | |
d5c3fafc DD |
4460 | } |
4461 | } | |
4462 | #endif | |
4463 | ||
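/* Sketch of the bookkeeping behind the key test above (the real
   definitions appear earlier in this file; shown as in recent glibc):

     typedef struct tcache_entry
     {
       struct tcache_entry *next;   -- mangled with PROTECT_PTR
       uintptr_t key;               -- set to tcache_key while cached
     } tcache_entry;

   tcache_put () stamps every cached chunk with the per-process random
   tcache_key, so meeting that stamp again on entry to free () flags a
   probable double free, which the list walk above then confirms.  */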
37fa1953 UD |
4464 | /* |
4465 | If eligible, place chunk on a fastbin so it can be found | |
4466 | and used quickly in malloc. | |
4467 | */ | |
6bf4302e | 4468 | |
9bf248c6 | 4469 | if ((unsigned long)(size) <= (unsigned long)(get_max_fast ()) |
6bf4302e | 4470 | |
37fa1953 UD |
4471 | #if TRIM_FASTBINS |
4472 | /* | |
4473 | If TRIM_FASTBINS set, don't place chunks | |
4474 | bordering top into fastbins | |
4475 | */ | |
4476 | && (chunk_at_offset(p, size) != av->top) | |
4477 | #endif | |
4478 | ) { | |
fa8d436c | 4479 | |
e9c4fe93 | 4480 | if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size)) |
3784dfc0 | 4481 | <= CHUNK_HDR_SZ, 0) |
893e6098 UD |
4482 | || __builtin_expect (chunksize (chunk_at_offset (p, size)) |
4483 | >= av->system_mem, 0)) | |
4484 | { | |
d74e6f6c | 4485 | bool fail = true; |
bec466d9 | 4486 | /* We might not have a lock at this point and concurrent modifications |
d74e6f6c WD |
4487 | of system_mem might result in a false positive. Redo the test after |
4488 | getting the lock. */ | |
4489 | if (!have_lock) | |
4490 | { | |
4491 | __libc_lock_lock (av->mutex); | |
3784dfc0 | 4492 | fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ |
d74e6f6c WD |
4493 | || chunksize (chunk_at_offset (p, size)) >= av->system_mem); |
4494 | __libc_lock_unlock (av->mutex); | |
4495 | } | |
4496 | ||
4497 | if (fail) | |
ac3ed168 | 4498 | malloc_printerr ("free(): invalid next size (fast)"); |
893e6098 UD |
4499 | } |
4500 | ||
ca89f1c7 | 4501 | free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ); |
425ce2ed | 4502 | |
e956075a | 4503 | atomic_store_relaxed (&av->have_fastchunks, true); |
90a3055e UD |
4504 | unsigned int idx = fastbin_index(size); |
4505 | fb = &fastbin (av, idx); | |
425ce2ed | 4506 | |
362b47fe | 4507 | /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */ |
71effcea FW |
4508 | mchunkptr old = *fb, old2; |
4509 | ||
4510 | if (SINGLE_THREAD_P) | |
4511 | { | |
4512 | /* Check that the top of the bin is not the record we are going to | |
4513 | add (i.e., double free). */ | |
4514 | if (__builtin_expect (old == p, 0)) | |
4515 | malloc_printerr ("double free or corruption (fasttop)"); | |
a1a486d7 | 4516 | p->fd = PROTECT_PTR (&p->fd, old); |
71effcea FW |
4517 | *fb = p; |
4518 | } | |
4519 | else | |
4520 | do | |
4521 | { | |
4522 | /* Check that the top of the bin is not the record we are going to | |
4523 | add (i.e., double free). */ | |
4524 | if (__builtin_expect (old == p, 0)) | |
4525 | malloc_printerr ("double free or corruption (fasttop)"); | |
a1a486d7 EI |
4526 | old2 = old; |
4527 | p->fd = PROTECT_PTR (&p->fd, old); | |
71effcea FW |
4528 | } |
4529 | while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) | |
4530 | != old2); | |
a15d53e2 WD |
4531 | |
4532 | /* Check that size of fastbin chunk at the top is the same as | |
4533 | size of the chunk that we are adding. We can dereference OLD | |
4534 | only if we have the lock, otherwise it might have already been | |
4535 | allocated again. */ | |
4536 | if (have_lock && old != NULL | |
4537 | && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0)) | |
ac3ed168 | 4538 | malloc_printerr ("invalid fastbin entry (free)"); |
37fa1953 | 4539 | } |
f65fd747 | 4540 | |
37fa1953 UD |
4541 | /* |
4542 | Consolidate other non-mmapped chunks as they arrive. | |
4543 | */ | |
fa8d436c | 4544 | |
37fa1953 | 4545 | else if (!chunk_is_mmapped(p)) { |
a15d53e2 WD |
4546 | |
4547 | /* If we're single-threaded, don't lock the arena. */ | |
4548 | if (SINGLE_THREAD_P) | |
4549 | have_lock = true; | |
4550 | ||
24cffce7 | 4551 | if (!have_lock) |
4bf5f222 | 4552 | __libc_lock_lock (av->mutex); |
425ce2ed | 4553 | |
37fa1953 | 4554 | nextchunk = chunk_at_offset(p, size); |
fa8d436c | 4555 | |
37fa1953 UD |
4556 | /* Lightweight tests: check whether the block is already the |
4557 | top block. */ | |
a1ffb40e | 4558 | if (__glibc_unlikely (p == av->top)) |
ac3ed168 | 4559 | malloc_printerr ("double free or corruption (top)"); |
37fa1953 UD |
4560 | /* Or whether the next chunk is beyond the boundaries of the arena. */ |
4561 | if (__builtin_expect (contiguous (av) | |
4562 | && (char *) nextchunk | |
4563 | >= ((char *) av->top + chunksize(av->top)), 0)) | |
ac3ed168 | 4564 | malloc_printerr ("double free or corruption (out)"); |
37fa1953 | 4565 | /* Or whether the block is actually not marked used. */ |
a1ffb40e | 4566 | if (__glibc_unlikely (!prev_inuse(nextchunk))) |
ac3ed168 | 4567 | malloc_printerr ("double free or corruption (!prev)"); |
fa8d436c | 4568 | |
37fa1953 | 4569 | nextsize = chunksize(nextchunk); |
3784dfc0 | 4570 | if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0) |
893e6098 | 4571 | || __builtin_expect (nextsize >= av->system_mem, 0)) |
ac3ed168 | 4572 | malloc_printerr ("free(): invalid next size (normal)"); |
fa8d436c | 4573 | |
ca89f1c7 | 4574 | free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ); |
854278df | 4575 | |
37fa1953 UD |
4576 | /* consolidate backward */ |
4577 | if (!prev_inuse(p)) { | |
e9c4fe93 | 4578 | prevsize = prev_size (p); |
37fa1953 UD |
4579 | size += prevsize; |
4580 | p = chunk_at_offset(p, -((long) prevsize)); | |
d6db68e6 ME |
4581 | if (__glibc_unlikely (chunksize(p) != prevsize)) |
4582 | malloc_printerr ("corrupted size vs. prev_size while consolidating"); | |
1ecba1fa | 4583 | unlink_chunk (av, p); |
37fa1953 | 4584 | } |
a9177ff5 | 4585 | |
37fa1953 UD |
4586 | if (nextchunk != av->top) { |
4587 | /* get and clear inuse bit */ | |
4588 | nextinuse = inuse_bit_at_offset(nextchunk, nextsize); | |
4589 | ||
4590 | /* consolidate forward */ | |
4591 | if (!nextinuse) { | |
1ecba1fa | 4592 | unlink_chunk (av, nextchunk); |
37fa1953 UD |
4593 | size += nextsize; |
4594 | } else | |
4595 | clear_inuse_bit_at_offset(nextchunk, 0); | |
10dc2a90 | 4596 | |
fa8d436c | 4597 | /* |
37fa1953 UD |
4598 | Place the chunk in unsorted chunk list. Chunks are |
4599 | not placed into regular bins until after they have | |
4600 | been given one chance to be used in malloc. | |
fa8d436c | 4601 | */ |
f65fd747 | 4602 | |
37fa1953 UD |
4603 | bck = unsorted_chunks(av); |
4604 | fwd = bck->fd; | |
a1ffb40e | 4605 | if (__glibc_unlikely (fwd->bk != bck)) |
ac3ed168 | 4606 | malloc_printerr ("free(): corrupted unsorted chunks"); |
37fa1953 | 4607 | p->fd = fwd; |
7ecfbd38 UD |
4608 | p->bk = bck; |
4609 | if (!in_smallbin_range(size)) | |
4610 | { | |
4611 | p->fd_nextsize = NULL; | |
4612 | p->bk_nextsize = NULL; | |
4613 | } | |
37fa1953 UD |
4614 | bck->fd = p; |
4615 | fwd->bk = p; | |
8a4b65b4 | 4616 | |
37fa1953 UD |
4617 | set_head(p, size | PREV_INUSE); |
4618 | set_foot(p, size); | |
4619 | ||
4620 | check_free_chunk(av, p); | |
4621 | } | |
4622 | ||
4623 | /* | |
4624 | If the chunk borders the current high end of memory, | |
4625 | consolidate into top | |
4626 | */ | |
4627 | ||
4628 | else { | |
4629 | size += nextsize; | |
4630 | set_head(p, size | PREV_INUSE); | |
4631 | av->top = p; | |
4632 | check_chunk(av, p); | |
4633 | } | |
4634 | ||
4635 | /* | |
4636 | If freeing a large space, consolidate possibly-surrounding | |
4637 | chunks. Then, if the total unused topmost memory exceeds trim | |
4638 | threshold, ask malloc_trim to reduce top. | |
4639 | ||
4640 | Unless max_fast is 0, we don't know if there are fastbins | |
4641 | bordering top, so we cannot tell for sure whether threshold | |
4642 | has been reached unless fastbins are consolidated. But we | |
4643 | don't want to consolidate on each free. As a compromise, | |
4644 | consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD | |
4645 | is reached. | |
4646 | */ | |
fa8d436c | 4647 | |
37fa1953 | 4648 | if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { |
e956075a | 4649 | if (atomic_load_relaxed (&av->have_fastchunks)) |
37fa1953 | 4650 | malloc_consolidate(av); |
fa8d436c | 4651 | |
37fa1953 | 4652 | if (av == &main_arena) { |
a9177ff5 | 4653 | #ifndef MORECORE_CANNOT_TRIM |
37fa1953 UD |
4654 | if ((unsigned long)(chunksize(av->top)) >= |
4655 | (unsigned long)(mp_.trim_threshold)) | |
3b49edc0 | 4656 | systrim(mp_.top_pad, av); |
fa8d436c | 4657 | #endif |
37fa1953 UD |
4658 | } else { |
4659 | /* Always try heap_trim(), even if the top chunk is not | |
4660 | large, because the corresponding heap might go away. */ | |
4661 | heap_info *heap = heap_for_ptr(top(av)); | |
fa8d436c | 4662 | |
37fa1953 UD |
4663 | assert(heap->ar_ptr == av); |
4664 | heap_trim(heap, mp_.top_pad); | |
fa8d436c | 4665 | } |
fa8d436c | 4666 | } |
10dc2a90 | 4667 | |
24cffce7 | 4668 | if (!have_lock) |
4bf5f222 | 4669 | __libc_lock_unlock (av->mutex); |
37fa1953 UD |
4670 | } |
4671 | /* | |
22a89187 | 4672 | If the chunk was allocated via mmap, release via munmap(). |
37fa1953 UD |
4673 | */ |
4674 | ||
4675 | else { | |
c120d94d | 4676 | munmap_chunk (p); |
fa8d436c | 4677 | } |
10dc2a90 UD |
4678 | } |
4679 | ||
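/* The lock-free fastbin push above follows the classic Treiber-stack
   pattern.  A self-contained C11 analogue (illustrative only: no
   pointer mangling, no double-free checks, and unsafe to pop
   concurrently with node reuse because of ABA):  */
#if 0
#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static _Atomic (struct node *) stack_top;

static void
stack_push (struct node *n)
{
  struct node *old = atomic_load_explicit (&stack_top, memory_order_relaxed);
  do
    n->next = old;              /* link before publishing */
  while (!atomic_compare_exchange_weak_explicit
           (&stack_top, &old, n, memory_order_release, memory_order_relaxed));
}

static struct node *
stack_pop (void)
{
  struct node *old = atomic_load_explicit (&stack_top, memory_order_acquire);
  while (old != NULL
         && !atomic_compare_exchange_weak_explicit
              (&stack_top, &old, old->next,
               memory_order_acquire, memory_order_relaxed))
    ;                           /* OLD is reloaded on CAS failure */
  return old;
}
#endif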
fa8d436c UD |
4680 | /* |
4681 | ------------------------- malloc_consolidate ------------------------- | |
4682 | ||
4683 | malloc_consolidate is a specialized version of free() that tears | |
4684 | down chunks held in fastbins. Free itself cannot be used for this | |
4685 | purpose since, among other things, it might place chunks back onto | |
4686 | fastbins. So, instead, we need to use a minor variant of the same | |
4687 | code. | |
fa8d436c UD |
4688 | */ |
4689 | ||
fa8d436c | 4690 | static void malloc_consolidate(mstate av) |
10dc2a90 | 4691 | { |
fa8d436c UD |
4692 | mfastbinptr* fb; /* current fastbin being consolidated */ |
4693 | mfastbinptr* maxfb; /* last fastbin (for loop control) */ | |
4694 | mchunkptr p; /* current chunk being consolidated */ | |
4695 | mchunkptr nextp; /* next chunk to consolidate */ | |
4696 | mchunkptr unsorted_bin; /* bin header */ | |
4697 | mchunkptr first_unsorted; /* chunk to link to */ | |
4698 | ||
4699 | /* These have same use as in free() */ | |
4700 | mchunkptr nextchunk; | |
4701 | INTERNAL_SIZE_T size; | |
4702 | INTERNAL_SIZE_T nextsize; | |
4703 | INTERNAL_SIZE_T prevsize; | |
4704 | int nextinuse; | |
10dc2a90 | 4705 | |
3381be5c | 4706 | atomic_store_relaxed (&av->have_fastchunks, false); |
10dc2a90 | 4707 | |
3381be5c | 4708 | unsorted_bin = unsorted_chunks(av); |
a9177ff5 | 4709 | |
3381be5c WD |
4710 | /* |
4711 | Remove each chunk from fast bin and consolidate it, placing it | |
4712 | then in unsorted bin. Among other reasons for doing this, | |
4713 | placing in unsorted bin avoids needing to calculate actual bins | |
4714 | until malloc is sure that chunks aren't immediately going to be | |
4715 | reused anyway. | |
4716 | */ | |
72f90263 | 4717 | |
3381be5c WD |
4718 | maxfb = &fastbin (av, NFASTBINS - 1); |
4719 | fb = &fastbin (av, 0); | |
4720 | do { | |
71effcea | 4721 | p = atomic_exchange_acq (fb, NULL); |
3381be5c WD |
4722 | if (p != 0) { |
4723 | do { | |
249a5895 | 4724 | { |
49c3c376 | 4725 | if (__glibc_unlikely (misaligned_chunk (p))) |
768358b6 | 4726 | malloc_printerr ("malloc_consolidate(): " |
a1a486d7 EI |
4727 | "unaligned fastbin chunk detected"); |
4728 | ||
249a5895 IK |
4729 | unsigned int idx = fastbin_index (chunksize (p)); |
4730 | if ((&fastbin (av, idx)) != fb) | |
4731 | malloc_printerr ("malloc_consolidate(): invalid chunk size"); | |
4732 | } | |
4733 | ||
3381be5c | 4734 | check_inuse_chunk(av, p); |
a1a486d7 | 4735 | nextp = REVEAL_PTR (p->fd); |
3381be5c WD |
4736 | |
4737 | /* Slightly streamlined version of consolidation code in free() */ | |
4738 | size = chunksize (p); | |
4739 | nextchunk = chunk_at_offset(p, size); | |
4740 | nextsize = chunksize(nextchunk); | |
4741 | ||
4742 | if (!prev_inuse(p)) { | |
4743 | prevsize = prev_size (p); | |
4744 | size += prevsize; | |
4745 | p = chunk_at_offset(p, -((long) prevsize)); | |
d6db68e6 ME |
4746 | if (__glibc_unlikely (chunksize(p) != prevsize)) |
4747 | malloc_printerr ("corrupted size vs. prev_size in fastbins"); | |
1ecba1fa | 4748 | unlink_chunk (av, p); |
3381be5c | 4749 | } |
72f90263 | 4750 | |
3381be5c WD |
4751 | if (nextchunk != av->top) { |
4752 | nextinuse = inuse_bit_at_offset(nextchunk, nextsize); | |
a9177ff5 | 4753 | |
3381be5c WD |
4754 | if (!nextinuse) { |
4755 | size += nextsize; | |
1ecba1fa | 4756 | unlink_chunk (av, nextchunk); |
3381be5c WD |
4757 | } else |
4758 | clear_inuse_bit_at_offset(nextchunk, 0); | |
a9177ff5 | 4759 | |
3381be5c WD |
4760 | first_unsorted = unsorted_bin->fd; |
4761 | unsorted_bin->fd = p; | |
4762 | first_unsorted->bk = p; | |
7ecfbd38 | 4763 | |
3381be5c WD |
4764 | if (!in_smallbin_range (size)) { |
4765 | p->fd_nextsize = NULL; | |
4766 | p->bk_nextsize = NULL; | |
72f90263 | 4767 | } |
a9177ff5 | 4768 | |
3381be5c WD |
4769 | set_head(p, size | PREV_INUSE); |
4770 | p->bk = unsorted_bin; | |
4771 | p->fd = first_unsorted; | |
4772 | set_foot(p, size); | |
4773 | } | |
a9177ff5 | 4774 | |
3381be5c WD |
4775 | else { |
4776 | size += nextsize; | |
4777 | set_head(p, size | PREV_INUSE); | |
4778 | av->top = p; | |
4779 | } | |
a9177ff5 | 4780 | |
3381be5c WD |
4781 | } while ( (p = nextp) != 0); |
4782 | ||
4783 | } | |
4784 | } while (fb++ != maxfb); | |
fa8d436c | 4785 | } |
10dc2a90 | 4786 | |
fa8d436c UD |
4787 | /* |
4788 | ------------------------------ realloc ------------------------------ | |
4789 | */ | |
f65fd747 | 4790 | |
79969f41 SP |
4791 | static void * |
4792 | _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, | |
4c8b8cc3 | 4793 | INTERNAL_SIZE_T nb) |
fa8d436c | 4794 | { |
fa8d436c UD |
4795 | mchunkptr newp; /* chunk to return */ |
4796 | INTERNAL_SIZE_T newsize; /* its size */ | |
22a89187 | 4797 | void* newmem; /* corresponding user mem */ |
f65fd747 | 4798 | |
fa8d436c | 4799 | mchunkptr next; /* next contiguous chunk after oldp */ |
f65fd747 | 4800 | |
fa8d436c UD |
4801 | mchunkptr remainder; /* extra space at end of newp */ |
4802 | unsigned long remainder_size; /* its size */ | |
f65fd747 | 4803 | |
6dd6a580 | 4804 | /* oldmem size */ |
3784dfc0 | 4805 | if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0) |
76761b63 | 4806 | || __builtin_expect (oldsize >= av->system_mem, 0)) |
ac3ed168 | 4807 | malloc_printerr ("realloc(): invalid old size"); |
76761b63 | 4808 | |
6c8dbf00 | 4809 | check_inuse_chunk (av, oldp); |
f65fd747 | 4810 | |
4c8b8cc3 | 4811 | /* All callers already filter out mmap'ed chunks. */ |
6c8dbf00 | 4812 | assert (!chunk_is_mmapped (oldp)); |
f65fd747 | 4813 | |
6c8dbf00 OB |
4814 | next = chunk_at_offset (oldp, oldsize); |
4815 | INTERNAL_SIZE_T nextsize = chunksize (next); | |
3784dfc0 | 4816 | if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0) |
22a89187 | 4817 | || __builtin_expect (nextsize >= av->system_mem, 0)) |
ac3ed168 | 4818 | malloc_printerr ("realloc(): invalid next size"); |
22a89187 | 4819 | |
6c8dbf00 OB |
4820 | if ((unsigned long) (oldsize) >= (unsigned long) (nb)) |
4821 | { | |
4822 | /* already big enough; split below */ | |
fa8d436c | 4823 | newp = oldp; |
6c8dbf00 | 4824 | newsize = oldsize; |
7799b7b3 | 4825 | } |
f65fd747 | 4826 | |
6c8dbf00 OB |
4827 | else |
4828 | { | |
4829 | /* Try to expand forward into top */ | |
4830 | if (next == av->top && | |
4831 | (unsigned long) (newsize = oldsize + nextsize) >= | |
4832 | (unsigned long) (nb + MINSIZE)) | |
4833 | { | |
4834 | set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4835 | av->top = chunk_at_offset (oldp, nb); | |
4836 | set_head (av->top, (newsize - nb) | PREV_INUSE); | |
4837 | check_inuse_chunk (av, oldp); | |
ca89f1c7 | 4838 | return tag_new_usable (chunk2mem (oldp)); |
6c8dbf00 OB |
4839 | } |
4840 | ||
4841 | /* Try to expand forward into next chunk; split off remainder below */ | |
4842 | else if (next != av->top && | |
4843 | !inuse (next) && | |
4844 | (unsigned long) (newsize = oldsize + nextsize) >= | |
4845 | (unsigned long) (nb)) | |
4846 | { | |
4847 | newp = oldp; | |
1ecba1fa | 4848 | unlink_chunk (av, next); |
6c8dbf00 OB |
4849 | } |
4850 | ||
4851 | /* allocate, copy, free */ | |
4852 | else | |
4853 | { | |
4854 | newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK); | |
4855 | if (newmem == 0) | |
4856 | return 0; /* propagate failure */ | |
4857 | ||
4858 | newp = mem2chunk (newmem); | |
4859 | newsize = chunksize (newp); | |
4860 | ||
4861 | /* | |
4862 | Avoid copy if newp is next chunk after oldp. | |
4863 | */ | |
4864 | if (newp == next) | |
4865 | { | |
4866 | newsize += oldsize; | |
4867 | newp = oldp; | |
4868 | } | |
4869 | else | |
4870 | { | |
ca89f1c7 | 4871 | void *oldmem = chunk2mem (oldp); |
faf003ed | 4872 | size_t sz = memsize (oldp); |
0c719cf4 SN |
4873 | (void) tag_region (oldmem, sz); |
4874 | newmem = tag_new_usable (newmem); | |
8ae909a5 SN |
4875 | memcpy (newmem, oldmem, sz); |
4876 | _int_free (av, oldp, 1); | |
4877 | check_inuse_chunk (av, newp); | |
4878 | return newmem; | |
6c8dbf00 OB |
4879 | } |
4880 | } | |
fa8d436c | 4881 | } |
f65fd747 | 4882 | |
22a89187 | 4883 | /* If possible, free extra space in old or extended chunk */ |
f65fd747 | 4884 | |
6c8dbf00 | 4885 | assert ((unsigned long) (newsize) >= (unsigned long) (nb)); |
f65fd747 | 4886 | |
22a89187 | 4887 | remainder_size = newsize - nb; |
10dc2a90 | 4888 | |
6c8dbf00 OB |
4889 | if (remainder_size < MINSIZE) /* not enough extra to split off */ |
4890 | { | |
4891 | set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4892 | set_inuse_bit_at_offset (newp, newsize); | |
4893 | } | |
4894 | else /* split remainder */ | |
4895 | { | |
4896 | remainder = chunk_at_offset (newp, nb); | |
3784dfc0 | 4897 | /* Clear any user-space tags before writing the header. */ |
0c719cf4 | 4898 | remainder = tag_region (remainder, remainder_size); |
6c8dbf00 OB |
4899 | set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
4900 | set_head (remainder, remainder_size | PREV_INUSE | | |
4901 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4902 | /* Mark remainder as inuse so free() won't complain */ | |
4903 | set_inuse_bit_at_offset (remainder, remainder_size); | |
4904 | _int_free (av, remainder, 1); | |
4905 | } | |
22a89187 | 4906 | |
6c8dbf00 | 4907 | check_inuse_chunk (av, newp); |
ca89f1c7 | 4908 | return tag_new_usable (chunk2mem (newp)); |
fa8d436c UD |
4909 | } |
4910 | ||
4911 | /* | |
6c8dbf00 OB |
4912 | ------------------------------ memalign ------------------------------ |
4913 | */ | |
fa8d436c | 4914 | |
6c8dbf00 OB |
4915 | static void * |
4916 | _int_memalign (mstate av, size_t alignment, size_t bytes) | |
fa8d436c UD |
4917 | { |
4918 | INTERNAL_SIZE_T nb; /* padded request size */ | |
6c8dbf00 OB |
4919 | char *m; /* memory returned by malloc call */ |
4920 | mchunkptr p; /* corresponding chunk */ | |
4921 | char *brk; /* alignment point within p */ | |
4922 | mchunkptr newp; /* chunk to return */ | |
fa8d436c UD |
4923 | INTERNAL_SIZE_T newsize; /* its size */ |
4924 | INTERNAL_SIZE_T leadsize; /* leading space before alignment point */ | |
6c8dbf00 OB |
4925 | mchunkptr remainder; /* spare room at end to split off */ |
4926 | unsigned long remainder_size; /* its size */ | |
fa8d436c | 4927 | INTERNAL_SIZE_T size; |
f65fd747 | 4928 | |
f65fd747 | 4929 | |
f65fd747 | 4930 | |
7519dee3 FW |
4931 | nb = checked_request2size (bytes); |
4932 | if (nb == 0) | |
9bf8e29c AZ |
4933 | { |
4934 | __set_errno (ENOMEM); | |
4935 | return NULL; | |
4936 | } | |
fa8d436c UD |
4937 | |
4938 | /* | |
6c8dbf00 OB |
4939 | Strategy: find a spot within that chunk that meets the alignment |
4940 | request, and then possibly free the leading and trailing space. | |
4941 | */ | |
fa8d436c | 4942 | |
fa8d436c UD |
4943 | /* Call malloc with worst case padding to hit alignment. */ |
4944 | ||
6c8dbf00 OB |
4945 | m = (char *) (_int_malloc (av, nb + alignment + MINSIZE)); |
4946 | ||
4947 | if (m == 0) | |
4948 | return 0; /* propagate failure */ | |
4949 | ||
4950 | p = mem2chunk (m); | |
4951 | ||
4952 | if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */ | |
4953 | ||
4954 | { /* | |
4955 | Find an aligned spot inside chunk. Since we need to give back | |
4956 | leading space in a chunk of at least MINSIZE, if the first | |
4957 | calculation places us at a spot with less than MINSIZE leader, | |
4958 | we can move to the next aligned spot -- we've allocated enough | |
4959 | total room so that this is always possible. | |
4960 | */ | |
4961 | brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) & | |
4962 | - ((signed long) alignment)); | |
4963 | if ((unsigned long) (brk - (char *) (p)) < MINSIZE) | |
4964 | brk += alignment; | |
4965 | ||
4966 | newp = (mchunkptr) brk; | |
4967 | leadsize = brk - (char *) (p); | |
4968 | newsize = chunksize (p) - leadsize; | |
4969 | ||
4970 | /* For mmapped chunks, just adjust offset */ | |
4971 | if (chunk_is_mmapped (p)) | |
4972 | { | |
e9c4fe93 | 4973 | set_prev_size (newp, prev_size (p) + leadsize); |
6c8dbf00 | 4974 | set_head (newp, newsize | IS_MMAPPED); |
ca89f1c7 | 4975 | return chunk2mem (newp); |
6c8dbf00 OB |
4976 | } |
4977 | ||
4978 | /* Otherwise, give back leader, use the rest */ | |
4979 | set_head (newp, newsize | PREV_INUSE | | |
4980 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4981 | set_inuse_bit_at_offset (newp, newsize); | |
4982 | set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
4983 | _int_free (av, p, 1); | |
4984 | p = newp; | |
4985 | ||
4986 | assert (newsize >= nb && | |
ca89f1c7 | 4987 | (((unsigned long) (chunk2mem (p))) % alignment) == 0); |
f65fd747 | 4988 | } |
f65fd747 | 4989 | |
f65fd747 | 4990 | /* Also give back spare room at the end */ |
6c8dbf00 OB |
4991 | if (!chunk_is_mmapped (p)) |
4992 | { | |
4993 | size = chunksize (p); | |
4994 | if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE)) | |
4995 | { | |
4996 | remainder_size = size - nb; | |
4997 | remainder = chunk_at_offset (p, nb); | |
4998 | set_head (remainder, remainder_size | PREV_INUSE | | |
4999 | (av != &main_arena ? NON_MAIN_ARENA : 0)); | |
5000 | set_head_size (p, nb); | |
5001 | _int_free (av, remainder, 1); | |
5002 | } | |
fa8d436c | 5003 | } |
f65fd747 | 5004 | |
6c8dbf00 | 5005 | check_inuse_chunk (av, p); |
ca89f1c7 | 5006 | return chunk2mem (p); |
f65fd747 UD |
5007 | } |
5008 | ||
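/* Illustrative caller's view of the contract implemented above (not
   part of this file); the public wrappers additionally require the
   alignment to be a power of two:  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *p = NULL;
  /* Oversized alignments end up in _int_memalign; on success P is
     64-byte aligned and, as always, valid to pass to free ().  */
  if (posix_memalign (&p, 64, 100) != 0)
    return 1;
  printf ("%p mod 64 == %lu\n", p, (unsigned long) p % 64);
  free (p);
  return 0;
}
#endif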
f65fd747 | 5009 | |
fa8d436c | 5010 | /* |
6c8dbf00 OB |
5011 | ------------------------------ malloc_trim ------------------------------ |
5012 | */ | |
8a4b65b4 | 5013 | |
6c8dbf00 OB |
5014 | static int |
5015 | mtrim (mstate av, size_t pad) | |
f65fd747 | 5016 | { |
3381be5c | 5017 | /* Ensure all blocks are consolidated. */ |
68631c8e UD |
5018 | malloc_consolidate (av); |
5019 | ||
6c8dbf00 | 5020 | const size_t ps = GLRO (dl_pagesize); |
68631c8e UD |
5021 | int psindex = bin_index (ps); |
5022 | const size_t psm1 = ps - 1; | |
5023 | ||
5024 | int result = 0; | |
5025 | for (int i = 1; i < NBINS; ++i) | |
5026 | if (i == 1 || i >= psindex) | |
5027 | { | |
6c8dbf00 | 5028 | mbinptr bin = bin_at (av, i); |
68631c8e | 5029 | |
6c8dbf00 OB |
5030 | for (mchunkptr p = last (bin); p != bin; p = p->bk) |
5031 | { | |
5032 | INTERNAL_SIZE_T size = chunksize (p); | |
68631c8e | 5033 | |
6c8dbf00 OB |
5034 | if (size > psm1 + sizeof (struct malloc_chunk)) |
5035 | { | |
5036 | /* See whether the chunk contains at least one unused page. */ | |
5037 | char *paligned_mem = (char *) (((uintptr_t) p | |
5038 | + sizeof (struct malloc_chunk) | |
5039 | + psm1) & ~psm1); | |
68631c8e | 5040 | |
ca89f1c7 | 5041 | assert ((char *) chunk2mem (p) + 2 * CHUNK_HDR_SZ |
3784dfc0 | 5042 | <= paligned_mem); |
6c8dbf00 | 5043 | assert ((char *) p + size > paligned_mem); |
68631c8e | 5044 | |
6c8dbf00 OB |
5045 | /* This is the size we could potentially free. */ |
5046 | size -= paligned_mem - (char *) p; | |
68631c8e | 5047 | |
6c8dbf00 OB |
5048 | if (size > psm1) |
5049 | { | |
439bda32 | 5050 | #if MALLOC_DEBUG |
6c8dbf00 OB |
5051 | /* When debugging we simulate destroying the memory |
5052 | content. */ | |
5053 | memset (paligned_mem, 0x89, size & ~psm1); | |
68631c8e | 5054 | #endif |
6c8dbf00 | 5055 | __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED); |
68631c8e | 5056 | |
6c8dbf00 OB |
5057 | result = 1; |
5058 | } | |
5059 | } | |
5060 | } | |
68631c8e | 5061 | } |
8a4b65b4 | 5062 | |
a9177ff5 | 5063 | #ifndef MORECORE_CANNOT_TRIM |
3b49edc0 | 5064 | return result | (av == &main_arena ? systrim (pad, av) : 0); |
6c8dbf00 | 5065 | |
8a4b65b4 | 5066 | #else |
68631c8e | 5067 | return result; |
f65fd747 | 5068 | #endif |
f65fd747 UD |
5069 | } |
5070 | ||
f65fd747 | 5071 | |
3b49edc0 | 5072 | int |
6c8dbf00 | 5073 | __malloc_trim (size_t s) |
3b49edc0 UD |
5074 | { |
5075 | int result = 0; | |
5076 | ||
cc35896e | 5077 | if (!__malloc_initialized) |
3b49edc0 UD |
5078 | ptmalloc_init (); |
5079 | ||
5080 | mstate ar_ptr = &main_arena; | |
5081 | do | |
5082 | { | |
4bf5f222 | 5083 | __libc_lock_lock (ar_ptr->mutex); |
3b49edc0 | 5084 | result |= mtrim (ar_ptr, s); |
4bf5f222 | 5085 | __libc_lock_unlock (ar_ptr->mutex); |
3b49edc0 UD |
5086 | |
5087 | ar_ptr = ar_ptr->next; | |
5088 | } | |
5089 | while (ar_ptr != &main_arena); | |
5090 | ||
5091 | return result; | |
5092 | } | |
5093 | ||
5094 | ||
f65fd747 | 5095 | /* |
6c8dbf00 OB |
5096 | ------------------------- malloc_usable_size ------------------------- |
5097 | */ | |
f65fd747 | 5098 | |
3b49edc0 | 5099 | static size_t |
6c8dbf00 | 5100 | musable (void *mem) |
f65fd747 | 5101 | { |
88e316b0 | 5102 | mchunkptr p = mem2chunk (mem); |
6c8dbf00 | 5103 | |
88e316b0 SP |
5104 | if (chunk_is_mmapped (p)) |
5105 | return chunksize (p) - CHUNK_HDR_SZ; | |
5106 | else if (inuse (p)) | |
5107 | return memsize (p); | |
3784dfc0 | 5108 | |
fa8d436c | 5109 | return 0; |
f65fd747 UD |
5110 | } |
5111 | ||
b5bd5bfe | 5112 | #if IS_IN (libc) |
3b49edc0 | 5113 | size_t |
6c8dbf00 | 5114 | __malloc_usable_size (void *m) |
3b49edc0 | 5115 | { |
88e316b0 SP |
5116 | if (m == NULL) |
5117 | return 0; | |
5118 | return musable (m); | |
3b49edc0 | 5119 | } |
b5bd5bfe | 5120 | #endif |
3b49edc0 | 5121 | |
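/* Illustrative application-side view (not part of this file): the
   value reported by malloc_usable_size () may exceed the request, and
   the surplus stays valid until the block is freed or resized:  */
#if 0
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *p = malloc (100);
  if (p == NULL)
    return 1;
  /* On 64-bit ptmalloc a 100-byte request occupies a 112-byte chunk,
     so this typically prints 104.  */
  printf ("%zu\n", malloc_usable_size (p));
  free (p);
  return 0;
}
#endif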
fa8d436c | 5122 | /* |
6c8dbf00 OB |
5123 | ------------------------------ mallinfo ------------------------------ |
5124 | Accumulate malloc statistics for arena AV into M. | |
5125 | */ | |
bedee953 | 5126 | static void |
e3960d1c | 5127 | int_mallinfo (mstate av, struct mallinfo2 *m) |
f65fd747 | 5128 | { |
6dd67bd5 | 5129 | size_t i; |
f65fd747 UD |
5130 | mbinptr b; |
5131 | mchunkptr p; | |
f65fd747 | 5132 | INTERNAL_SIZE_T avail; |
fa8d436c UD |
5133 | INTERNAL_SIZE_T fastavail; |
5134 | int nblocks; | |
5135 | int nfastblocks; | |
f65fd747 | 5136 | |
6c8dbf00 | 5137 | check_malloc_state (av); |
8a4b65b4 | 5138 | |
fa8d436c | 5139 | /* Account for top */ |
6c8dbf00 | 5140 | avail = chunksize (av->top); |
fa8d436c | 5141 | nblocks = 1; /* top always exists */ |
f65fd747 | 5142 | |
fa8d436c UD |
5143 | /* traverse fastbins */ |
5144 | nfastblocks = 0; | |
5145 | fastavail = 0; | |
5146 | ||
6c8dbf00 OB |
5147 | for (i = 0; i < NFASTBINS; ++i) |
5148 | { | |
a1a486d7 EI |
5149 | for (p = fastbin (av, i); |
5150 | p != 0; | |
5151 | p = REVEAL_PTR (p->fd)) | |
6c8dbf00 | 5152 | { |
49c3c376 | 5153 | if (__glibc_unlikely (misaligned_chunk (p))) |
768358b6 | 5154 | malloc_printerr ("int_mallinfo(): " |
a1a486d7 | 5155 | "unaligned fastbin chunk detected"); |
6c8dbf00 OB |
5156 | ++nfastblocks; |
5157 | fastavail += chunksize (p); | |
5158 | } | |
fa8d436c | 5159 | } |
fa8d436c UD |
5160 | |
5161 | avail += fastavail; | |
f65fd747 | 5162 | |
fa8d436c | 5163 | /* traverse regular bins */ |
6c8dbf00 OB |
5164 | for (i = 1; i < NBINS; ++i) |
5165 | { | |
5166 | b = bin_at (av, i); | |
5167 | for (p = last (b); p != b; p = p->bk) | |
5168 | { | |
5169 | ++nblocks; | |
5170 | avail += chunksize (p); | |
5171 | } | |
fa8d436c | 5172 | } |
f65fd747 | 5173 | |
bedee953 PP |
5174 | m->smblks += nfastblocks; |
5175 | m->ordblks += nblocks; | |
5176 | m->fordblks += avail; | |
5177 | m->uordblks += av->system_mem - avail; | |
5178 | m->arena += av->system_mem; | |
5179 | m->fsmblks += fastavail; | |
5180 | if (av == &main_arena) | |
5181 | { | |
5182 | m->hblks = mp_.n_mmaps; | |
5183 | m->hblkhd = mp_.mmapped_mem; | |
ca135f82 | 5184 | m->usmblks = 0; |
6c8dbf00 | 5185 | m->keepcost = chunksize (av->top); |
bedee953 | 5186 | } |
fa8d436c | 5187 | } |
f65fd747 | 5188 | |
3b49edc0 | 5189 | |
e3960d1c ML |
5190 | struct mallinfo2 |
5191 | __libc_mallinfo2 (void) | |
3b49edc0 | 5192 | { |
e3960d1c | 5193 | struct mallinfo2 m; |
bedee953 | 5194 | mstate ar_ptr; |
3b49edc0 | 5195 | |
cc35896e | 5196 | if (!__malloc_initialized) |
3b49edc0 | 5197 | ptmalloc_init (); |
bedee953 | 5198 | |
6c8dbf00 | 5199 | memset (&m, 0, sizeof (m)); |
bedee953 | 5200 | ar_ptr = &main_arena; |
6c8dbf00 OB |
5201 | do |
5202 | { | |
4bf5f222 | 5203 | __libc_lock_lock (ar_ptr->mutex); |
6c8dbf00 | 5204 | int_mallinfo (ar_ptr, &m); |
4bf5f222 | 5205 | __libc_lock_unlock (ar_ptr->mutex); |
bedee953 | 5206 | |
6c8dbf00 OB |
5207 | ar_ptr = ar_ptr->next; |
5208 | } | |
5209 | while (ar_ptr != &main_arena); | |
bedee953 | 5210 | |
3b49edc0 UD |
5211 | return m; |
5212 | } | |
cdf64542 | 5213 | libc_hidden_def (__libc_mallinfo2) |
3b49edc0 | 5214 | |
e3960d1c ML |
5215 | struct mallinfo |
5216 | __libc_mallinfo (void) | |
5217 | { | |
5218 | struct mallinfo m; | |
5219 | struct mallinfo2 m2 = __libc_mallinfo2 (); | |
5220 | ||
5221 | m.arena = m2.arena; | |
5222 | m.ordblks = m2.ordblks; | |
5223 | m.smblks = m2.smblks; | |
5224 | m.hblks = m2.hblks; | |
5225 | m.hblkhd = m2.hblkhd; | |
5226 | m.usmblks = m2.usmblks; | |
5227 | m.fsmblks = m2.fsmblks; | |
5228 | m.uordblks = m2.uordblks; | |
5229 | m.fordblks = m2.fordblks; | |
5230 | m.keepcost = m2.keepcost; | |
5231 | ||
5232 | return m; | |
5233 | } | |
5234 | ||
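/* Illustrative caller of the statistics accumulated above (not part
   of this file); struct mallinfo2 carries the same fields as struct
   mallinfo but widened to size_t, which is why it supersedes the
   older int-based interface:  */
#if 0
#include <malloc.h>
#include <stdio.h>

int
main (void)
{
  struct mallinfo2 mi = mallinfo2 ();
  printf ("arena (non-mmapped system bytes): %zu\n", mi.arena);
  printf ("in use:                           %zu\n", mi.uordblks);
  printf ("free:                             %zu\n", mi.fordblks);
  printf ("mmapped: %zu bytes in %zu regions\n", mi.hblkhd, mi.hblks);
  return 0;
}
#endif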
5235 | ||
fa8d436c | 5236 | /* |
6c8dbf00 OB |
5237 | ------------------------------ malloc_stats ------------------------------ |
5238 | */ | |
f65fd747 | 5239 | |
3b49edc0 | 5240 | void |
60d2f8f3 | 5241 | __malloc_stats (void) |
f65fd747 | 5242 | { |
8a4b65b4 | 5243 | int i; |
fa8d436c | 5244 | mstate ar_ptr; |
fa8d436c | 5245 | unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b; |
8a4b65b4 | 5246 | |
cc35896e | 5247 | if (!__malloc_initialized) |
a234e27d | 5248 | ptmalloc_init (); |
8dab36a1 | 5249 | _IO_flockfile (stderr); |
9964a145 ZW |
5250 | int old_flags2 = stderr->_flags2; |
5251 | stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL; | |
6c8dbf00 OB |
5252 | for (i = 0, ar_ptr = &main_arena;; i++) |
5253 | { | |
e3960d1c | 5254 | struct mallinfo2 mi; |
6c8dbf00 OB |
5255 | |
5256 | memset (&mi, 0, sizeof (mi)); | |
4bf5f222 | 5257 | __libc_lock_lock (ar_ptr->mutex); |
6c8dbf00 OB |
5258 | int_mallinfo (ar_ptr, &mi); |
5259 | fprintf (stderr, "Arena %d:\n", i); | |
5260 | fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena); | |
5261 | fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks); | |
fa8d436c | 5262 | #if MALLOC_DEBUG > 1 |
6c8dbf00 OB |
5263 | if (i > 0) |
5264 | dump_heap (heap_for_ptr (top (ar_ptr))); | |
fa8d436c | 5265 | #endif |
6c8dbf00 OB |
5266 | system_b += mi.arena; |
5267 | in_use_b += mi.uordblks; | |
4bf5f222 | 5268 | __libc_lock_unlock (ar_ptr->mutex); |
6c8dbf00 OB |
5269 | ar_ptr = ar_ptr->next; |
5270 | if (ar_ptr == &main_arena) | |
5271 | break; | |
5272 | } | |
5273 | fprintf (stderr, "Total (incl. mmap):\n"); | |
5274 | fprintf (stderr, "system bytes = %10u\n", system_b); | |
5275 | fprintf (stderr, "in use bytes = %10u\n", in_use_b); | |
5276 | fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps); | |
5277 | fprintf (stderr, "max mmap bytes = %10lu\n", | |
5278 | (unsigned long) mp_.max_mmapped_mem); | |
9964a145 | 5279 | stderr->_flags2 = old_flags2; |
8dab36a1 | 5280 | _IO_funlockfile (stderr); |
f65fd747 UD |
5281 | } |
5282 | ||
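/*
   Illustrative only: __malloc_stats is exported as malloc_stats and may
   be called directly by an application; it prints the per-arena and
   total figures formatted above to stderr.

   #include <malloc.h>
   #include <stdlib.h>

   int main (void)
   {
     void *p = malloc (1024);
     malloc_stats ();          // writes "Arena 0: ..." and totals to stderr
     free (p);
     return 0;
   }
*/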
f65fd747 UD |
5283 | |
5284 | /* | |
6c8dbf00 OB |
5285 | ------------------------------ mallopt ------------------------------ |
5286 | */ | |
c2d8f0b7 | 5287 | static __always_inline int |
be7991c0 SP |
5288 | do_set_trim_threshold (size_t value) |
5289 | { | |
5290 | LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold, | |
5291 | mp_.no_dyn_threshold); | |
5292 | mp_.trim_threshold = value; | |
5293 | mp_.no_dyn_threshold = 1; | |
5294 | return 1; | |
5295 | } | |
5296 | ||
c2d8f0b7 | 5297 | static __always_inline int |
be7991c0 SP |
5298 | do_set_top_pad (size_t value) |
5299 | { | |
5300 | LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad, | |
5301 | mp_.no_dyn_threshold); | |
5302 | mp_.top_pad = value; | |
5303 | mp_.no_dyn_threshold = 1; | |
5304 | return 1; | |
5305 | } | |
5306 | ||
c2d8f0b7 | 5307 | static __always_inline int |
be7991c0 SP |
5308 | do_set_mmap_threshold (size_t value) |
5309 | { | |
0a4df6f5 PM |
5310 | LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold, |
5311 | mp_.no_dyn_threshold); | |
5312 | mp_.mmap_threshold = value; | |
5313 | mp_.no_dyn_threshold = 1; | |
5314 | return 1; | |
be7991c0 SP |
5315 | } |
5316 | ||
c2d8f0b7 | 5317 | static __always_inline int |
be7991c0 SP |
5318 | do_set_mmaps_max (int32_t value) |
5319 | { | |
5320 | LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max, | |
5321 | mp_.no_dyn_threshold); | |
5322 | mp_.n_mmaps_max = value; | |
5323 | mp_.no_dyn_threshold = 1; | |
5324 | return 1; | |
5325 | } | |
5326 | ||
c2d8f0b7 | 5327 | static __always_inline int |
be7991c0 SP |
5328 | do_set_mallopt_check (int32_t value) |
5329 | { | |
be7991c0 SP |
5330 | return 1; |
5331 | } | |
5332 | ||
c2d8f0b7 | 5333 | static __always_inline int |
be7991c0 SP |
5334 | do_set_perturb_byte (int32_t value) |
5335 | { | |
5336 | LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte); | |
5337 | perturb_byte = value; | |
5338 | return 1; | |
5339 | } | |
5340 | ||
c2d8f0b7 | 5341 | static __always_inline int |
be7991c0 SP |
5342 | do_set_arena_test (size_t value) |
5343 | { | |
5344 | LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test); | |
5345 | mp_.arena_test = value; | |
5346 | return 1; | |
5347 | } | |
5348 | ||
c2d8f0b7 | 5349 | static __always_inline int |
be7991c0 SP |
5350 | do_set_arena_max (size_t value) |
5351 | { | |
5352 | LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max); | |
5353 | mp_.arena_max = value; | |
5354 | return 1; | |
5355 | } | |
5356 | ||
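/*
   A hedged usage note: besides the M_ARENA_TEST/M_ARENA_MAX mallopt
   parameters handled below, do_set_arena_test and do_set_arena_max are
   reachable through the documented glibc tunables, e.g.:

     GLIBC_TUNABLES=glibc.malloc.arena_max=2 ./app

   caps the number of arenas at two regardless of the core count.
*/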
d5c3fafc | 5357 | #if USE_TCACHE |
c2d8f0b7 | 5358 | static __always_inline int |
d5c3fafc DD |
5359 | do_set_tcache_max (size_t value) |
5360 | { | |
16554464 | 5361 | if (value <= MAX_TCACHE_SIZE) |
d5c3fafc DD |
5362 | { |
5363 | LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes); | |
5364 | mp_.tcache_max_bytes = value; | |
5365 | mp_.tcache_bins = csize2tidx (request2size (value)) + 1; | |
16554464 | 5366 | return 1; |
d5c3fafc | 5367 | } |
16554464 | 5368 | return 0; |
d5c3fafc DD |
5369 | } |
5370 | ||
c2d8f0b7 | 5371 | static __always_inline int |
d5c3fafc DD |
5372 | do_set_tcache_count (size_t value) |
5373 | { | |
5ad533e8 WD |
5374 | if (value <= MAX_TCACHE_COUNT) |
5375 | { | |
5376 | LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count); | |
5377 | mp_.tcache_count = value; | |
16554464 | 5378 | return 1; |
5ad533e8 | 5379 | } |
16554464 | 5380 | return 0; |
d5c3fafc DD |
5381 | } |
5382 | ||
c2d8f0b7 | 5383 | static __always_inline int |
d5c3fafc DD |
5384 | do_set_tcache_unsorted_limit (size_t value) |
5385 | { | |
5386 | LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit); | |
5387 | mp_.tcache_unsorted_limit = value; | |
5388 | return 1; | |
5389 | } | |
5390 | #endif | |
f65fd747 | 5391 | |
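/*
   A hedged usage note: the tcache helpers above are normally driven by
   the documented glibc.malloc.* tunables rather than by mallopt, e.g.:

     GLIBC_TUNABLES=glibc.malloc.tcache_count=4:glibc.malloc.tcache_max=1040 ./app

   Values above MAX_TCACHE_COUNT or MAX_TCACHE_SIZE are rejected, as the
   range checks above show.
*/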
a4ea49f8 | 5392 | static __always_inline int |
c48d92b4 DD |
5393 | do_set_mxfast (size_t value) |
5394 | { | |
16554464 | 5395 | if (value <= MAX_FAST_SIZE) |
c48d92b4 DD |
5396 | { |
5397 | LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ()); | |
5398 | set_max_fast (value); | |
5399 | return 1; | |
5400 | } | |
5401 | return 0; | |
5402 | } | |
5403 | ||
5f6d8d97 AZ |
5404 | #if HAVE_TUNABLES |
5405 | static __always_inline int | |
98d5fcb8 | 5406 | do_set_hugetlb (size_t value) |
5f6d8d97 AZ |
5407 | { |
5408 | if (value == 1) | |
5409 | { | |
5410 | enum malloc_thp_mode_t thp_mode = __malloc_thp_mode (); | |
5411 | /* | |
5412 | Only enable THP madvise usage if the system supports it and is | |
5413 | in 'madvise' mode; otherwise the madvise() call is wasteful. | |
5414 | */ | |
5415 | if (thp_mode == malloc_thp_mode_madvise) | |
5416 | mp_.thp_pagesize = __malloc_default_thp_pagesize (); | |
5417 | } | |
98d5fcb8 AZ |
5418 | else if (value >= 2) |
5419 | __malloc_hugepage_config (value == 2 ? 0 : value, &mp_.hp_pagesize, | |
5420 | &mp_.hp_flags); | |
5f6d8d97 AZ |
5421 | return 0; |
5422 | } | |
5423 | #endif | |
5424 | ||
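/*
   A hedged usage note: do_set_hugetlb is wired to the documented
   glibc.malloc.hugetlb tunable.  Per the logic above, 1 enables
   transparent-huge-page madvise (only when the system THP mode is
   'madvise'), 2 selects the system default hugetlb page size, and a
   larger value requests that specific hugetlb page size in bytes:

     GLIBC_TUNABLES=glibc.malloc.hugetlb=2 ./app
*/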
6c8dbf00 OB |
5425 | int |
5426 | __libc_mallopt (int param_number, int value) | |
f65fd747 | 5427 | { |
fa8d436c UD |
5428 | mstate av = &main_arena; |
5429 | int res = 1; | |
f65fd747 | 5430 | |
cc35896e | 5431 | if (!__malloc_initialized) |
0cb71e02 | 5432 | ptmalloc_init (); |
4bf5f222 | 5433 | __libc_lock_lock (av->mutex); |
2f6d1f1b | 5434 | |
3ea5be54 AO |
5435 | LIBC_PROBE (memory_mallopt, 2, param_number, value); |
5436 | ||
3381be5c WD |
5437 | /* We must consolidate main arena before changing max_fast |
5438 | (see definition of set_max_fast). */ | |
5439 | malloc_consolidate (av); | |
5440 | ||
16554464 DD |
5441 | /* Many of these helper functions take a size_t. We do not worry |
5442 | about overflow here, because negative int values will wrap to | |
5443 | very large size_t values and the helpers have sufficient range | |
5444 | checking for such conversions. Many of these helpers are also | |
5445 | used by the tunables macros in arena.c. */ | |
5446 | ||
6c8dbf00 OB |
5447 | switch (param_number) |
5448 | { | |
5449 | case M_MXFAST: | |
16554464 | 5450 | res = do_set_mxfast (value); |
6c8dbf00 OB |
5451 | break; |
5452 | ||
5453 | case M_TRIM_THRESHOLD: | |
16554464 | 5454 | res = do_set_trim_threshold (value); |
6c8dbf00 OB |
5455 | break; |
5456 | ||
5457 | case M_TOP_PAD: | |
16554464 | 5458 | res = do_set_top_pad (value); |
6c8dbf00 OB |
5459 | break; |
5460 | ||
5461 | case M_MMAP_THRESHOLD: | |
be7991c0 | 5462 | res = do_set_mmap_threshold (value); |
6c8dbf00 OB |
5463 | break; |
5464 | ||
5465 | case M_MMAP_MAX: | |
16554464 | 5466 | res = do_set_mmaps_max (value); |
6c8dbf00 OB |
5467 | break; |
5468 | ||
5469 | case M_CHECK_ACTION: | |
16554464 | 5470 | res = do_set_mallopt_check (value); |
6c8dbf00 OB |
5471 | break; |
5472 | ||
5473 | case M_PERTURB: | |
16554464 | 5474 | res = do_set_perturb_byte (value); |
6c8dbf00 OB |
5475 | break; |
5476 | ||
5477 | case M_ARENA_TEST: | |
5478 | if (value > 0) | |
16554464 | 5479 | res = do_set_arena_test (value); |
6c8dbf00 OB |
5480 | break; |
5481 | ||
5482 | case M_ARENA_MAX: | |
5483 | if (value > 0) | |
16554464 | 5484 | res = do_set_arena_max (value); |
6c8dbf00 OB |
5485 | break; |
5486 | } | |
4bf5f222 | 5487 | __libc_lock_unlock (av->mutex); |
fa8d436c | 5488 | return res; |
b22fc5f5 | 5489 | } |
3b49edc0 | 5490 | libc_hidden_def (__libc_mallopt) |
b22fc5f5 | 5491 | |
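/*
   Illustrative caller-side sketch for the interface above; mallopt
   returns 1 on success and 0 on failure.  M_MMAP_THRESHOLD and
   M_TRIM_THRESHOLD are the standard <malloc.h> parameter names.

   #include <malloc.h>
   #include <stdio.h>

   // Serve requests of 256 KiB and up with mmap; -1 wraps to a huge
   // size_t and effectively disables automatic trimming.
   if (mallopt (M_MMAP_THRESHOLD, 256 * 1024) == 0
       || mallopt (M_TRIM_THRESHOLD, -1) == 0)
     fputs ("mallopt failed\n", stderr);
*/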
10dc2a90 | 5492 | |
a9177ff5 | 5493 | /* |
6c8dbf00 OB |
5494 | -------------------- Alternative MORECORE functions -------------------- |
5495 | */ | |
10dc2a90 | 5496 | |
b22fc5f5 | 5497 | |
fa8d436c | 5498 | /* |
6c8dbf00 | 5499 | General Requirements for MORECORE. |
b22fc5f5 | 5500 | |
6c8dbf00 | 5501 | The MORECORE function must have the following properties: |
b22fc5f5 | 5502 | |
6c8dbf00 | 5503 | If MORECORE_CONTIGUOUS is false: |
10dc2a90 | 5504 | |
6c8dbf00 | 5505 | * MORECORE must allocate in multiples of pagesize. It will |
fa8d436c | 5506 | only be called with arguments that are multiples of pagesize. |
10dc2a90 | 5507 | |
6c8dbf00 | 5508 | * MORECORE(0) must return an address that is at least |
fa8d436c | 5509 | MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.) |
10dc2a90 | 5510 | |
6c8dbf00 | 5511 | else (i.e. if MORECORE_CONTIGUOUS is true): | |
10dc2a90 | 5512 | |
6c8dbf00 | 5513 | * Consecutive calls to MORECORE with positive arguments |
fa8d436c UD |
5514 | return increasing addresses, indicating that space has been |
5515 | contiguously extended. | |
10dc2a90 | 5516 | |
6c8dbf00 | 5517 | * MORECORE need not allocate in multiples of pagesize. |
fa8d436c | 5518 | Calls to MORECORE need not pass arguments that are multiples of pagesize. | |
10dc2a90 | 5519 | |
6c8dbf00 | 5520 | * MORECORE need not page-align. |
10dc2a90 | 5521 | |
6c8dbf00 | 5522 | In either case: |
10dc2a90 | 5523 | |
6c8dbf00 | 5524 | * MORECORE may allocate more memory than requested. (Or even less, |
fa8d436c | 5525 | but this will generally result in a malloc failure.) |
10dc2a90 | 5526 | |
6c8dbf00 | 5527 | * MORECORE must not allocate memory when given argument zero, but |
fa8d436c UD |
5528 | instead return the address one past the end of the memory obtained | |
5529 | by the previous nonzero call. This malloc does NOT call MORECORE(0) | |
5530 | until at least one call with positive arguments is made, so | |
5531 | the initial value returned is not important. | |
10dc2a90 | 5532 | |
6c8dbf00 | 5533 | * Even though consecutive calls to MORECORE need not return contiguous |
fa8d436c UD |
5534 | addresses, it must be OK for malloc'ed chunks to span multiple |
5535 | regions in those cases where they do happen to be contiguous. | |
10dc2a90 | 5536 | |
6c8dbf00 | 5537 | * MORECORE need not handle negative arguments -- it may instead |
fa8d436c UD |
5538 | just return MORECORE_FAILURE when given negative arguments. |
5539 | Negative arguments are always multiples of pagesize. MORECORE | |
5540 | must not misinterpret negative args as large positive unsigned | |
5541 | args. You can prevent all such calls from occurring by defining | |
5542 | MORECORE_CANNOT_TRIM. | |
10dc2a90 | 5543 | |
6c8dbf00 OB |
5544 | There is some variation across systems in the type of the |
5545 | argument to sbrk/MORECORE. If size_t is unsigned, then it cannot | |
5546 | actually be size_t, because sbrk supports negative args, so it is | |
5547 | normally the signed type of the same width as size_t (sometimes | |
5548 | declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much | |
5549 | matter though. Internally, we use "long" as arguments, which should | |
5550 | work across all reasonable possibilities. | |
5551 | ||
5552 | Additionally, if MORECORE ever returns failure for a positive | |
5553 | request, then mmap is used as a noncontiguous system allocator. This | |
5554 | is a useful backup strategy for systems with holes in address spaces | |
5555 | -- in this case sbrk cannot contiguously expand the heap, but mmap | |
5556 | may be able to map noncontiguous space. | |
5557 | ||
5558 | If you'd like mmap to ALWAYS be used, you can define MORECORE to | |
5559 | be a function that always returns MORECORE_FAILURE (sketch below). | |
5560 | ||
5561 | If you are using this malloc with something other than sbrk (or its | |
5562 | emulation) to supply memory regions, you probably want to set | |
5563 | MORECORE_CONTIGUOUS as false. As an example, here is a custom | |
5564 | allocator kindly contributed for pre-OSX macOS. It uses virtually | |
5565 | but not necessarily physically contiguous non-paged memory (locked | |
5566 | in, present and won't get swapped out). You can use it by | |
5567 | uncommenting this section, adding some #includes, and setting up the | |
5568 | appropriate defines above: | |
5569 | ||
5570 | *#define MORECORE osMoreCore | |
5571 | *#define MORECORE_CONTIGUOUS 0 | |
5572 | ||
5573 | There is also a shutdown routine that should somehow be called for | |
5574 | cleanup upon program exit. | |
5575 | ||
5576 | *#define MAX_POOL_ENTRIES 100 | |
5577 | *#define MINIMUM_MORECORE_SIZE (64 * 1024) | |
5578 | static int next_os_pool; | |
5579 | void *our_os_pools[MAX_POOL_ENTRIES]; | |
5580 | ||
5581 | void *osMoreCore(int size) | |
5582 | { | |
fa8d436c UD |
5583 | void *ptr = 0; |
5584 | static void *sbrk_top = 0; | |
ca34d7a7 | 5585 | |
fa8d436c UD |
5586 | if (size > 0) |
5587 | { | |
5588 | if (size < MINIMUM_MORECORE_SIZE) | |
6c8dbf00 | 5589 | size = MINIMUM_MORECORE_SIZE; |
fa8d436c | 5590 | if (CurrentExecutionLevel() == kTaskLevel) |
6c8dbf00 | 5591 | ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); |
fa8d436c UD |
5592 | if (ptr == 0) |
5593 | { | |
6c8dbf00 | 5594 | return (void *) MORECORE_FAILURE; |
fa8d436c UD |
5595 | } |
5596 | // save ptrs so they can be freed during cleanup | |
5597 | our_os_pools[next_os_pool] = ptr; | |
5598 | next_os_pool++; | |
5599 | ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); | |
5600 | sbrk_top = (char *) ptr + size; | |
5601 | return ptr; | |
5602 | } | |
5603 | else if (size < 0) | |
5604 | { | |
5605 | // we don't currently support shrink behavior | |
5606 | return (void *) MORECORE_FAILURE; | |
5607 | } | |
5608 | else | |
5609 | { | |
5610 | return sbrk_top; | |
431c33c0 | 5611 | } |
6c8dbf00 | 5612 | } |
ca34d7a7 | 5613 | |
6c8dbf00 OB |
5614 | // cleanup any allocated memory pools |
5615 | // called as last thing before shutting down driver | |
ca34d7a7 | 5616 | |
6c8dbf00 OB |
5617 | void osCleanupMem(void) |
5618 | { | |
fa8d436c | 5619 | void **ptr; |
ca34d7a7 | 5620 | |
fa8d436c UD |
5621 | for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) |
5622 | if (*ptr) | |
5623 | { | |
6c8dbf00 OB |
5624 | PoolDeallocate(*ptr); |
5625 | *ptr = 0; | |
fa8d436c | 5626 | } |
6c8dbf00 | 5627 | } |
ee74a442 | 5628 | |
6c8dbf00 | 5629 | */ |
f65fd747 | 5630 | |
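/*
   As noted above, making this malloc rely on mmap exclusively only
   requires a MORECORE that always fails.  A minimal sketch under the
   same hypothetical build setup as the example above (the name
   failingMoreCore and the ptrdiff_t argument type are assumptions):

   *#define MORECORE failingMoreCore
   *#define MORECORE_CONTIGUOUS 0

   static void *failingMoreCore (ptrdiff_t increment)
   {
     // refuse every sbrk-style request, forcing the mmap fallback
     return (void *) MORECORE_FAILURE;
   }
*/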
7e3be507 | 5631 | |
3e030bd5 UD |
5632 | /* Helper code. */ |
5633 | ||
ae7f5313 UD |
5634 | extern char **__libc_argv attribute_hidden; |
5635 | ||
3e030bd5 | 5636 | static void |
ac3ed168 | 5637 | malloc_printerr (const char *str) |
3e030bd5 | 5638 | { |
b5bd5bfe | 5639 | #if IS_IN (libc) |
cca9684f | 5640 | __libc_message ("%s\n", str); |
b5bd5bfe SP |
5641 | #else |
5642 | __libc_fatal (str); | |
5643 | #endif | |
ec2c1fce | 5644 | __builtin_unreachable (); |
3e030bd5 UD |
5645 | } |
5646 | ||
b5bd5bfe | 5647 | #if IS_IN (libc) |
a204dbb2 UD |
5648 | /* We need a wrapper function for one of the additions of POSIX. */ |
5649 | int | |
5650 | __posix_memalign (void **memptr, size_t alignment, size_t size) | |
5651 | { | |
5652 | void *mem; | |
5653 | ||
cc35896e | 5654 | if (!__malloc_initialized) |
2d2d9f2b SP |
5655 | ptmalloc_init (); |
5656 | ||
a204dbb2 UD |
5657 | /* Test whether the SIZE argument is valid. It must be a power of |
5658 | two multiple of sizeof (void *). */ | |
de02bd05 | 5659 | if (alignment % sizeof (void *) != 0 |
fc56e970 | 5660 | || !powerof2 (alignment / sizeof (void *)) |
de02bd05 | 5661 | || alignment == 0) |
a204dbb2 UD |
5662 | return EINVAL; |
5663 | ||
10ad46bc OB |
5664 | |
5665 | void *address = RETURN_ADDRESS (0); | |
5666 | mem = _mid_memalign (alignment, size, address); | |
a204dbb2 | 5667 | |
6c8dbf00 OB |
5668 | if (mem != NULL) |
5669 | { | |
5670 | *memptr = mem; | |
5671 | return 0; | |
5672 | } | |
a204dbb2 UD |
5673 | |
5674 | return ENOMEM; | |
5675 | } | |
5676 | weak_alias (__posix_memalign, posix_memalign) | |
b5bd5bfe | 5677 | #endif |
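/*
   Illustrative caller-side sketch for the POSIX wrapper above.  Unlike
   malloc, posix_memalign reports failure through its return value
   (EINVAL or ENOMEM), not through errno.

   #include <stdlib.h>

   void *buf;
   // alignment must be a power-of-two multiple of sizeof (void *)
   int err = posix_memalign (&buf, 64, 4096);
   if (err == 0)
     free (buf);
*/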
a204dbb2 | 5678 | |
20c13899 OB |
5679 | |
5680 | int | |
c52ff39e | 5681 | __malloc_info (int options, FILE *fp) |
bb066545 | 5682 | { |
20c13899 OB |
5683 | /* For now, at least. */ |
5684 | if (options != 0) | |
5685 | return EINVAL; | |
bb066545 | 5686 | |
20c13899 OB |
5687 | int n = 0; |
5688 | size_t total_nblocks = 0; | |
5689 | size_t total_nfastblocks = 0; | |
5690 | size_t total_avail = 0; | |
5691 | size_t total_fastavail = 0; | |
5692 | size_t total_system = 0; | |
5693 | size_t total_max_system = 0; | |
5694 | size_t total_aspace = 0; | |
5695 | size_t total_aspace_mprotect = 0; | |
bb066545 | 5696 | |
6c8dbf00 | 5697 | |
6c8dbf00 | 5698 | |
cc35896e | 5699 | if (!__malloc_initialized) |
987c0269 | 5700 | ptmalloc_init (); |
bb066545 | 5701 | |
987c0269 | 5702 | fputs ("<malloc version=\"1\">\n", fp); |
bb066545 | 5703 | |
987c0269 OB |
5704 | /* Iterate over all arenas currently in use. */ |
5705 | mstate ar_ptr = &main_arena; | |
5706 | do | |
5707 | { | |
5708 | fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++); | |
8b35e35d | 5709 | |
987c0269 OB |
5710 | size_t nblocks = 0; |
5711 | size_t nfastblocks = 0; | |
5712 | size_t avail = 0; | |
5713 | size_t fastavail = 0; | |
5714 | struct | |
5715 | { | |
5716 | size_t from; | |
5717 | size_t to; | |
5718 | size_t total; | |
5719 | size_t count; | |
5720 | } sizes[NFASTBINS + NBINS - 1]; | |
5721 | #define nsizes (sizeof (sizes) / sizeof (sizes[0])) | |
6c8dbf00 | 5722 | |
4bf5f222 | 5723 | __libc_lock_lock (ar_ptr->mutex); |
bb066545 | 5724 | |
b6d2c447 NH |
5725 | /* Account for top chunk. The top-most available chunk is |
5726 | treated specially and is never in any bin. See "initial_top" | |
5727 | comments. */ | |
5728 | avail = chunksize (ar_ptr->top); | |
5729 | nblocks = 1; /* Top always exists. */ | |
5730 | ||
987c0269 OB |
5731 | for (size_t i = 0; i < NFASTBINS; ++i) |
5732 | { | |
5733 | mchunkptr p = fastbin (ar_ptr, i); | |
5734 | if (p != NULL) | |
5735 | { | |
5736 | size_t nthissize = 0; | |
5737 | size_t thissize = chunksize (p); | |
5738 | ||
5739 | while (p != NULL) | |
5740 | { | |
49c3c376 | 5741 | if (__glibc_unlikely (misaligned_chunk (p))) |
768358b6 | 5742 | malloc_printerr ("__malloc_info(): " |
a1a486d7 | 5743 | "unaligned fastbin chunk detected"); |
987c0269 | 5744 | ++nthissize; |
a1a486d7 | 5745 | p = REVEAL_PTR (p->fd); |
987c0269 OB |
5746 | } |
5747 | ||
5748 | fastavail += nthissize * thissize; | |
5749 | nfastblocks += nthissize; | |
5750 | sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1); | |
5751 | sizes[i].to = thissize; | |
5752 | sizes[i].count = nthissize; | |
5753 | } | |
5754 | else | |
5755 | sizes[i].from = sizes[i].to = sizes[i].count = 0; | |
bb066545 | 5756 | |
987c0269 OB |
5757 | sizes[i].total = sizes[i].count * sizes[i].to; |
5758 | } | |
bb066545 | 5759 | |
bb066545 | 5760 | |
987c0269 OB |
5761 | mbinptr bin; |
5762 | struct malloc_chunk *r; | |
bb066545 | 5763 | |
987c0269 OB |
5764 | for (size_t i = 1; i < NBINS; ++i) |
5765 | { | |
5766 | bin = bin_at (ar_ptr, i); | |
5767 | r = bin->fd; | |
5768 | sizes[NFASTBINS - 1 + i].from = ~((size_t) 0); | |
5769 | sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total | |
5770 | = sizes[NFASTBINS - 1 + i].count = 0; | |
5771 | ||
5772 | if (r != NULL) | |
5773 | while (r != bin) | |
5774 | { | |
e9c4fe93 | 5775 | size_t r_size = chunksize_nomask (r); |
987c0269 | 5776 | ++sizes[NFASTBINS - 1 + i].count; |
e9c4fe93 | 5777 | sizes[NFASTBINS - 1 + i].total += r_size; |
987c0269 | 5778 | sizes[NFASTBINS - 1 + i].from |
e9c4fe93 | 5779 | = MIN (sizes[NFASTBINS - 1 + i].from, r_size); |
987c0269 | 5780 | sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to, |
e9c4fe93 | 5781 | r_size); |
987c0269 OB |
5782 | |
5783 | r = r->fd; | |
5784 | } | |
5785 | ||
5786 | if (sizes[NFASTBINS - 1 + i].count == 0) | |
5787 | sizes[NFASTBINS - 1 + i].from = 0; | |
5788 | nblocks += sizes[NFASTBINS - 1 + i].count; | |
5789 | avail += sizes[NFASTBINS - 1 + i].total; | |
5790 | } | |
bb066545 | 5791 | |
7a9368a1 FW |
5792 | size_t heap_size = 0; |
5793 | size_t heap_mprotect_size = 0; | |
34eb4157 | 5794 | size_t heap_count = 0; |
7a9368a1 FW |
5795 | if (ar_ptr != &main_arena) |
5796 | { | |
34eb4157 | 5797 | /* Iterate over the arena heaps from back to front. */ |
7a9368a1 | 5798 | heap_info *heap = heap_for_ptr (top (ar_ptr)); |
34eb4157 FW |
5799 | do |
5800 | { | |
5801 | heap_size += heap->size; | |
5802 | heap_mprotect_size += heap->mprotect_size; | |
5803 | heap = heap->prev; | |
5804 | ++heap_count; | |
5805 | } | |
5806 | while (heap != NULL); | |
7a9368a1 FW |
5807 | } |
5808 | ||
4bf5f222 | 5809 | __libc_lock_unlock (ar_ptr->mutex); |
da2d2fb6 | 5810 | |
987c0269 OB |
5811 | total_nfastblocks += nfastblocks; |
5812 | total_fastavail += fastavail; | |
0588a9cb | 5813 | |
987c0269 OB |
5814 | total_nblocks += nblocks; |
5815 | total_avail += avail; | |
0588a9cb | 5816 | |
987c0269 OB |
5817 | for (size_t i = 0; i < nsizes; ++i) |
5818 | if (sizes[i].count != 0 && i != NFASTBINS) | |
b0f6679b | 5819 | fprintf (fp, "\ |
987c0269 OB |
5820 | <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n", |
5821 | sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count); | |
fdfd175d | 5822 | |
987c0269 OB |
5823 | if (sizes[NFASTBINS].count != 0) |
5824 | fprintf (fp, "\ | |
5825 | <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n", | |
5826 | sizes[NFASTBINS].from, sizes[NFASTBINS].to, | |
5827 | sizes[NFASTBINS].total, sizes[NFASTBINS].count); | |
fdfd175d | 5828 | |
987c0269 OB |
5829 | total_system += ar_ptr->system_mem; |
5830 | total_max_system += ar_ptr->max_system_mem; | |
bb066545 | 5831 | |
987c0269 OB |
5832 | fprintf (fp, |
5833 | "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n" | |
5834 | "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n" | |
5835 | "<system type=\"current\" size=\"%zu\"/>\n" | |
5836 | "<system type=\"max\" size=\"%zu\"/>\n", | |
5837 | nfastblocks, fastavail, nblocks, avail, | |
5838 | ar_ptr->system_mem, ar_ptr->max_system_mem); | |
346bc35c | 5839 | |
987c0269 OB |
5840 | if (ar_ptr != &main_arena) |
5841 | { | |
987c0269 OB |
5842 | fprintf (fp, |
5843 | "<aspace type=\"total\" size=\"%zu\"/>\n" | |
34eb4157 FW |
5844 | "<aspace type=\"mprotect\" size=\"%zu\"/>\n" |
5845 | "<aspace type=\"subheaps\" size=\"%zu\"/>\n", | |
5846 | heap_size, heap_mprotect_size, heap_count); | |
7a9368a1 FW |
5847 | total_aspace += heap_size; |
5848 | total_aspace_mprotect += heap_mprotect_size; | |
987c0269 OB |
5849 | } |
5850 | else | |
5851 | { | |
5852 | fprintf (fp, | |
5853 | "<aspace type=\"total\" size=\"%zu\"/>\n" | |
5854 | "<aspace type=\"mprotect\" size=\"%zu\"/>\n", | |
5855 | ar_ptr->system_mem, ar_ptr->system_mem); | |
5856 | total_aspace += ar_ptr->system_mem; | |
5857 | total_aspace_mprotect += ar_ptr->system_mem; | |
5858 | } | |
bb066545 | 5859 | |
987c0269 | 5860 | fputs ("</heap>\n", fp); |
bb066545 UD |
5861 | ar_ptr = ar_ptr->next; |
5862 | } | |
5863 | while (ar_ptr != &main_arena); | |
5864 | ||
5865 | fprintf (fp, | |
62a58816 SP |
5866 | "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n" |
5867 | "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n" | |
9fa76613 | 5868 | "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n" |
62a58816 SP |
5869 | "<system type=\"current\" size=\"%zu\"/>\n" |
5870 | "<system type=\"max\" size=\"%zu\"/>\n" | |
5871 | "<aspace type=\"total\" size=\"%zu\"/>\n" | |
5872 | "<aspace type=\"mprotect\" size=\"%zu\"/>\n" | |
5873 | "</malloc>\n", | |
5874 | total_nfastblocks, total_fastavail, total_nblocks, total_avail, | |
4d653a59 | 5875 | mp_.n_mmaps, mp_.mmapped_mem, |
62a58816 SP |
5876 | total_system, total_max_system, |
5877 | total_aspace, total_aspace_mprotect); | |
bb066545 UD |
5878 | |
5879 | return 0; | |
5880 | } | |
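/*
   Illustrative only: the XML report produced above can be sent to any
   stdio stream; per the check at the top, options other than 0
   currently yield EINVAL.

   #include <malloc.h>
   #include <stdio.h>

   if (malloc_info (0, stdout) != 0)
     fputs ("malloc_info failed\n", stderr);
*/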
b5bd5bfe | 5881 | #if IS_IN (libc) |
c52ff39e | 5882 | weak_alias (__malloc_info, malloc_info) |
bb066545 | 5883 | |
eba19d2b | 5884 | strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc) |
eba19d2b UD |
5885 | strong_alias (__libc_free, __free) strong_alias (__libc_free, free) |
5886 | strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc) | |
5887 | strong_alias (__libc_memalign, __memalign) | |
5888 | weak_alias (__libc_memalign, memalign) | |
5889 | strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc) | |
5890 | strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc) | |
5891 | strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc) | |
5892 | strong_alias (__libc_mallinfo, __mallinfo) | |
5893 | weak_alias (__libc_mallinfo, mallinfo) | |
e3960d1c ML |
5894 | strong_alias (__libc_mallinfo2, __mallinfo2) |
5895 | weak_alias (__libc_mallinfo2, mallinfo2) | |
eba19d2b | 5896 | strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt) |
7e3be507 UD |
5897 | |
5898 | weak_alias (__malloc_stats, malloc_stats) | |
5899 | weak_alias (__malloc_usable_size, malloc_usable_size) | |
5900 | weak_alias (__malloc_trim, malloc_trim) | |
b5bd5bfe | 5901 | #endif |
7e3be507 | 5902 | |
025b33ae FW |
5903 | #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26) |
5904 | compat_symbol (libc, __libc_free, cfree, GLIBC_2_0); | |
5905 | #endif | |
f65fd747 | 5906 | |
fa8d436c | 5907 | /* ------------------------------------------------------------ |
6c8dbf00 | 5908 | History: |
f65fd747 | 5909 | |
6c8dbf00 | 5910 | [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc] |
f65fd747 | 5911 | |
6c8dbf00 | 5912 | */ |
fa8d436c UD |
5913 | /* |
5914 | * Local variables: | |
5915 | * c-basic-offset: 2 | |
5916 | * End: | |
5917 | */ |