/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;
54
fa8d436c
UD
55/* Activate a standard set of debugging hooks. */
56void
60d2f8f3 57__malloc_check_init (void)
fa8d436c 58{
fa8d436c
UD
59 using_malloc_checking = 1;
60 __malloc_hook = malloc_check;
61 __free_hook = free_check;
62 __realloc_hook = realloc_check;
63 __memalign_hook = memalign_check;
fa8d436c
UD
64}
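
/* Note: this is normally reached via ptmalloc_init when malloc
   checking has been requested (e.g. through the MALLOC_CHECK_
   environment variable), rather than being called directly.  */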

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
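
/* Example (hypothetical chunk address): for p == 0x1000,
   ((0x1000 >> 3) ^ (0x1000 >> 11)) & 0xFF == (0x200 ^ 0x2) & 0xFF
   == 0x02, so 0x02 is the magic byte for that chunk.  Tying the
   byte to the chunk address keeps a stray copy of instrumented
   data from passing the check at a different address.  */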

/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
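
/* Illustration (hypothetical numbers): suppose the usable area is 600
   bytes and the requested size is 20, so the magic byte M sits at
   offset 20 from the user pointer.  mem2mem_check (below) then stores
   length bytes at offsets 599, 344 and 89:

     offset:  20   89    344   599
     byte:    M    0x45  0xFF  0xFF

   The scan hops downwards by each length byte (599-255 = 344,
   344-255 = 89, 89-69 = 20) and must land exactly on M; anything
   else means the bytes past the requested size were overwritten.  */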
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz. */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
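
/* This is also why magicbyte () never returns 1: when a length byte
   collides with the magic byte it is decremented above, and
   decrementing a length of 1 would store 0, which the scanning loops
   reject as corruption (and which would keep `i -= block_sz' from
   making progress).  */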

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
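  /* Invalidate the magic byte: a second free () or realloc () of the
     same pointer will fail the scan above.  realloc_check restores
     the byte via *magic_p when the old chunk stays in use.  */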
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}

/* Check for corruption of the top chunk. */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;
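
  /* sz + 1 == 0 detects sz == (size_t) -1, where there is no room
     for the extra magic byte requested below; report ENOMEM.  */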
  if (sz + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, sz + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;

  if (bytes + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  checked_request2size (bytes + 1, nb);
  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
            top_check ();
            newmem = _int_malloc (&main_arena, bytes + 1);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      INTERNAL_SIZE_T nb;
      checked_request2size (bytes + 1, nb);
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }
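  /* For example (hypothetically assuming MALLOC_ALIGNMENT == 16):
     a requested alignment of 24 is rounded up to 32, and 48 to 64.  */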

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
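
/* The field layout above mirrors what the historic malloc_get_state
   wrote into dumped Emacs images; this version of glibc only ever
   reads it (see malloc_set_state below).  */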

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high. */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */