/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work.  */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* Activate a standard set of debugging hooks.  */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code.  */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
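
/* Since the guard value is derived from the chunk address, it differs
   from chunk to chunk; a bug that writes a fixed byte past the end of
   a buffer is therefore unlikely to forge a valid magic byte.  */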

/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}
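
/* Example: for a chunk with max_sz == 600 instrumented for req_sz == 10
   (assuming the magic byte collides with none of the length bytes), the
   loop above stores m_ptr[599] = 0xff, m_ptr[344] = 0xff and
   m_ptr[89] = 79, and then m_ptr[10] = magic.  Walking the chain
   downwards from the top of the chunk therefore visits offsets
   599 -> 344 -> 89 -> 10 relative to the user pointer and stops at the
   magic byte, which is how malloc_check_get_size recovers the
   requested size.  */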

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory.  */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first.  */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}
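
/* A successful call flips the magic byte (the XOR with 0xFF above), so
   handing the same pointer to mem2chunk_check a second time makes the
   chain walk fail: this is how a double free is detected.  A caller
   that ends up keeping the old chunk (see realloc_check) restores the
   byte through the pointer returned in *magic_p.  */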

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;
  size_t nb;

  if (__builtin_add_overflow (sz, 1, &nb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

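  /* The extra byte requested above (nb == sz + 1) leaves room for the
     magic byte that mem2mem_check stores immediately after the sz
     bytes of user data.  */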
  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, nb);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;
  size_t rb;

  if (__builtin_add_overflow (bytes, 1, &rb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  if (!checked_request2size (rb, &nb))
    goto invert;

  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead.  */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free.  */
            top_check ();
            newmem = _int_malloc (&main_arena, rb);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns about magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
invert:
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
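
/* This layout must match, byte for byte, the state blocks written by
   malloc_get_state in the historic glibc releases that Emacs binaries
   were dumped against; malloc_set_state below reads it verbatim.  */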

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */