/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2).  */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work.  */

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}

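/* For illustration only (this example is not part of hooks.c): user
   code could chain its own tracing hook in front of malloc, using the
   (deprecated) hook API declared in <malloc.h>.  A minimal sketch:

     static void *(*old_malloc_hook) (size_t, const void *);

     static void *
     my_malloc_hook (size_t size, const void *caller)
     {
       void *result;
       __malloc_hook = old_malloc_hook;  // uninstall to avoid recursion
       result = malloc (size);
       old_malloc_hook = __malloc_hook;  // malloc may have changed it
       __malloc_hook = my_malloc_hook;   // reinstall ourselves
       return result;
     }

   This mirrors the pattern above: the *_hook_ini functions likewise
   uninstall themselves before calling the real allocator.  */
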
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks.  */
void
__malloc_check_init (void)
{
  if (disallow_malloc_check)
    {
      disallow_malloc_check = 0;
      return;
    }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}

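/* Illustration (not part of the original file): with the hooks above
   installed -- e.g. after starting a program with the MALLOC_CHECK_
   environment variable set -- a double free is reported instead of
   silently corrupting the heap:

     char *p = malloc (10);  // dispatched to malloc_check
     free (p);               // free_check: valid chunk, freed normally
     free (p);               // free_check: "free(): invalid pointer"
*/
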
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code.  */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
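/* Worked example (illustrative): for a chunk at p = 0x1000 we get
   (0x1000 >> 3) ^ (0x1000 >> 11) = 0x200 ^ 0x2 = 0x202, so the magic
   byte is 0x202 & 0xFF = 0x02.  Deriving the byte from the chunk
   address means a block that is copied or misaligned in memory fails
   the check even if its trailer is otherwise intact.  */
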
/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = chunksize (p) - 2 * SIZE_SZ;
  if (!chunk_is_mmapped (p))
    max_sz += SIZE_SZ;
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      m_ptr[i] = block_sz;
    }
  m_ptr[req_sz] = magic;
  return (void *) m_ptr;
}

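/* Worked example (illustrative): suppose req_sz = 10, max_sz = 20 and
   the magic byte is 0x02.  mem2mem_check stores

     m_ptr[19] = 9     // MIN (19 - 10, 0xff); then i -= 9 ends the loop
     m_ptr[10] = 0x02  // the magic byte at the requested size

   malloc_check_get_size later scans from the top: it reads the length
   byte 9, skips down 9 positions, finds the magic byte and recovers
   the request size 10.  An overrun that overwrites any byte of this
   chain breaks the walk and is reported as memory corruption.  */
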
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory.  */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first.  */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
        {
          if (c == 0 || sz < (c + 2 * SIZE_SZ))
            return NULL;
        }
    }
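  /* Flip the magic byte so that a second call for the same pointer
     fails the scan above and the pointer is rejected.  realloc_check
     undoes this through *magic_p when the old chunk stays in use.  */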
  ((unsigned char *) p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *) p + sz;
  return p;
}

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

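/* Note (editorial): each checking wrapper below takes main_arena.mutex
   and runs top_check () immediately before calling into the internal
   allocator, so a smashed top chunk is diagnosed before it can be
   split or extended.  */
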
static void *
malloc_check (size_t sz, const void *caller)
{
  void *victim;

  /* sz + 1 accounts for the trailing magic byte; it wraps to 0 only
     when sz == (size_t) -1, which cannot be satisfied.  */
  if (sz + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, sz + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (victim, sz);
}

static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
      return;
    }
  _int_free (&main_arena, p, 1);
  __libc_lock_unlock (main_arena.mutex);
}

static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void *newmem = 0;
  unsigned char *magic_p;

  if (bytes + 1 == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }
  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  checked_request2size (bytes + 1, nb);
  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, nb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead.  */
        if (oldsize - SIZE_SZ >= nb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free.  */
            top_check ();
            newmem = _int_malloc (&main_arena, bytes + 1);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      INTERNAL_SIZE_T nb;
      checked_request2size (bytes + 1, nb);
      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we never
     reach this point if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (newmem, bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  Round odd values up: e.g. with
     MALLOC_ALIGNMENT == 16, a requested alignment of 24 becomes 32.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (mem, bytes);
}

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */

#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because __malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;

  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

#endif /* SHLIB_COMPAT */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */