/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void*
malloc_hook_ini(size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return __libc_malloc(sz);
}

static void*
realloc_hook_ini(void* ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return __libc_realloc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return __libc_memalign(alignment, sz);
}

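/* Illustrative sketch, not part of the original file: the *_hook_ini
   functions above exist so that the very first call through a hook can
   initialize ptmalloc and then fall through to the real allocator.  An
   application of this era could chain its own hook in the same way; the
   hypothetical, compiled-out example below traces allocations and avoids
   recursion by restoring the previous hook around the inner malloc call.  */
#if 0
# include <malloc.h>
# include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
tracing_malloc_hook (size_t sz, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;      /* step aside to avoid recursion */
  result = malloc (sz);
  fprintf (stderr, "malloc (%zu) from %p returns %p\n", sz, caller, result);
  old_malloc_hook = __malloc_hook;      /* pick up any nested changes */
  __malloc_hook = tracing_malloc_hook;  /* reinstall this hook */
  return result;
}
#endif
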
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled at the user's request (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe. */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init (void)
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )

/* Visualize the chunk as being partitioned into blocks of 256 bytes from the
   highest address of the chunk, downwards.  The beginning of each block tells
   us the size of the previous block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption. */
static size_t
malloc_check_get_size(mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = MAGICBYTE(p);

  assert(using_malloc_checking == 1);

  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
       (c = ((unsigned char*)p)[size]) != magic;
       size -= c) {
    if(c<=0 || size<(c+2*SIZE_SZ)) {
      malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
                      chunk2mem(p));
      return 0;
    }
  }

  /* chunk2mem size. */
  return size - 2*SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static void*
internal_function
mem2mem_check(void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char* m_ptr = ptr;
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (void*)m_ptr;
}

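/* Illustrative sketch, not part of the original file: the backward length
   chain that mem2mem_check writes and that malloc_check_get_size and
   mem2chunk_check walk, shown on a plain buffer with hypothetical helper
   names (compiled out).  The writer stores 0xFF markers every 255 bytes
   down from the last usable byte until the remaining distance to the
   requested size fits in one byte, stores that distance, and finally puts
   the magic byte at the requested size.  The reader hops backwards by the
   stored byte values until it reaches the magic byte, whose index is the
   requested size.  (The in-tree walkers additionally treat a zero byte or
   an underflowing index as corruption.)  */
#if 0
# include <stddef.h>

static void
chain_write (unsigned char *buf, size_t last, size_t sz, unsigned char magic)
{
  size_t i;
  for (i = last; i > sz; i -= 0xFF)
    {
      if (i - sz < 0x100)
        {
          buf[i] = (unsigned char) (i - sz);
          break;
        }
      buf[i] = 0xFF;
    }
  buf[sz] = magic;
}

static size_t
chain_read (const unsigned char *buf, size_t last, unsigned char magic)
{
  size_t i;
  unsigned char c;
  for (i = last; (c = buf[i]) != magic; i -= c)
    ;   /* each byte says how far to jump back towards the magic byte */
  return i;   /* index of the magic byte == requested user size */
}
#endif
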
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static void*
malloc_check(size_t sz, const void *caller)
{
  void *victim;

  if (sz+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
free_check(void* mem, const void *caller)
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
  _int_free(&main_arena, p, 1);
  (void)mutex_unlock(&main_arena.mutex);
}

static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here. */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
{
  void* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2(alignment)) {
    size_t a = MALLOC_ALIGNMENT * 2;
    while (a < alignment) a <<= 1;
    alignment = a;
  }

  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
        NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}


/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0. */

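/* Illustrative sketch, not part of the original file: how a dumping
   application (GNU Emacs is the primary user) might drive this interface
   through the public malloc_get_state/malloc_set_state entry points
   declared in <malloc.h>.  Hypothetical, compiled-out example; writing
   out the opaque state blob together with the heap image and mapping
   both back in is left to the application.  */
#if 0
# include <malloc.h>
# include <stdlib.h>

static void *saved_malloc_state;

/* Before dumping: record the allocator's bookkeeping.  */
static void
before_dump (void)
{
  saved_malloc_state = malloc_get_state ();
}

/* After the dumped heap image has been restored: re-establish the
   recorded malloc state, then release the state blob itself.  */
static void
after_restore (void)
{
  if (saved_malloc_state != NULL
      && malloc_set_state (saved_malloc_state) == 0)
    {
      free (saved_malloc_state);
      saved_malloc_state = NULL;
    }
}
#endif
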
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */

struct malloc_save_state {
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char* sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

void*
__malloc_get_state(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
  ms->max_total_mem = 0;
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  ms->max_fast = get_max_fast();
#ifdef PER_THREAD
  ms->arena_test = mp_.arena_test;
  ms->arena_max = mp_.arena_max;
  ms->narenas = narenas;
#endif
  (void)mutex_unlock(&main_arena.mutex);
  return (void*)ms;
}

int
__malloc_set_state(void* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  if (ms->version >= 4)
    set_max_fast(ms->max_fast);
  else
    set_max_fast(64); /* 64 used to be the value we always used.  */
  for (i=0; i<NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  if (ms->version >= 4) {
#ifdef PER_THREAD
    mp_.arena_test = ms->arena_test;
    mp_.arena_max = ms->arena_max;
    narenas = ms->narenas;
#endif
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */