/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

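/* Illustration (the common MALLOC_CHECK_ interpretation; see
   ptmalloc_init for the authoritative parsing of the variable):

     MALLOC_CHECK_=0 ./app    ignore detected corruption
     MALLOC_CHECK_=1 ./app    print a diagnostic on stderr
     MALLOC_CHECK_=2 ./app    call abort() immediately  */
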
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static void*
malloc_hook_ini(size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return __libc_malloc(sz);
}

static void*
realloc_hook_ini(void* ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return __libc_realloc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return __libc_memalign(alignment, sz);
}

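/* Note: __malloc_hook and friends initially point at the *_hook_ini
   functions above, so the first allocation call in a process runs
   ptmalloc_init() exactly once; each hook clears itself before
   delegating to the real allocator, so later calls bypass this path.  */
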
/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init (void)
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )

/* Visualize the chunk as being partitioned into blocks of 256 bytes from the
   highest address of the chunk, downwards.  The beginning of each block tells
   us the size of the previous block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size(mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = MAGICBYTE(p);

  assert(using_malloc_checking == 1);

  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
       (c = ((unsigned char*)p)[size]) != magic;
       size -= c) {
    if(c<=0 || size<(c+2*SIZE_SZ)) {
      malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
                      chunk2mem(p));
      return 0;
    }
  }

  /* chunk2mem size.  */
  return size - 2*SIZE_SZ;
}

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static void*
internal_function
mem2mem_check(void *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char* m_ptr = ptr;
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (void*)m_ptr;
}

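/* Worked example (hypothetical sizes, offsets relative to the user
   pointer): for sz == 10 in a chunk with 535 usable bytes,
   mem2mem_check stores 0xFF at offset 535, 0xFF at offset 280, the
   remainder 15 at offset 25, and the magic byte at offset 10.
   malloc_check_get_size replays that chain downwards,
   535 -> 280 -> 25 -> 10, and stops when it finds the magic byte.  */
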
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
        ((char*)p<mp_.sbrk_base ||
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}

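/* Note that the XOR with 0xFF above deliberately destroys the magic
   byte once a pointer has been validated.  A second free() of the same
   pointer therefore fails the magic-byte search and is reported
   instead of corrupting the arena (see free_check below).  */
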
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static void*
malloc_check(size_t sz, const void *caller)
{
  void *victim;

  if (sz+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

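/* The extra byte requested above (sz+1) is what gives mem2mem_check
   room to place the magic byte directly behind the user's sz bytes.  */
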
static void
free_check(void* mem, const void *caller)
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem, NULL);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);

    malloc_printerr(check_action, "free(): invalid pointer", mem);
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
  _int_free(&main_arena, p, 1);
  (void)mutex_unlock(&main_arena.mutex);
}

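/* Illustration (hypothetical program, run with MALLOC_CHECK_=1):

     p = malloc (10);
     free (p);
     free (p);    second call: mem2chunk_check returns NULL because
                  the magic byte was flipped by the first free, so
                  "free(): invalid pointer" is printed

   instead of silently corrupting the arena.  */
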
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here. */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
{
  void* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}


/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0.  */

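/* A minimal usage sketch (dump_heap_image/restore_heap_image are
   hypothetical application helpers, not part of this API):

     void *state = malloc_get_state ();    records malloc variables
     dump_heap_image (state);              app saves heap + state blob
     ...
     restore_heap_image ();                app maps the heap back in
     malloc_set_state (state);             restores malloc variables
     free (state);                         the blob was malloc()ed

   This mirrors the GNU Emacs `dumping' use case mentioned above.  */
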
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */

struct malloc_save_state {
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char* sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};

void*
__malloc_get_state(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
  ms->max_total_mem = 0;
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  ms->max_fast = get_max_fast();
#ifdef PER_THREAD
  ms->arena_test = mp_.arena_test;
  ms->arena_max = mp_.arena_max;
  ms->narenas = narenas;
#endif
  (void)mutex_unlock(&main_arena.mutex);
  return (void*)ms;
}

int
__malloc_set_state(void* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  if (ms->version >= 4)
    set_max_fast(ms->max_fast);
  else
    set_max_fast(64); /* 64 used to be the value we always used.  */
  for (i=0; i<NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct.  */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks.  */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks.  */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
        b->fd_nextsize = NULL;
        b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  if (ms->version >= 4) {
#ifdef PER_THREAD
    mp_.arena_test = ms->arena_test;
    mp_.arena_max = ms->arena_max;
    narenas = ms->narenas;
#endif
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */