1 /* The malloc headers and source files from the C library follow here. */
3 /* Declarations for `malloc' and friends.
4 Copyright 1990, 91, 92, 93, 95, 96 Free Software Foundation, Inc.
5 Written May 1989 by Mike Haertel.
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Library General Public License as
9 published by the Free Software Foundation; either version 2 of the
10 License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Library General Public License for more details.
17 You should have received a copy of the GNU Library General Public
18 License along with this library; see the file COPYING.LIB. If
19 not, write to the Free Software Foundation, Inc.,
20 59 Temple Place, Suite 330, Boston, MA 02111 USA.
22 The author may be reached (Email) at the address mike@ai.mit.edu,
23 or (US mail) as Mike Haertel c/o Free Software Foundation. */
26 1. Augment the mstats struct so we can see how many blocks for fragments
27 and how many blocks for large requests were allocated.
31 1. Reorganized the source for my benefit.
32 2. Integrated the range-checking code by default.
33 3. free(0) no longer dumps core.
34 4. Extended the statistics.
35 5. Fixed a couple of places where the stats were not kept correctly.
42 #if defined (HAVE_STRING_H)
48 #if defined (HAVE_LIMITS_H)
52 #if defined (HAVE_UNISTD_H)
54 # include <sys/types.h>
59 #if defined (HAVE_STDDEF_H)
64 #if defined (RCHECK) && !defined (botch)
66 # define STDIO_H_INCLUDED
75 /* Need an autoconf test for this. */
78 # define genptr_t void *
81 # define genptr_t char *
82 #endif /* !__STDC__ */
84 #if !defined (HAVE_MEMSET)
85 # define memset(s, zero, n) bzero ((s), (n))
87 #if !defined (HAVE_MEMCPY)
88 # define memcpy(d, s, n) bcopy ((s), (d), (n))
91 /* Cope with systems lacking `memmove'. */
92 #if !defined (HAVE_MEMMOVE) && !defined (memmove)
93 static void malloc_safe_bcopy
__P ((genptr_t
, genptr_t
, size_t));
94 # define memmove(to, from, size) malloc_safe_bcopy ((from), (to), (size))
102 #define min(A, B) ((A) < (B) ? (A) : (B))
105 /* Return values for `mprobe': these are the kinds of inconsistencies that
106 `mcheck' enables detection of. */
109 MCHECK_DISABLED
= -1, /* Consistency checking is not turned on. */
110 MCHECK_OK
, /* Block is fine. */
111 MCHECK_FREE
, /* Block freed twice. */
112 MCHECK_HEAD
, /* Memory before the block was clobbered. */
113 MCHECK_TAIL
/* Memory after the block was clobbered. */
116 /* Statistics available to the user. */
119 size_t bytes_total
; /* Total size of the heap. */
120 size_t chunks_used
; /* Chunks allocated by the user. */
121 size_t bytes_used
; /* Byte total of user-allocated chunks. */
122 size_t chunks_free
; /* Chunks in the free list. */
123 size_t bytes_free
; /* Byte total of chunks in the free list. */
124 int nmalloc
; /* Total number of calls to malloc. */
125 int nfree
; /* Total number of calls to free. */
126 int nrealloc
; /* Total number of calls to realloc. */
127 int nsbrk
; /* Total number of calls to sbrk. */
128 size_t tsbrk
; /* Total number of bytes allocated via sbrk. */
129 int negsbrk
; /* Total number of calls to sbrk with a negative arg */
130 size_t tnegsbrk
; /* Total number of bytes returned to the kernel. */
134 /* Arbitrary magical numbers. */
135 #define MAGICWORD 0xfedabeeb
136 #define MAGICFREE 0xd8675309
137 #define MAGICBYTE ((char) 0xd7)
138 #define MALLOCFLOOD ((char) 0x93)
139 #define FREEFLOOD ((char) 0x95)
143 size_t size
; /* Exact size requested by user. */
144 u_bits32_t magic
; /* Magic number to check header integrity. */
148 /* Functions exported by this library. */
149 /* Allocate SIZE bytes of memory. */
150 extern genptr_t malloc
__P ((size_t __size
));
152 /* Re-allocate the previously allocated block
153 in genptr_t, making the new block SIZE bytes long. */
154 extern genptr_t realloc
__P ((genptr_t __ptr
, size_t __size
));
156 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
157 extern genptr_t calloc
__P ((size_t __nmemb
, size_t __size
));
159 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
160 extern void free
__P ((genptr_t __ptr
));
162 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
163 extern genptr_t memalign
__P ((size_t __alignment
, size_t __size
));
165 /* Pick up the current statistics. */
166 extern struct mstats mstats
__P ((void));
169 extern enum mcheck_status mprobe
__P((genptr_t ptr
));
172 /* End of exported functions. */
174 /* The allocator divides the heap into blocks of fixed size; large
175 requests receive one or more whole blocks, and small requests
176 receive a fragment of a block. Fragment sizes are powers of two,
177 and all fragments of a block are the same size. When all the
178 fragments in a block have been freed, the block itself is freed. */
180 #define BLOCKSIZE 4096 /* 1 << BLOCKLOG */
181 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
183 /* Determine the amount of memory spanned by the initial heap table
184 (not an absolute limit). */
185 #define HEAP 4194304 /* 1 << 22 */
187 /* Number of contiguous free blocks allowed to build up at the end of
188 memory before they will be returned to the system. */
189 #define FINAL_FREE_BLOCKS 8
191 /* Data structure giving per-block information. */
194 /* Heap information for a busy block. */
197 /* Zero for a large (multiblock) object, or positive giving the
198 logarithm to the base two of the fragment size. */
204 size_t nfree
; /* Free frags in a fragmented block. */
205 size_t first
; /* First free fragment of the block. */
207 /* For a large object, in its first block, this has the number
208 of blocks in the object. In the other blocks, this has a
209 negative number which says how far back the first block is. */
213 /* Heap information for a free block (that may be the first of a
217 size_t size
; /* Size (in blocks) of a free cluster. */
218 size_t next
; /* Index of next free cluster. */
219 size_t prev
; /* Index of previous free cluster. */
223 /* Pointer to first block of the heap. */
224 static char *_heapbase
;
226 /* Table indexed by block number giving per-block information. */
227 static malloc_info
*_heapinfo
;
229 /* Address to block number and vice versa. */
230 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
231 #define ADDRESS(B) ((genptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
233 /* Number of info entries. */
234 static size_t heapsize
;
236 /* Current search index for the heap table. */
237 static size_t _heapindex
;
239 /* Limit of valid info table indices. */
240 static size_t _heaplimit
;
242 /* Doubly linked lists of free fragments. */
249 /* Free list headers for each fragment size. */
250 static struct list _fraghead
[BLOCKLOG
];
252 /* List of blocks allocated with `memalign'. */
255 struct alignlist
*next
;
256 genptr_t aligned
; /* The address that memalign returned. */
257 genptr_t exact
; /* The address that malloc returned. */
260 /* List of blocks allocated by memalign. */
261 static struct alignlist
*_aligned_blocks
= NULL
;
263 /* Internal versions of `malloc', `realloc', and `free'
264 used when these functions need to call each other. */
265 static genptr_t imalloc
__P ((size_t __size
));
266 static genptr_t irealloc
__P ((genptr_t __ptr
, size_t __size
));
267 static void ifree
__P ((genptr_t __ptr
));
269 /* Given an address in the middle of a malloc'd object,
270 return the address of the beginning of the object. */
271 static genptr_t malloc_find_object_address
__P ((genptr_t __ptr
));
273 /* Underlying allocation function; successive calls should
274 return contiguous pieces of memory. */
275 static genptr_t default_morecore
__P ((ptrdiff_t __size
));
277 /* Number of extra blocks to get each time we ask for more core.
278 This reduces the frequency of calling `default_morecore'. */
279 static size_t malloc_extra_blocks
;
281 /* Nonzero if `malloc' has been called and done its initialization. */
282 static int malloc_initialized
;
283 /* Function called to initialize malloc data structures. */
284 static int malloc_initialize
__P ((void));
287 static void zmemset
__P((genptr_t
, int, size_t));
288 static enum mcheck_status checkhdr
__P((const struct hdr
*));
289 static void mabort
__P((enum mcheck_status
));
292 /* Instrumentation. */
293 static size_t chunks_used
;
294 static size_t bytes_used
;
295 static size_t chunks_free
;
296 static size_t bytes_free
;
297 static int nmalloc
, nfree
, nrealloc
;
301 static size_t tnegsbrk
;
303 /* Aligned allocation. */
309 unsigned long int adj
;
311 result
= default_morecore (size
);
312 adj
= (unsigned long int) ((unsigned long int) ((char *) result
-
313 (char *) NULL
)) % BLOCKSIZE
;
317 adj
= BLOCKSIZE
- adj
;
318 new = default_morecore (adj
);
319 result
= (char *) result
+ adj
;
325 /* Get SIZE bytes, if we can get them starting at END.
326 Return the address of the space we got.
327 If we cannot get space at END, fail and return -1. */
329 get_contiguous_space (size
, position
)
336 before
= default_morecore (0);
337 /* If we can tell in advance that the break is at the wrong place,
339 if (before
!= position
)
342 /* Allocate SIZE bytes and get the address of them. */
343 after
= default_morecore (size
);
347 /* It was not contiguous--reject it. */
348 if (after
!= position
)
350 default_morecore (- size
);
357 /* This is called when `_heapinfo' and `heapsize' have just
358 been set to describe a new info table. Set up the table
359 to describe itself and account for it in the statistics. */
363 size_t block
, blocks
;
365 block
= BLOCK (_heapinfo
);
366 blocks
= BLOCKIFY (heapsize
* sizeof (malloc_info
));
368 /* Account for the _heapinfo block itself in the statistics. */
369 bytes_used
+= blocks
* BLOCKSIZE
;
372 /* Describe the heapinfo block itself in the heapinfo. */
373 _heapinfo
[block
].busy
.type
= 0;
374 _heapinfo
[block
].busy
.info
.size
= blocks
;
375 /* Leave back-pointers for malloc_find_address. */
377 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
380 /* Set everything up and remember that we have. */
384 if (malloc_initialized
)
387 heapsize
= HEAP
/ BLOCKSIZE
;
388 _heapinfo
= (malloc_info
*) align (heapsize
* sizeof (malloc_info
));
389 if (_heapinfo
== NULL
)
391 memset (_heapinfo
, 0, heapsize
* sizeof (malloc_info
));
392 _heapinfo
[0].free
.size
= 0;
393 _heapinfo
[0].free
.next
= _heapinfo
[0].free
.prev
= 0;
395 _heapbase
= (char *) _heapinfo
;
396 _heaplimit
= BLOCK (_heapbase
+ heapsize
* sizeof (malloc_info
));
398 register_heapinfo ();
400 malloc_initialized
= 1;
404 /* Allocate INCREMENT more bytes of data space,
405 and return the start of data space, or NULL on errors.
406 If INCREMENT is negative, shrink data space. */
408 default_morecore (increment
)
418 tnegsbrk
+= -increment
;
420 result
= (genptr_t
) sbrk (increment
);
421 if ((long)result
== -1L)
426 static int morecore_recursing
;
428 /* Get neatly aligned memory, initializing or
429 growing the heap info table as necessary. */
435 malloc_info
*newinfo
, *oldinfo
;
438 if (morecore_recursing
)
439 /* Avoid recursion. The caller will know how to handle a null return. */
442 result
= align (size
);
446 /* Check if we need to grow the info table. */
447 if ((size_t) BLOCK ((char *) result
+ size
) > heapsize
)
449 /* Calculate the new _heapinfo table size. We do not account for the
450 added blocks in the table itself, as we hope to place them in
451 existing free space, which is already covered by part of the
456 while ((size_t) BLOCK ((char *) result
+ size
) > newsize
);
458 /* We must not reuse existing core for the new info table when called
459 from realloc in the case of growing a large block, because the
460 block being grown is momentarily marked as free. In this case
461 _heaplimit is zero so we know not to reuse space for internal
465 /* First try to allocate the new info table in core we already
466 have, in the usual way using realloc. If realloc cannot
467 extend it in place or relocate it to existing sufficient core,
468 we will get called again, and the code above will notice the
469 `morecore_recursing' flag and return null. */
470 int save
= errno
; /* Don't want to clobber errno with ENOMEM. */
471 morecore_recursing
= 1;
472 newinfo
= (malloc_info
*) irealloc (_heapinfo
, newsize
* sizeof (malloc_info
));
473 morecore_recursing
= 0;
478 /* We found some space in core, and realloc has put the old
479 table's blocks on the free list. Now zero the new part
480 of the table and install the new table location. */
481 memset (&newinfo
[heapsize
], 0, (newsize
- heapsize
) * sizeof (malloc_info
));
488 /* Allocate new space for the malloc info table. */
491 newinfo
= (malloc_info
*) align (newsize
* sizeof (malloc_info
));
496 default_morecore (-size
);
500 /* Is it big enough to record status for its own space?
502 if ((size_t) BLOCK ((char *) newinfo
+ newsize
* sizeof (malloc_info
)) < newsize
)
505 /* Must try again. First give back most of what we just got. */
506 default_morecore (- newsize
* sizeof (malloc_info
));
510 /* Copy the old table to the beginning of the new,
511 and zero the rest of the new table. */
512 memcpy (newinfo
, _heapinfo
, heapsize
* sizeof (malloc_info
));
513 memset (&newinfo
[heapsize
], 0, (newsize
- heapsize
) * sizeof (malloc_info
));
518 register_heapinfo ();
520 /* Reset _heaplimit so ifree never decides
521 it can relocate or resize the info table. */
525 /* The new heap limit includes the new table just allocated. */
526 _heaplimit
= BLOCK ((char *) newinfo
+ heapsize
* sizeof (malloc_info
));
531 _heaplimit
= BLOCK ((char *) result
+ size
);
535 /* Allocate memory from the heap. */
541 size_t block
, blocks
, lastblocks
, start
;
545 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
546 valid address you can realloc and free (though not dereference).
548 It turns out that some extant code (sunrpc, at least Ultrix's version)
549 expects `malloc (0)' to return non-NULL and breaks otherwise.
557 if (size
< sizeof (struct list
))
558 size
= sizeof (struct list
);
560 #ifdef SUNOS_LOCALTIME_BUG
565 /* Determine the allocation policy based on the request size. */
566 if (size
<= BLOCKSIZE
/ 2)
568 /* Small allocation to receive a fragment of a block.
569 Determine the logarithm to base two of the fragment size. */
570 register size_t log
= 1;
572 while ((size
/= 2) != 0)
575 /* Look in the fragment lists for a
576 free fragment of the desired size. */
577 next
= _fraghead
[log
].next
;
580 /* There are free fragments of this size.
581 Pop a fragment out of the fragment list and return it.
582 Update the block's nfree and first counters. */
583 result
= (genptr_t
) next
;
584 next
->prev
->next
= next
->next
;
585 if (next
->next
!= NULL
)
586 next
->next
->prev
= next
->prev
;
587 block
= BLOCK (result
);
588 if (--_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
589 _heapinfo
[block
].busy
.info
.frag
.first
= (unsigned long int)
590 ((unsigned long int) ((char *) next
->next
- (char *) NULL
)
593 /* Update the statistics. */
595 bytes_used
+= 1 << log
;
597 bytes_free
-= 1 << log
;
601 /* No free fragments of the desired size, so get a new block
602 and break it into fragments, returning the first. */
603 result
= imalloc (BLOCKSIZE
);
607 /* Link all fragments but the first into the free list. */
608 next
= (struct list
*) ((char *) result
+ (1 << log
));
610 next
->prev
= &_fraghead
[log
];
611 _fraghead
[log
].next
= next
;
613 for (i
= 2; i
< (size_t) (BLOCKSIZE
>> log
); ++i
)
615 next
= (struct list
*) ((char *) result
+ (i
<< log
));
616 next
->next
= _fraghead
[log
].next
;
617 next
->prev
= &_fraghead
[log
];
618 next
->prev
->next
= next
;
619 next
->next
->prev
= next
;
622 /* Initialize the nfree and first counters for this block. */
623 block
= BLOCK (result
);
624 _heapinfo
[block
].busy
.type
= log
;
625 _heapinfo
[block
].busy
.info
.frag
.nfree
= i
- 1;
626 _heapinfo
[block
].busy
.info
.frag
.first
= i
- 1;
628 chunks_free
+= (BLOCKSIZE
>> log
) - 1;
629 bytes_free
+= BLOCKSIZE
- (1 << log
);
630 bytes_used
-= BLOCKSIZE
- (1 << log
);
635 /* Large allocation to receive one or more blocks.
636 Search the free list in a circle starting at the last place visited.
637 If we loop completely around without finding a large enough
638 space we will have to get more memory from the system. */
639 blocks
= BLOCKIFY (size
);
640 start
= block
= _heapindex
;
641 while (_heapinfo
[block
].free
.size
< blocks
)
643 block
= _heapinfo
[block
].free
.next
;
646 /* Need to get more from the system. Get a little extra. */
647 size_t wantblocks
= blocks
+ malloc_extra_blocks
;
648 block
= _heapinfo
[0].free
.prev
;
649 lastblocks
= _heapinfo
[block
].free
.size
;
650 /* Check to see if the new core will be contiguous with the
651 final free block; if so we don't need to get as much. */
652 if (_heaplimit
!= 0 && block
+ lastblocks
== _heaplimit
&&
653 /* We can't do this if we will have to make the heap info
654 table bigger to accommodate the new space. */
655 block
+ wantblocks
<= heapsize
&&
656 get_contiguous_space ((wantblocks
- lastblocks
) * BLOCKSIZE
,
657 ADDRESS (block
+ lastblocks
)))
659 /* We got it contiguously. Which block we are extending
660 (the `final free block' referred to above) might have
661 changed, if it got combined with a freed info table. */
662 block
= _heapinfo
[0].free
.prev
;
663 _heapinfo
[block
].free
.size
+= (wantblocks
- lastblocks
);
664 bytes_free
+= (wantblocks
- lastblocks
) * BLOCKSIZE
;
665 _heaplimit
+= wantblocks
- lastblocks
;
668 result
= morecore (wantblocks
* BLOCKSIZE
);
671 block
= BLOCK (result
);
672 /* Put the new block at the end of the free list. */
673 _heapinfo
[block
].free
.size
= wantblocks
;
674 _heapinfo
[block
].free
.prev
= _heapinfo
[0].free
.prev
;
675 _heapinfo
[block
].free
.next
= 0;
676 _heapinfo
[0].free
.prev
= block
;
677 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
= block
;
679 bytes_free
+= wantblocks
* BLOCKSIZE
;
680 /* Now loop to use some of that block for this allocation. */
684 /* At this point we have found a suitable free list entry.
685 Figure out how to remove what we need from the list. */
686 result
= ADDRESS (block
);
687 if (_heapinfo
[block
].free
.size
> blocks
)
689 /* The block we found has a bit left over,
690 so relink the tail end back into the free list. */
691 _heapinfo
[block
+ blocks
].free
.size
692 = _heapinfo
[block
].free
.size
- blocks
;
693 _heapinfo
[block
+ blocks
].free
.next
694 = _heapinfo
[block
].free
.next
;
695 _heapinfo
[block
+ blocks
].free
.prev
696 = _heapinfo
[block
].free
.prev
;
697 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
698 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
699 = _heapindex
= block
+ blocks
;
703 /* The block exactly matches our requirements,
704 so just remove it from the list. */
705 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
706 = _heapinfo
[block
].free
.prev
;
707 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
708 = _heapindex
= _heapinfo
[block
].free
.next
;
712 _heapinfo
[block
].busy
.type
= 0;
713 _heapinfo
[block
].busy
.info
.size
= blocks
;
715 bytes_used
+= blocks
* BLOCKSIZE
;
716 bytes_free
-= blocks
* BLOCKSIZE
;
718 /* Mark all the blocks of the object just allocated except for the
719 first with a negative number so you can find the first block by
720 adding that adjustment. */
722 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
738 if (malloc_initialized
== 0 && malloc_initialize () == 0)
742 hdr
= (struct hdr
*) imalloc (sizeof (struct hdr
) + size
+ 1);
747 hdr
->magic
= MAGICWORD
;
748 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
749 zmemset ((genptr_t
) (hdr
+ 1), MALLOCFLOOD
, size
);
750 return (genptr_t
) (hdr
+ 1);
752 return (imalloc (size
));
756 /* Free a block of memory allocated by `malloc'. */
758 /* Return memory to the heap. */
764 size_t block
, blocks
;
766 struct list
*prev
, *next
;
768 size_t lesscore_threshold
;
769 register struct alignlist
*l
;
774 /* Threshold of free space at which we will return some to the system. */
775 lesscore_threshold
= FINAL_FREE_BLOCKS
+ 2 * malloc_extra_blocks
;
777 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
778 if (l
->aligned
== ptr
)
780 l
->aligned
= NULL
; /* Mark the slot in the list as free. */
787 type
= _heapinfo
[block
].busy
.type
;
791 /* Get as many statistics as early as we can. */
793 bytes_used
-= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
794 bytes_free
+= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
796 /* Find the free cluster previous to this one in the free list.
797 Start searching at the last block referenced; this may benefit
798 programs with locality of allocation. */
802 i
= _heapinfo
[i
].free
.prev
;
806 i
= _heapinfo
[i
].free
.next
;
807 while (i
> 0 && i
< block
);
808 i
= _heapinfo
[i
].free
.prev
;
811 /* Determine how to link this block into the free list. */
812 if (block
== i
+ _heapinfo
[i
].free
.size
)
814 /* Coalesce this block with its predecessor. */
815 _heapinfo
[i
].free
.size
+= _heapinfo
[block
].busy
.info
.size
;
820 /* Really link this block back into the free list. */
821 _heapinfo
[block
].free
.size
= _heapinfo
[block
].busy
.info
.size
;
822 _heapinfo
[block
].free
.next
= _heapinfo
[i
].free
.next
;
823 _heapinfo
[block
].free
.prev
= i
;
824 _heapinfo
[i
].free
.next
= block
;
825 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
829 /* Now that the block is linked in, see if we can coalesce it
830 with its successor (by deleting its successor from the list
831 and adding in its size). */
832 if (block
+ _heapinfo
[block
].free
.size
== _heapinfo
[block
].free
.next
)
834 _heapinfo
[block
].free
.size
835 += _heapinfo
[_heapinfo
[block
].free
.next
].free
.size
;
836 _heapinfo
[block
].free
.next
837 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.next
;
838 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
842 /* How many trailing free blocks are there now? */
843 blocks
= _heapinfo
[block
].free
.size
;
845 /* Where is the current end of accessible core? */
846 curbrk
= default_morecore (0);
848 if (_heaplimit
!= 0 && curbrk
== ADDRESS (_heaplimit
))
850 /* The end of the malloc heap is at the end of accessible core.
851 It's possible that moving _heapinfo will allow us to
852 return some space to the system. */
854 size_t info_block
= BLOCK (_heapinfo
);
855 size_t info_blocks
= _heapinfo
[info_block
].busy
.info
.size
;
856 size_t prev_block
= _heapinfo
[block
].free
.prev
;
857 size_t prev_blocks
= _heapinfo
[prev_block
].free
.size
;
858 size_t next_block
= _heapinfo
[block
].free
.next
;
859 size_t next_blocks
= _heapinfo
[next_block
].free
.size
;
861 if (/* Win if this block being freed is last in core, the info table
862 is just before it, the previous free block is just before the
863 info table, and the two free blocks together form a useful
864 amount to return to the system. */
865 (block
+ blocks
== _heaplimit
&&
866 info_block
+ info_blocks
== block
&&
867 prev_block
!= 0 && prev_block
+ prev_blocks
== info_block
&&
868 blocks
+ prev_blocks
>= lesscore_threshold
) ||
869 /* Nope, not the case. We can also win if this block being
870 freed is just before the info table, and the table extends
871 to the end of core or is followed only by a free block,
872 and the total free space is worth returning to the system. */
873 (block
+ blocks
== info_block
&&
874 ((info_block
+ info_blocks
== _heaplimit
&&
875 blocks
>= lesscore_threshold
) ||
876 (info_block
+ info_blocks
== next_block
&&
877 next_block
+ next_blocks
== _heaplimit
&&
878 blocks
+ next_blocks
>= lesscore_threshold
)))
881 malloc_info
*newinfo
;
882 size_t oldlimit
= _heaplimit
;
884 /* Free the old info table, clearing _heaplimit to avoid
885 recursion into this code. We don't want to return the
886 table's blocks to the system before we have copied them to
890 _heaplimit
= oldlimit
;
892 /* Tell malloc to search from the beginning of the heap for
893 free blocks, so it doesn't reuse the ones just freed. */
896 /* Allocate new space for the info table and move its data. */
897 newinfo
= (malloc_info
*) imalloc (info_blocks
899 memmove (newinfo
, _heapinfo
, info_blocks
* BLOCKSIZE
);
902 /* We should now have coalesced the free block with the
903 blocks freed from the old info table. Examine the entire
904 trailing free block to decide below whether to return some
906 block
= _heapinfo
[0].free
.prev
;
907 blocks
= _heapinfo
[block
].free
.size
;
910 /* Now see if we can return stuff to the system. */
911 if (block
+ blocks
== _heaplimit
&& blocks
>= lesscore_threshold
)
913 register size_t bytes
= blocks
* BLOCKSIZE
;
914 _heaplimit
-= blocks
;
915 default_morecore (-bytes
);
916 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
917 = _heapinfo
[block
].free
.next
;
918 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
919 = _heapinfo
[block
].free
.prev
;
920 block
= _heapinfo
[block
].free
.prev
;
926 /* Set the next search to begin at this block. */
931 /* Do some of the statistics. */
933 bytes_used
-= 1 << type
;
935 bytes_free
+= 1 << type
;
937 /* Get the address of the first free fragment in this block. */
938 prev
= (struct list
*) ((char *) ADDRESS (block
) +
939 (_heapinfo
[block
].busy
.info
.frag
.first
<< type
));
941 if (_heapinfo
[block
].busy
.info
.frag
.nfree
== (BLOCKSIZE
>> type
) - 1)
943 /* If all fragments of this block are free, remove them
944 from the fragment list and free the whole block. */
946 for (i
= 1; i
< (size_t) (BLOCKSIZE
>> type
); ++i
)
948 prev
->prev
->next
= next
;
950 next
->prev
= prev
->prev
;
951 _heapinfo
[block
].busy
.type
= 0;
952 _heapinfo
[block
].busy
.info
.size
= 1;
954 /* Keep the statistics accurate. */
956 bytes_used
+= BLOCKSIZE
;
957 chunks_free
-= BLOCKSIZE
>> type
;
958 bytes_free
-= BLOCKSIZE
;
960 ifree (ADDRESS (block
));
962 else if (_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
964 /* If some fragments of this block are free, link this
965 fragment into the fragment list after the first free
966 fragment of this block. */
967 next
= (struct list
*) ptr
;
968 next
->next
= prev
->next
;
971 if (next
->next
!= NULL
)
972 next
->next
->prev
= next
;
973 ++_heapinfo
[block
].busy
.info
.frag
.nfree
;
977 /* No fragments of this block are free, so link this
978 fragment into the fragment list and announce that
979 it is the first free fragment of this block. */
980 prev
= (struct list
*) ptr
;
981 _heapinfo
[block
].busy
.info
.frag
.nfree
= 1;
982 _heapinfo
[block
].busy
.info
.frag
.first
= (unsigned long int)
983 ((unsigned long int) ((char *) ptr
- (char *) NULL
)
984 % BLOCKSIZE
>> type
);
985 prev
->next
= _fraghead
[type
].next
;
986 prev
->prev
= &_fraghead
[type
];
987 prev
->prev
->next
= prev
;
988 if (prev
->next
!= NULL
)
989 prev
->next
->prev
= prev
;
995 /* Return memory to the heap. */
1010 hdr
= ((struct hdr
*) ptr
) - 1;
1012 hdr
->magic
= MAGICFREE
;
1013 zmemset (ptr
, FREEFLOOD
, hdr
->size
);
1020 /* Change the size of a block allocated by `malloc'. */
1022 #ifndef HAVE_MEMMOVE
1023 /* Snarfed directly from Emacs src/dispnew.c:
1024 XXX Should use system bcopy if it handles overlap. */
1026 /* Like bcopy except never gets confused by overlap. */
1029 malloc_safe_bcopy (afrom
, ato
, size
)
1038 if (size
<= 0 || from
== to
)
1041 /* If the source and destination don't overlap, then bcopy can
1042 handle it. If they do overlap, but the destination is lower in
1043 memory than the source, we'll assume bcopy can handle that. */
1044 if (to
< from
|| from
+ size
<= to
)
1045 bcopy (from
, to
, size
);
1047 /* Otherwise, we'll copy from the end. */
1050 register char *endf
= from
+ size
;
1051 register char *endt
= to
+ size
;
1053 /* If TO - FROM is large, then we should break the copy into
1054 nonoverlapping chunks of TO - FROM bytes each. However, if
1055 TO - FROM is small, then the bcopy function call overhead
1056 makes this not worth it. The crossover point could be about
1057 anywhere. Since I don't think the obvious copy loop is too
1058 bad, I'm trying to err in its favor. */
1063 while (endf
!= from
);
1069 endt
-= (to
- from
);
1070 endf
-= (to
- from
);
1075 bcopy (endf
, endt
, to
- from
);
1078 /* If SIZE wasn't a multiple of TO - FROM, there will be a
1079 little left over. The amount left over is
1080 (endt + (to - from)) - to, which is endt - from. */
1081 bcopy (from
, to
, endt
- from
);
1085 #endif /* !HAVE_MEMMOVE */
1087 /* Resize the given region to the new size, returning a pointer
1088 to the (possibly moved) region. This is optimized for speed;
1089 some benchmarks seem to indicate that greater compactness is
1090 achieved by unconditionally allocating and copying to a
1091 new region. This module has incestuous knowledge of the
1092 internals of both free and malloc. */
1094 irealloc (ptr
, size
)
1100 size_t block
, blocks
, oldlimit
;
1107 else if (ptr
== NULL
)
1108 return imalloc (size
);
1110 block
= BLOCK (ptr
);
1112 type
= _heapinfo
[block
].busy
.type
;
1116 /* Maybe reallocate a large block to a small fragment. */
1117 if (size
<= BLOCKSIZE
/ 2)
1119 result
= imalloc (size
);
1122 memcpy (result
, ptr
, size
);
1128 /* The new size is a large allocation as well;
1129 see if we can hold it in place. */
1130 blocks
= BLOCKIFY (size
);
1131 if (blocks
< _heapinfo
[block
].busy
.info
.size
)
1133 /* The new size is smaller; return
1134 excess memory to the free list. */
1135 _heapinfo
[block
+ blocks
].busy
.type
= 0;
1136 _heapinfo
[block
+ blocks
].busy
.info
.size
1137 = _heapinfo
[block
].busy
.info
.size
- blocks
;
1138 _heapinfo
[block
].busy
.info
.size
= blocks
;
1139 /* We have just created a new chunk by splitting a chunk in two.
1140 Now we will free this chunk; increment the statistics counter
1141 so it doesn't become wrong when ifree decrements it. */
1143 ifree (ADDRESS (block
+ blocks
));
1146 else if (blocks
== _heapinfo
[block
].busy
.info
.size
)
1147 /* No size change necessary. */
1151 /* Won't fit, so allocate a new region that will.
1152 Free the old region first in case there is sufficient
1153 adjacent free space to grow without moving. */
1154 blocks
= _heapinfo
[block
].busy
.info
.size
;
1155 /* Prevent free from actually returning memory to the system. */
1156 oldlimit
= _heaplimit
;
1159 result
= imalloc (size
);
1160 if (_heaplimit
== 0)
1161 _heaplimit
= oldlimit
;
1164 /* Now we're really in trouble. We have to unfree
1165 the thing we just freed. Unfortunately it might
1166 have been coalesced with its neighbors. */
1167 if (_heapindex
== block
)
1168 (void) imalloc (blocks
* BLOCKSIZE
);
1172 previous
= imalloc ((block
- _heapindex
) * BLOCKSIZE
);
1173 (void) imalloc (blocks
* BLOCKSIZE
);
1179 memmove (result
, ptr
, blocks
* BLOCKSIZE
);
1184 /* Old size is a fragment; type is logarithm
1185 to base two of the fragment size. */
1186 if (size
> (size_t) (1 << (type
- 1)) &&
1187 size
<= (size_t) (1 << type
))
1188 /* The new size is the same kind of fragment. */
1192 /* The new size is different; allocate a new space,
1193 and copy the lesser of the new size and the old. */
1194 result
= imalloc (size
);
1197 memcpy (result
, ptr
, min (size
, (size_t) 1 << type
));
1216 if (malloc_initialized
== 0 && malloc_initialize () == 0)
1222 hdr
= ((struct hdr
*) ptr
) - 1;
1227 zmemset ((char *) ptr
+ size
, FREEFLOOD
, osize
- size
);
1228 hdr
= (struct hdr
*) irealloc ((genptr_t
) hdr
, sizeof (struct hdr
) + size
+ 1);
1233 hdr
->magic
= MAGICWORD
;
1234 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
1236 zmemset ((char *) (hdr
+ 1) + osize
, MALLOCFLOOD
, size
- osize
);
1237 return (genptr_t
) (hdr
+ 1);
1239 return (irealloc (ptr
, size
));
1243 /* Allocate an array of NMEMB elements each SIZE bytes long.
1244 The entire array is initialized to zeros. */
1246 calloc (nmemb
, size
)
1247 register size_t nmemb
;
1248 register size_t size
;
1250 register genptr_t result
;
1252 result
= malloc (nmemb
* size
);
1254 (void) memset (result
, 0, nmemb
* size
);
1259 /* Define the `cfree' alias for `free'. */
1268 memalign (alignment
, size
)
1273 unsigned long int adj
, lastadj
;
1275 /* Allocate a block with enough extra space to pad the block with up to
1276 (ALIGNMENT - 1) bytes if necessary. */
1277 result
= malloc (size
+ alignment
- 1);
1281 /* Figure out how much we will need to pad this particular block
1282 to achieve the required alignment. */
1283 adj
= (unsigned long int) ((char *) result
- (char *) NULL
) % alignment
;
1287 /* Reallocate the block with only as much excess as it needs. */
1289 result
= malloc (adj
+ size
);
1290 if (result
== NULL
) /* Impossible unless interrupted. */
1294 adj
= (unsigned long int) ((char *) result
- (char *) NULL
) % alignment
;
1295 /* It's conceivable we might have been so unlucky as to get a
1296 different block with weaker alignment. If so, this block is too
1297 short to contain SIZE after alignment correction. So we must
1298 try again and get another block, slightly larger. */
1299 } while (adj
> lastadj
);
1303 /* Record this block in the list of aligned blocks, so that `free'
1304 can identify the pointer it is passed, which will be in the middle
1305 of an allocated block. */
1307 struct alignlist
*l
;
1308 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1309 if (l
->aligned
== NULL
)
1310 /* This slot is free. Use it. */
1314 l
= (struct alignlist
*) imalloc (sizeof (struct alignlist
));
1320 l
->next
= _aligned_blocks
;
1321 _aligned_blocks
= l
;
1324 result
= l
->aligned
= (char *) result
+ alignment
- adj
;
1330 /* On some ANSI C systems, some libc functions call _malloc, _free
1331 and _realloc. Make them use the GNU functions. */
1337 return malloc (size
);
1348 _realloc (ptr
, size
)
1352 return realloc (ptr
, size
);
1358 struct mstats result
;
1360 result
.bytes_total
= (char *) default_morecore (0) - _heapbase
;
1361 result
.chunks_used
= chunks_used
;
1362 result
.bytes_used
= bytes_used
;
1363 result
.chunks_free
= chunks_free
;
1364 result
.bytes_free
= bytes_free
;
1365 result
.nmalloc
= nmalloc
;
1366 result
.nrealloc
= nrealloc
;
1367 result
.nfree
= nfree
;
1368 result
.nsbrk
= nsbrk
;
1369 result
.tsbrk
= tsbrk
;
1370 result
.negsbrk
= negsbrk
;
1371 result
.tnegsbrk
= tnegsbrk
;
1377 /* Standard debugging hooks for `malloc'. */
1380 zmemset (ptr
, val
, size
)
1391 static enum mcheck_status
1393 const struct hdr
*hdr
;
1395 enum mcheck_status status
;
1400 status
= MCHECK_HEAD
;
1403 status
= MCHECK_FREE
;
1406 if (((char *) &hdr
[1])[hdr
->size
] != MAGICBYTE
)
1407 status
= MCHECK_TAIL
;
1412 if (status
!= MCHECK_OK
)
1421 fprintf (stderr
, "mcheck: %s\n", msg
);
1429 enum mcheck_status status
;
1436 msg
= "memory is consistent, library is buggy";
1439 msg
= "memory clobbered before allocated block";
1442 msg
= "memory clobbered past end of allocated block";
1445 msg
= "block freed twice";
1448 msg
= "bogus mcheck_status, library is buggy";
1459 return checkhdr ((struct hdr
*)ptr
);
1462 #ifndef STDIO_H_INCLUDED
1467 print_malloc_stats (s
)
1473 fprintf (stderr
, "Memory allocation statistics: %s\n", s
? s
: "");
1474 fprintf (stderr
, "\nTotal chunks in use: %d, total chunks free: %d\n",
1475 ms
.chunks_used
, ms
.chunks_free
);
1476 fprintf (stderr
, "Total bytes in use: %u, total bytes free: %u\n",
1477 ms
.bytes_used
, ms
.bytes_free
);
1478 fprintf (stderr
, "Total bytes (from heapbase): %d\n", ms
.bytes_total
);
1479 fprintf (stderr
, "Total mallocs: %d, total frees: %d, total reallocs: %d\n",
1480 ms
.nmalloc
, ms
.nfree
, ms
.nrealloc
);
1481 fprintf (stderr
, "Total sbrks: %d, total bytes via sbrk: %d\n",
1482 ms
.nsbrk
, ms
.tsbrk
);
1483 fprintf (stderr
, "Total negative sbrks: %d, total bytes returned to kernel: %d\n",
1484 ms
.negsbrk
, ms
.tnegsbrk
);