/* Extracted from: git.ipfire.org — thirdparty/bash.git, blob lib/malloc/malloc.c
   (web-viewer residue; original page header preserved as a comment) */
1 /* malloc.c - dynamic memory allocation for bash. */
3 /* Copyright (C) 1985-2003 Free Software Foundation, Inc.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA.
19 In other words, you are welcome to use, share and improve this program.
20 You are forbidden to forbid anyone else to use, share and improve
21 what you give them. Help stamp out software-hoarding! */
24 * @(#)nmalloc.c 1 (Caltech) 2/21/82
26 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
28 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
30 * This is a very fast storage allocator. It allocates blocks of a small
31 * number of different sizes, and keeps free lists of each size. Blocks
32 * that don't exactly fit are passed up to the next larger size. In this
33 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
34 * This is designed for use in a program that uses vast quantities of
35 * memory, but bombs when it runs out. To make it a little better, it
36 * warns the user when he starts to get near the end.
38 * June 84, ACT: modified rcheck code to check the range given to malloc,
39 * rather than the range determined by the 2-power used.
41 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
42 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
43 * You should call malloc_init to reinitialize after loading dumped Emacs.
44 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
45 * realloc knows how to return same block given, just changing its size,
46 * if the power of 2 is correct.
50 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
51 * smallest allocatable block is 8 bytes. The overhead information will
52 * go in the first int of the block, and the returned pointer will point
56 /* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
57 uncover callers that refer to freed memory, and to have malloc() write 0xdf
58 into memory as it's allocated to avoid referring to previous contents. */
60 /* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
61 handled by configure. */
63 #if defined (HAVE_CONFIG_H)
65 #endif /* HAVE_CONFIG_H */
68 # include "bashtypes.h"
71 # include <sys/types.h>
74 #if defined (HAVE_UNISTD_H)
78 /* Determine which kind of system this is. */
81 #if defined (HAVE_STRING_H)
89 /* Define getpagesize () if the system does not. */
90 #ifndef HAVE_GETPAGESIZE
91 # include "getpagesize.h"
98 #ifdef MALLOC_REGISTER
105 /* System-specific omissions. */
112 #define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */
113 #define ISFREE ((char) 0x54) /* magic byte that implies free block */
114 /* this is for error checking only */
115 #define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by
116 memalign, with the rest of the word
117 being the distance to the true
118 beginning of the block. */
121 /* We have a flag indicating whether memory is allocated, an index in
122 nextf[], a size field, and a sentinel value to determine whether or
123 not a caller wrote before the start of allocated memory; to realloc()
124 memory we either copy mh_nbytes or just change mh_nbytes if there is
125 enough room in the block for the new size. Range checking is always
128 bits64_t mh_align
; /* 8 */
130 char mi_alloc
; /* ISALLOC or ISFREE */ /* 1 */
131 char mi_index
; /* index in nextf[] */ /* 1 */
132 /* Remainder are valid only when block is allocated */
133 u_bits16_t mi_magic2
; /* should be == MAGIC2 */ /* 2 */
134 u_bits32_t mi_nbytes
; /* # of bytes allocated */ /* 4 */
137 #define mh_alloc minfo.mi_alloc
138 #define mh_index minfo.mi_index
139 #define mh_nbytes minfo.mi_nbytes
140 #define mh_magic2 minfo.mi_magic2
142 #define MOVERHEAD sizeof(union mhead)
143 #define MALIGN_MASK 7 /* one less than desired alignment */
145 typedef union _malloc_guard
{
150 /* Access free-list pointer of a block.
151 It is stored at block + sizeof (char *).
152 This is not a field in the minfo structure member of union mhead
153 because we want sizeof (union mhead)
154 to describe the overhead for when the block is in use,
155 and we do not want the free-list pointer to count in that. */
158 (*(union mhead **) (sizeof (char *) + (char *) (a)))
160 /* To implement range checking, we write magic values in at the beginning
161 and end of each allocated block, and make sure they are undisturbed
162 whenever a free or a realloc occurs. */
164 /* Written in the 2 bytes before the block's real space (-4 bytes) */
165 #define MAGIC2 0x5555
166 #define MSLOP 4 /* 4 bytes extra for u_bits32_t size */
168 /* How many bytes are actually allocated for a request of size N --
169 rounded up to nearest multiple of 8 after accounting for malloc
171 #define ALLOCATED_BYTES(n) \
172 (((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
177 if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, __STRING(p), file, line); \
181 /* Minimum and maximum bucket indices for block splitting (and to bound
182 the search for a block to split). */
183 #define SPLIT_MIN 2 /* XXX - was 3 */
187 /* Minimum and maximum bucket indices for block coalescing. */
188 #define COMBINE_MIN 2
189 #define COMBINE_MAX (pagebucket - 1) /* XXX */
191 #define LESSCORE_MIN 10
192 #define LESSCORE_FRC 13
196 /* Flags for the internal functions. */
197 #define MALLOC_WRAPPER 0x01 /* wrapper function */
198 #define MALLOC_INTERNAL 0x02 /* internal function calling another */
199 #define MALLOC_NOTRACE 0x04 /* don't trace this allocation or free */
200 #define MALLOC_NOREG 0x08 /* don't register this allocation or free */
203 #define ERR_DUPFREE 0x01
204 #define ERR_UNALLOC 0x02
205 #define ERR_UNDERFLOW 0x04
206 #define ERR_ASSERT_FAILED 0x08
208 /* Evaluates to true if NB is appropriate for bucket NU. NB is adjusted
209 appropriately by the caller to account for malloc overhead. This only
210 checks that the recorded size is not too big for the bucket. We
211 can't check whether or not it's in between NU and NU-1 because we
212 might have encountered a busy bucket when allocating and moved up to
214 #define IN_BUCKET(nb, nu) ((nb) <= binsizes[(nu)])
216 /* Use this when we want to be sure that NB is in bucket NU. */
217 #define RIGHT_BUCKET(nb, nu) \
218 (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
220 /* nextf[i] is free list of blocks of size 2**(i + 3) */
222 static union mhead
*nextf
[NBUCKETS
];
224 /* busy[i] is nonzero while allocation of block size i is in progress. */
226 static char busy
[NBUCKETS
];
228 static int pagesz
; /* system page size. */
229 static int pagebucket
; /* bucket for requests a page in size */
230 static int maxbuck
; /* highest bucket receiving allocation request. */
232 static char *memtop
; /* top of heap */
234 static unsigned long binsizes
[NBUCKETS
] = {
235 8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
236 8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
237 1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
238 67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
239 2147483648UL, 4294967295UL
242 /* binsizes[x] == (1 << ((x) + 3)) */
243 #define binsize(x) binsizes[(x)]
245 /* Declarations for internal functions */
246 static PTR_T internal_malloc
__P((size_t, const char *, int, int));
247 static PTR_T internal_realloc
__P((PTR_T
, size_t, const char *, int, int));
248 static void internal_free
__P((PTR_T
, const char *, int, int));
249 static PTR_T internal_memalign
__P((unsigned int, size_t, const char *, int, int));
251 static PTR_T internal_calloc
__P((size_t, size_t, const char *, int, int));
252 static void internal_cfree
__P((PTR_T
, const char *, int, int));
255 static PTR_T internal_valloc
__P((size_t, const char *, int, int));
259 extern void botch ();
261 static void botch
__P((const char *, const char *, int));
263 static void xbotch
__P((PTR_T
, int, const char *, const char *, int));
266 extern char *sbrk ();
267 #endif /* !HAVE_DECL_SBRK */
270 extern int interrupt_immediately
;
271 extern int signal_is_trapped
__P((int));
275 struct _malstats _mstats
;
276 #endif /* MALLOC_STATS */
278 /* Debugging variables available to applications. */
279 int malloc_flags
= 0; /* future use */
280 int malloc_trace
= 0; /* trace allocations and frees to stderr */
281 int malloc_register
= 0; /* future use */
284 char _malloc_trace_buckets
[NBUCKETS
];
286 /* These should really go into a header file. */
287 extern void mtrace_alloc
__P((const char *, PTR_T
, size_t, const char *, int));
288 extern void mtrace_free
__P((PTR_T
, int, const char *, int));
293 botch (s
, file
, line
)
298 fprintf (stderr
, _("malloc: failed assertion: %s\n"), s
);
299 (void)fflush (stderr
);
304 /* print the file and line number that caused the assertion failure and
305 call botch() to do whatever the application wants with the information */
307 xbotch (mem
, e
, s
, file
, line
)
314 fprintf (stderr
, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
315 file
? file
: "unknown", line
);
316 #ifdef MALLOC_REGISTER
317 if (mem
!= NULL
&& malloc_register
)
318 mregister_describe_mem (mem
, stderr
);
320 (void)fflush (stderr
);
321 botch(s
, file
, line
);
324 /* Coalesce two adjacent free blocks off the free list for size NU - 1,
325 as long as we can find two adjacent free blocks. nextf[NU -1] is
326 assumed to not be busy; the caller (morecore()) checks for this. */
331 register union mhead
*mp
, *mp1
, *mp2
;
336 if (nextf
[nbuck
] == 0)
339 siz
= binsize (nbuck
);
341 mp2
= mp1
= nextf
[nbuck
];
343 while (mp
&& mp
!= (union mhead
*)((char *)mp1
+ siz
))
352 /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
353 CHAIN(mp2) must equal mp1. Check that mp1 and mp are adjacent. */
354 if (mp2
!= mp1
&& CHAIN(mp2
) != mp1
)
355 xbotch ((PTR_T
)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL
, 0);
358 if (CHAIN (mp1
) != (union mhead
*)((char *)mp1
+ siz
))
359 return; /* not adjacent */
363 _mstats
.tbcoalesce
++;
364 _mstats
.ncoalesce
[nbuck
]++;
367 /* Since they are adjacent, remove them from the free list */
368 if (mp1
== nextf
[nbuck
])
369 nextf
[nbuck
] = CHAIN (mp
);
371 CHAIN (mp2
) = CHAIN (mp
);
373 /* And add the combined two blocks to nextf[NU]. */
374 mp1
->mh_alloc
= ISFREE
;
376 CHAIN (mp1
) = nextf
[nu
];
380 /* Split a block at index > NU (but less than SPLIT_MAX) into a set of
381 blocks of the correct size, and attach them to nextf[NU]. nextf[NU]
382 is assumed to be empty. Must be called with signals blocked (e.g.,
388 register union mhead
*mp
;
389 int nbuck
, nblks
, split_max
;
392 split_max
= (maxbuck
> SPLIT_MAX
) ? maxbuck
: SPLIT_MAX
;
396 for (nbuck
= split_max
; nbuck
> nu
; nbuck
--)
398 if (busy
[nbuck
] || nextf
[nbuck
] == 0)
405 for (nbuck
= nu
+ 1; nbuck
<= split_max
; nbuck
++)
407 if (busy
[nbuck
] || nextf
[nbuck
] == 0)
413 if (nbuck
> split_max
|| nbuck
<= nu
)
416 /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
417 and nbuck is below some threshold. */
421 _mstats
.nsplit
[nbuck
]++;
424 /* Figure out how many blocks we'll get. */
426 nblks
= binsize (nbuck
) / siz
;
428 /* Remove the block from the chain of larger blocks. */
430 nextf
[nbuck
] = CHAIN (mp
);
432 /* Split the block and put it on the requested chain. */
436 mp
->mh_alloc
= ISFREE
;
438 if (--nblks
<= 0) break;
439 CHAIN (mp
) = (union mhead
*)((char *)mp
+ siz
);
440 mp
= (union mhead
*)((char *)mp
+ siz
);
446 block_signals (setp
, osetp
)
447 sigset_t
*setp
, *osetp
;
449 #ifdef HAVE_POSIX_SIGNALS
452 sigprocmask (SIG_BLOCK
, setp
, osetp
);
454 # if defined (HAVE_BSD_SIGNALS)
455 *osetp
= sigsetmask (-1);
461 unblock_signals (setp
, osetp
)
462 sigset_t
*setp
, *osetp
;
464 #ifdef HAVE_POSIX_SIGNALS
465 sigprocmask (SIG_SETMASK
, osetp
, (sigset_t
*)NULL
);
467 # if defined (HAVE_BSD_SIGNALS)
473 /* Return some memory to the system by reducing the break. This is only
474 called with NU > pagebucket, so we're always assured of giving back
475 more than one page of memory. */
477 lesscore (nu
) /* give system back some memory */
478 register int nu
; /* size index we're discarding */
483 /* Should check for errors here, I guess. */
489 _mstats
.tsbrk
-= siz
;
490 _mstats
.nlesscore
[nu
]++;
495 morecore (nu
) /* ask system for more memory */
496 register int nu
; /* size index to get more of */
498 register union mhead
*mp
;
501 long sbrk_amt
; /* amount to get via sbrk() */
505 /* Block all signals in case we are executed from a signal handler. */
508 if (interrupt_immediately
|| signal_is_trapped (SIGINT
) || signal_is_trapped (SIGCHLD
))
511 block_signals (&set
, &oset
);
515 siz
= binsize (nu
); /* size of desired block for nextf[nu] */
518 goto morecore_done
; /* oops */
521 _mstats
.nmorecore
[nu
]++;
524 /* Try to split a larger block here, if we're within the range of sizes
533 /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
534 if we can, and we're within the range of the block coalescing limits. */
535 if (nu
>= COMBINE_MIN
&& nu
< COMBINE_MAX
&& busy
[nu
- 1] == 0 && nextf
[nu
- 1])
542 /* Take at least a page, and figure out how many blocks of the requested
543 size we're getting. */
547 nblks
= sbrk_amt
/ siz
;
551 /* We always want to request an integral multiple of the page size
552 from the kernel, so let's compute whether or not `siz' is such
553 an amount. If it is, we can just request it. If not, we want
554 the smallest integral multiple of pagesize that is larger than
555 `siz' and will satisfy the request. */
556 sbrk_amt
= siz
& (pagesz
- 1);
560 sbrk_amt
= siz
+ pagesz
- sbrk_amt
;
566 _mstats
.tsbrk
+= sbrk_amt
;
569 mp
= (union mhead
*) sbrk (sbrk_amt
);
571 /* Totally out of memory. */
577 /* shouldn't happen, but just in case -- require 8-byte alignment */
578 if ((long)mp
& MALIGN_MASK
)
580 mp
= (union mhead
*) (((long)mp
+ MALIGN_MASK
) & ~MALIGN_MASK
);
584 /* save new header and link the nblks blocks together */
588 mp
->mh_alloc
= ISFREE
;
590 if (--nblks
<= 0) break;
591 CHAIN (mp
) = (union mhead
*)((char *)mp
+ siz
);
592 mp
= (union mhead
*)((char *)mp
+ siz
);
598 unblock_signals (&set
, &oset
);
602 malloc_debug_dummy ()
604 write (1, "malloc_debug_dummy\n", 19);
608 #define PREPOP_SIZE 32
614 register union mhead
*mp
;
618 pagesz
= getpagesize ();
622 /* OK, how much do we need to allocate to make things page-aligned?
623 Some of this partial page will be wasted space, but we'll use as
624 much as we can. Once we figure out how much to advance the break
625 pointer, go ahead and do it. */
626 memtop
= curbrk
= sbrk (0);
627 sbrk_needed
= pagesz
- ((long)curbrk
& (pagesz
- 1)); /* sbrk(0) % pagesz */
629 sbrk_needed
+= pagesz
;
631 /* Now allocate the wasted space. */
636 _mstats
.tsbrk
+= sbrk_needed
;
638 curbrk
= sbrk (sbrk_needed
);
639 if ((long)curbrk
== -1)
641 memtop
+= sbrk_needed
;
643 /* Take the memory which would otherwise be wasted and populate the most
644 popular bin (2 == 32 bytes) with it. Add whatever we need to curbrk
645 to make things 32-byte aligned, compute how many 32-byte chunks we're
646 going to get, and set up the bin. */
647 curbrk
+= sbrk_needed
& (PREPOP_SIZE
- 1);
648 sbrk_needed
-= sbrk_needed
& (PREPOP_SIZE
- 1);
649 nunits
= sbrk_needed
/ PREPOP_SIZE
;
653 mp
= (union mhead
*)curbrk
;
655 nextf
[PREPOP_BIN
] = mp
;
658 mp
->mh_alloc
= ISFREE
;
659 mp
->mh_index
= PREPOP_BIN
;
660 if (--nunits
<= 0) break;
661 CHAIN(mp
) = (union mhead
*)((char *)mp
+ PREPOP_SIZE
);
662 mp
= (union mhead
*)((char *)mp
+ PREPOP_SIZE
);
668 /* compute which bin corresponds to the page size. */
669 for (nunits
= 7; nunits
< NBUCKETS
; nunits
++)
670 if (pagesz
<= binsize(nunits
))
678 internal_malloc (n
, file
, line
, flags
) /* get a block */
683 register union mhead
*p
;
685 register char *m
, *z
;
689 /* Get the system page size and align break pointer so future sbrks will
690 be page-aligned. The page size must be at least 1K -- anything
691 smaller is increased. */
693 if (pagealign () < 0)
694 return ((PTR_T
)NULL
);
696 /* Figure out how many bytes are required, rounding up to the nearest
697 multiple of 8, then figure out which nextf[] area to use. Try to
698 be smart about where to start searching -- if the number of bytes
699 needed is greater than the page size, we can start at pagebucket. */
700 nbytes
= ALLOCATED_BYTES(n
);
701 nunits
= (nbytes
<= (pagesz
>> 1)) ? STARTBUCK
: pagebucket
;
702 for ( ; nunits
< NBUCKETS
; nunits
++)
703 if (nbytes
<= binsize(nunits
))
706 /* Silently reject too-large requests. */
707 if (nunits
>= NBUCKETS
)
708 return ((PTR_T
) NULL
);
710 /* In case this is reentrant use of malloc from signal handler,
711 pick a block size that no other malloc level is currently
712 trying to allocate. That's the easiest harmless way not to
713 interfere with the other level of execution. */
715 if (busy
[nunits
]) _mstats
.nrecurse
++;
717 while (busy
[nunits
]) nunits
++;
720 if (nunits
> maxbuck
)
723 /* If there are no blocks of the appropriate size, go get some */
724 if (nextf
[nunits
] == 0)
727 /* Get one block off the list, and set the new list head */
728 if ((p
= nextf
[nunits
]) == NULL
)
733 nextf
[nunits
] = CHAIN (p
);
736 /* Check for free block clobbered */
737 /* If not for this check, we would gobble a clobbered free chain ptr
738 and bomb out on the NEXT allocate of this size block */
739 if (p
->mh_alloc
!= ISFREE
|| p
->mh_index
!= nunits
)
740 xbotch ((PTR_T
)(p
+1), 0, _("malloc: block on free list clobbered"), file
, line
);
742 /* Fill in the info, and set up the magic numbers for range checking. */
743 p
->mh_alloc
= ISALLOC
;
744 p
->mh_magic2
= MAGIC2
;
750 m
= (char *) (p
+ 1) + n
;
751 *m
++ = *z
++, *m
++ = *z
++, *m
++ = *z
++, *m
++ = *z
++;
755 MALLOC_MEMSET ((char *)(p
+ 1), 0xdf, n
); /* scramble previous contents */
758 _mstats
.nmalloc
[nunits
]++;
759 _mstats
.tmalloc
[nunits
]++;
761 _mstats
.bytesreq
+= n
;
762 #endif /* MALLOC_STATS */
765 if (malloc_trace
&& (flags
& MALLOC_NOTRACE
) == 0)
766 mtrace_alloc ("malloc", p
+ 1, n
, file
, line
);
767 else if (_malloc_trace_buckets
[nunits
])
768 mtrace_alloc ("malloc", p
+ 1, n
, file
, line
);
771 #ifdef MALLOC_REGISTER
772 if (malloc_register
&& (flags
& MALLOC_NOREG
) == 0)
773 mregister_alloc ("malloc", p
+ 1, n
, file
, line
);
777 if (_malloc_nwatch
> 0)
778 _malloc_ckwatch (p
+ 1, file
, line
, W_ALLOC
, n
);
781 return (PTR_T
) (p
+ 1);
785 internal_free (mem
, file
, line
, flags
)
790 register union mhead
*p
;
791 register char *ap
, *z
;
793 register unsigned int nbytes
;
794 int ubytes
; /* caller-requested size */
797 if ((ap
= (char *)mem
) == 0)
800 p
= (union mhead
*) ap
- 1;
802 if (p
->mh_alloc
== ISMEMALIGN
)
805 p
= (union mhead
*) ap
- 1;
808 #if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
809 if (malloc_trace
|| malloc_register
)
810 ubytes
= p
->mh_nbytes
;
813 if (p
->mh_alloc
!= ISALLOC
)
815 if (p
->mh_alloc
== ISFREE
)
816 xbotch (mem
, ERR_DUPFREE
,
817 _("free: called with already freed block argument"), file
, line
);
819 xbotch (mem
, ERR_UNALLOC
,
820 _("free: called with unallocated block argument"), file
, line
);
823 ASSERT (p
->mh_magic2
== MAGIC2
);
825 nunits
= p
->mh_index
;
826 nbytes
= ALLOCATED_BYTES(p
->mh_nbytes
);
827 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
828 are now used for the number of bytes allocated, a simple check of
829 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
830 We sanity-check the value of mh_nbytes against the size of the blocks
831 in the appropriate bucket before we use it. This can still cause problems
832 and obscure errors if mh_nbytes is wrong but still within range; the
833 checks against the size recorded at the end of the chunk will probably
834 fail then. Using MALLOC_REGISTER will help here, since it saves the
835 original number of bytes requested. */
837 if (IN_BUCKET(nbytes
, nunits
) == 0)
838 xbotch (mem
, ERR_UNDERFLOW
,
839 _("free: underflow detected; mh_nbytes out of range"), file
, line
);
843 *z
++ = *ap
++, *z
++ = *ap
++, *z
++ = *ap
++, *z
++ = *ap
++;
844 if (mg
.i
!= p
->mh_nbytes
)
845 xbotch (mem
, ERR_ASSERT_FAILED
, _("free: start and end chunk sizes differ"), file
, line
);
848 if (nunits
>= LESSCORE_MIN
&& ((char *)p
+ binsize(nunits
) == memtop
))
850 if (((char *)p
+ binsize(nunits
) == memtop
) && nunits
>= LESSCORE_MIN
)
853 /* If above LESSCORE_FRC, give back unconditionally. This should be set
854 high enough to be infrequently encountered. If between LESSCORE_MIN
855 and LESSCORE_FRC, call lesscore if the bucket is marked as busy (in
856 which case we would punt below and leak memory) or if there's already
857 a block on the free list. */
858 if ((nunits
>= LESSCORE_FRC
) || busy
[nunits
] || nextf
[nunits
] != 0)
861 /* keeps the tracing and registering code in one place */
868 MALLOC_MEMSET (mem
, 0xcf, p
->mh_nbytes
);
871 ASSERT (nunits
< NBUCKETS
);
872 p
->mh_alloc
= ISFREE
;
874 if (busy
[nunits
] == 1)
875 return; /* this is bogus, but at least it won't corrupt the chains */
877 /* Protect against signal handlers calling malloc. */
879 /* Put this block on the free list. */
880 CHAIN (p
) = nextf
[nunits
];
885 ; /* Empty statement in case this is the end of the function */
888 _mstats
.nmalloc
[nunits
]--;
890 #endif /* MALLOC_STATS */
893 if (malloc_trace
&& (flags
& MALLOC_NOTRACE
) == 0)
894 mtrace_free (mem
, ubytes
, file
, line
);
895 else if (_malloc_trace_buckets
[nunits
])
896 mtrace_free (mem
, ubytes
, file
, line
);
899 #ifdef MALLOC_REGISTER
900 if (malloc_register
&& (flags
& MALLOC_NOREG
) == 0)
901 mregister_free (mem
, ubytes
, file
, line
);
905 if (_malloc_nwatch
> 0)
906 _malloc_ckwatch (mem
, file
, line
, W_FREE
, ubytes
);
911 internal_realloc (mem
, n
, file
, line
, flags
)
917 register union mhead
*p
;
918 register u_bits32_t tocopy
;
919 register unsigned int nbytes
;
921 register char *m
, *z
;
930 internal_free (mem
, file
, line
, MALLOC_INTERNAL
);
933 if ((p
= (union mhead
*) mem
) == 0)
934 return internal_malloc (n
, file
, line
, MALLOC_INTERNAL
);
937 nunits
= p
->mh_index
;
938 ASSERT (nunits
< NBUCKETS
);
940 if (p
->mh_alloc
!= ISALLOC
)
941 xbotch (mem
, ERR_UNALLOC
,
942 _("realloc: called with unallocated block argument"), file
, line
);
944 ASSERT (p
->mh_magic2
== MAGIC2
);
945 nbytes
= ALLOCATED_BYTES(p
->mh_nbytes
);
946 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
947 are now used for the number of bytes allocated, a simple check of
948 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
949 We sanity-check the value of mh_nbytes against the size of the blocks
950 in the appropriate bucket before we use it. This can still cause problems
951 and obscure errors if mh_nbytes is wrong but still within range; the
952 checks against the size recorded at the end of the chunk will probably
953 fail then. Using MALLOC_REGISTER will help here, since it saves the
954 original number of bytes requested. */
955 if (IN_BUCKET(nbytes
, nunits
) == 0)
956 xbotch (mem
, ERR_UNDERFLOW
,
957 _("realloc: underflow detected; mh_nbytes out of range"), file
, line
);
959 m
= (char *)mem
+ (tocopy
= p
->mh_nbytes
);
961 *z
++ = *m
++, *z
++ = *m
++, *z
++ = *m
++, *z
++ = *m
++;
962 if (mg
.i
!= p
->mh_nbytes
)
963 xbotch (mem
, ERR_ASSERT_FAILED
, _("realloc: start and end chunk sizes differ"), file
, line
);
966 if (_malloc_nwatch
> 0)
967 _malloc_ckwatch (p
+ 1, file
, line
, W_REALLOC
, n
);
970 _mstats
.bytesreq
+= (n
< tocopy
) ? 0 : n
- tocopy
;
973 /* See if desired size rounds to same power of 2 as actual size. */
974 nbytes
= ALLOCATED_BYTES(n
);
976 /* If ok, use the same block, just marking its size as changed. */
977 if (RIGHT_BUCKET(nbytes
, nunits
))
980 m
= (char *)mem
+ p
->mh_nbytes
;
982 /* Compensate for increment above. */
985 *m
++ = 0; *m
++ = 0; *m
++ = 0; *m
++ = 0;
986 m
= (char *)mem
+ (p
->mh_nbytes
= n
);
990 *m
++ = *z
++, *m
++ = *z
++, *m
++ = *z
++, *m
++ = *z
++;
1002 if ((m
= internal_malloc (n
, file
, line
, MALLOC_INTERNAL
|MALLOC_NOTRACE
|MALLOC_NOREG
)) == 0)
1004 FASTCOPY (mem
, m
, tocopy
);
1005 internal_free (mem
, file
, line
, MALLOC_INTERNAL
);
1008 if (malloc_trace
&& (flags
& MALLOC_NOTRACE
) == 0)
1009 mtrace_alloc ("realloc", m
, n
, file
, line
);
1010 else if (_malloc_trace_buckets
[nunits
])
1011 mtrace_alloc ("realloc", m
, n
, file
, line
);
1014 #ifdef MALLOC_REGISTER
1015 if (malloc_register
&& (flags
& MALLOC_NOREG
) == 0)
1016 mregister_alloc ("realloc", m
, n
, file
, line
);
1020 if (_malloc_nwatch
> 0)
1021 _malloc_ckwatch (m
, file
, line
, W_RESIZED
, n
);
1028 internal_memalign (alignment
, size
, file
, line
, flags
)
1029 unsigned int alignment
;
1035 register char *aligned
;
1036 register union mhead
*p
;
1038 ptr
= internal_malloc (size
+ alignment
, file
, line
, MALLOC_INTERNAL
);
1042 /* If entire block has the desired alignment, just accept it. */
1043 if (((long) ptr
& (alignment
- 1)) == 0)
1045 /* Otherwise, get address of byte in the block that has that alignment. */
1047 aligned
= (char *) (((long) ptr
+ alignment
- 1) & -alignment
);
1049 aligned
= (char *) (((long) ptr
+ alignment
- 1) & (~alignment
+ 1));
1052 /* Store a suitable indication of how to free the block,
1053 so that free can find the true beginning of it. */
1054 p
= (union mhead
*) aligned
- 1;
1055 p
->mh_nbytes
= aligned
- ptr
;
1056 p
->mh_alloc
= ISMEMALIGN
;
1061 #if !defined (NO_VALLOC)
1062 /* This runs into trouble with getpagesize on HPUX, and Multimax machines.
1063 Patching out seems cleaner than the ugly fix needed. */
1065 internal_valloc (size
, file
, line
, flags
)
1070 return internal_memalign (getpagesize (), size
, file
, line
, flags
|MALLOC_INTERNAL
);
1072 #endif /* !NO_VALLOC */
1076 internal_calloc (n
, s
, file
, line
, flags
)
1085 result
= internal_malloc (total
, file
, line
, flags
|MALLOC_INTERNAL
);
1087 memset (result
, 0, total
);
1092 internal_cfree (p
, file
, line
, flags
)
1097 internal_free (p
, file
, line
, flags
|MALLOC_INTERNAL
);
1099 #endif /* !NO_CALLOC */
1103 malloc_free_blocks (size
)
1107 register union mhead
*p
;
1110 for (p
= nextf
[size
]; p
; p
= CHAIN (p
))
1117 #if defined (MALLOC_WRAPFUNCS)
1119 sh_malloc (bytes
, file
, line
)
1124 return internal_malloc (bytes
, file
, line
, MALLOC_WRAPPER
);
1128 sh_realloc (ptr
, size
, file
, line
)
1134 return internal_realloc (ptr
, size
, file
, line
, MALLOC_WRAPPER
);
1138 sh_free (mem
, file
, line
)
1143 internal_free (mem
, file
, line
, MALLOC_WRAPPER
);
1147 sh_memalign (alignment
, size
, file
, line
)
1148 unsigned int alignment
;
1153 return internal_memalign (alignment
, size
, file
, line
, MALLOC_WRAPPER
);
1158 sh_calloc (n
, s
, file
, line
)
1163 return internal_calloc (n
, s
, file
, line
, MALLOC_WRAPPER
);
1167 sh_cfree (mem
, file
, line
)
1172 internal_cfree (mem
, file
, line
, MALLOC_WRAPPER
);
1178 sh_valloc (size
, file
, line
)
1183 return internal_valloc (size
, file
, line
, MALLOC_WRAPPER
);
1185 #endif /* !NO_VALLOC */
1187 #endif /* MALLOC_WRAPFUNCS */
1189 /* Externally-available functions that call their internal counterparts. */
1195 return internal_malloc (size
, (char *)NULL
, 0, 0);
1199 realloc (mem
, nbytes
)
1203 return internal_realloc (mem
, nbytes
, (char *)NULL
, 0, 0);
1210 internal_free (mem
, (char *)NULL
, 0, 0);
1214 memalign (alignment
, size
)
1215 unsigned int alignment
;
1218 return internal_memalign (alignment
, size
, (char *)NULL
, 0, 0);
1226 return internal_valloc (size
, (char *)NULL
, 0, 0);
1235 return internal_calloc (n
, s
, (char *)NULL
, 0, 0);
1242 internal_cfree (mem
, (char *)NULL
, 0, 0);