/* malloc.c - dynamic memory allocation for bash. */

/* Copyright (C) 1985-2021 Free Software Foundation, Inc.

   This file is part of GNU Bash, the Bourne-Again SHell.

   Bash is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Bash is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Bash.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 *	U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 *	Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * [VERY] old explanation:
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */
/*
 * nextf[i] is the pointer to the next free block of size 2^(i+5).  The
 * smallest allocatable block is 32 bytes.  The overhead information will
 * go in the first 16 bytes of the block, and the returned pointer will point
 * to the rest.
 */
/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */
#if defined (HAVE_CONFIG_H)
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#if defined (SHELL)
#  include "bashtypes.h"
#else
#  include <sys/types.h>
#endif

#if defined (HAVE_UNISTD_H)
#  include <unistd.h>
#endif

/* Determine which kind of system this is.  */
#include <signal.h>

#if defined (HAVE_STRING_H)
#  include <string.h>
#else
#  include <strings.h>
#endif

#if defined (HAVE_MMAP)
#  include <sys/mman.h>
#endif

/* Define getpagesize () if the system does not.  */
#ifndef HAVE_GETPAGESIZE
#  include "getpagesize.h"
#endif

#ifdef MALLOC_REGISTER
#  include "table.h"
#endif
/* Could also use (((x) & -(x)) == (x)) */
#define powerof2(x)	((((x) - 1) & (x)) == 0)

/* System-specific omissions. */

#define MALLOC_PAGESIZE_MIN	4096
#define MALLOC_INCR_PAGES	8192
#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block.  */
/* We have a flag indicating whether memory is allocated, an index in
   nextf[], a size field, and a sentinel value to determine whether or
   not a caller wrote before the start of allocated memory; to realloc()
   memory we either copy mh_nbytes or just change mh_nbytes if there is
   enough room in the block for the new size.  Range checking is always
   done. */
union mhead {
  bits64_t mh_align[2];						/* 16 */
  struct {
    char mi_alloc;		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
    char mi_magic8[8];		/* MAGIC1 guard bytes */	/* 8 */
  } minfo;
};
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2
#define mh_magic8	minfo.mi_magic8

#define MAGIC8_NUMBYTES	8
#define MALLOC_SIZE_T	u_bits32_t

#define MOVERHEAD	sizeof(union mhead)

#define MALIGN_MASK	15	/* one less than desired alignment */
/* Guard bytes we write at the end of the allocation, encoding the size. */
typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;
/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that.  */
/* If we have mmap, this is not used for chunks larger than mmap_threshold,
   since we munmap immediately on free(). */

/* If SIZEOF_CHAR_P == 8, this goes into the mh_magic8 buffer at the end of
   the rest of the struct. This may need adjusting. */
#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))
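
/* Illustrative sketch (editor's addition, not in the original source): the
   push/pop idiom used with CHAIN throughout this file.  Pushing block MP
   onto free list I and popping the head back off look like:

       CHAIN (mp) = nextf[i];     push: mp's link = old head
       nextf[i] = mp;             new head = mp

       mp = nextf[i];             pop: take the head block
       nextf[i] = CHAIN (mp);     advance the head past it

   The link lives inside the block's user area, which is safe only because
   the block is free at the time. */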
/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the bytes before the block's real space (-SIZEOF_CHAR_P bytes) */
#define MAGIC1	0x55
#define MAGIC2	0x5555
#define MSLOP	4		/* 4 bytes extra for u_bits32_t end guard size */
/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 16 (alignment) after accounting for
   malloc overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
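
/* Worked example (editor's addition): with MOVERHEAD == 16 (the 16-byte
   union mhead), MSLOP == 4, and MALIGN_MASK == 15, a request for n == 100
   bytes needs 100 + 16 + 4 == 120 bytes, rounded up to the next multiple
   of 16:

       ALLOCATED_BYTES (100) == (120 + 15) & ~15 == 128

   so a 100-byte request is carved from the 128-byte bucket (index 2). */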
#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \
    } \
  while (0)
/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	1	/* 64 */
#define SPLIT_MID	9	/* 16384 */
#define SPLIT_MAX	12	/* 131072 */

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	1	/* 64 */
#define COMBINE_MAX	(pagebucket - 1)	/* 2048 for 4096-byte pages */

#define LESSCORE_MIN	8	/* 8192 */
#define LESSCORE_FRC	11	/* 65536 */
/* Which bin do we prepopulate with the initial sbrk memory? */
#define PREPOP_BIN	1
#define PREPOP_SIZE	64
/* Should we use mmap for large allocations? */
#if defined (HAVE_MMAP)
#  if defined (MAP_ANON) && !defined (MAP_ANONYMOUS)
#    define MAP_ANONYMOUS MAP_ANON
#  endif
#endif

#if defined (HAVE_MMAP) && defined (MAP_ANONYMOUS)
#  define USE_MMAP 1
#endif

#if defined (USE_MMAP)
#  define MMAP_THRESHOLD	12	/* must be >= SPLIT_MAX, COMBINE_MAX */
#else
#  define MMAP_THRESHOLD	(8 * SIZEOF_LONG)
#endif

/* We don't try to decipher the differences between the Linux-style and
   BSD-style implementations of mremap here; we use the Linux one. */
#if USE_MMAP == 1 && defined (HAVE_MREMAP) && defined (MREMAP_MAYMOVE)
#  define USE_MREMAP 1
#endif
/* usable bins from STARTBUCK..NBUCKETS-1 */
#define STARTBUCK	1
#define NBUCKETS	28

/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */
#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08
/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
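
/* Worked example (editor's addition): continuing the 100-byte request above,
   nbytes == 128.  IN_BUCKET (128, 2) is true (128 <= binsizes[2] == 128), but
   so is IN_BUCKET (128, 3) -- which is why free() can only verify an upper
   bound.  RIGHT_BUCKET (128, 2) is also true (64 < 128 <= 128), while
   RIGHT_BUCKET (128, 3) is false (128 is not > 128). */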
/* nextf[i] is free list of blocks of size 2**(i + 5)  */

static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while allocation or free of block size i is in progress. */

static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static const unsigned long binsizes[NBUCKETS] = {
	32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};

/* binsizes[x] == (1 << ((x) + 5)) */
#define binsize(x)	binsizes[(x)]

#define MAXALLOC_SIZE	binsizes[NBUCKETS-1]
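
/* Editor's illustrative sketch, not part of the original allocator: how a
   raw request size maps to a bucket index, mirroring the search loop in
   internal_malloc () below.  STARTBUCK and NBUCKETS are assumed to be as
   defined above.  Wrapped in #if 0 so it is never compiled. */
#if 0
static int
find_bucket (size_t n)
{
  MALLOC_SIZE_T nbytes;
  int nunits;

  nbytes = ALLOCATED_BYTES (n);		/* header + end guard, 16-aligned */
  for (nunits = STARTBUCK; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize (nunits))
      return nunits;			/* e.g., find_bucket (100) == 2 */
  return -1;				/* larger than the biggest bucket */
}
#endif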
/* Declarations for internal functions */
static PTR_T internal_malloc PARAMS((size_t, const char *, int, int));
static PTR_T internal_realloc PARAMS((PTR_T, size_t, const char *, int, int));
static void internal_free PARAMS((PTR_T, const char *, int, int));
static PTR_T internal_memalign PARAMS((size_t, size_t, const char *, int, int));
static PTR_T internal_calloc PARAMS((size_t, size_t, const char *, int, int));
static void internal_cfree PARAMS((PTR_T, const char *, int, int));
static PTR_T internal_valloc PARAMS((size_t, const char *, int, int));
static PTR_T internal_remap PARAMS((PTR_T, size_t, int, int));

#ifdef botch
extern void botch ();
#else
static void botch PARAMS((const char *, const char *, int));
#endif
static void xbotch PARAMS((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */

extern int running_trap;
extern int signal_is_trapped PARAMS((int));

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */
/* Debugging variables available to applications. */
int malloc_flags = 0;	/* future use */
int malloc_trace = 0;	/* trace allocations and frees to stderr */
int malloc_register = 0;	/* future use */

/* Use a variable in case we want to dynamically adapt it in the future */
int malloc_mmap_threshold = MMAP_THRESHOLD;

char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc PARAMS((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free PARAMS((PTR_T, int, const char *, int));
static void
botch (s, file, line)
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  (void)fflush (stderr);
  abort ();
}
/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */
static void
xbotch (mem, e, s, file, line)
     PTR_T mem;
     int e;
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
		file ? file : _("unknown"), line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif /* MALLOC_REGISTER */
  (void)fflush (stderr);
  botch(s, file, line);
}
/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU -1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  busy[nbuck] = 1;
  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }

  if (mp == 0)
    {
      busy[nbuck] = 0;
      return;
    }

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    {
      busy[nbuck] = 0;
      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
    }

  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    {
      busy[nbuck] = 0;
      return;	/* not adjacent */
    }

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif /* MALLOC_STATS */

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}
/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1. */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  busy[nbuck] = 1;
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}
/* Take the memory block MP and add it to a chain < NU.  NU is the right bucket,
   but is busy.  This avoids memory orphaning. */
static void
xsplit (mp, nu)
     union mhead *mp;
     int nu;
{
  union mhead *nh;
  int nbuck, nblks, split_max;
  unsigned long siz;

  nbuck = nu - 1;
  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)
    return;

#ifdef MALLOC_STATS
  _mstats.nsplit[nu]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */
  nh = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  busy[nbuck] = 1;
  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = nh;
  busy[nbuck] = 0;
}
void
_malloc_block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

void
_malloc_unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}
#if defined (USE_LESSCORE)
/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif
}
#endif /* USE_LESSCORE */
/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */
static void
morecore (nu)
     register int nu;		/* size index to get more of */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
#  if defined (SIGCHLD)
  if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#  else
  if (running_trap || signal_is_trapped (SIGINT))
#  endif
    {
      _malloc_block_signals (&set, &oset);
      blocked_sigs = 1;
    }
#endif

  siz = binsize (nu);		/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN && nu <= malloc_mmap_threshold)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && nu <= malloc_mmap_threshold && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }
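
  /* Worked example (editor's addition): with 4096-byte pages, a request for
     bucket 3 (siz == 256) takes the `siz <= pagesz' branch above and asks
     sbrk() for one page, yielding nblks == 16 chained blocks.  A request for
     bucket 13 (siz == 262144) is already a multiple of pagesz, so sbrk_amt
     stays 262144 and nblks == 1. */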
#if defined (USE_MMAP)
  if (nu > malloc_mmap_threshold)
    {
      mp = (union mhead *)mmap (0, sbrk_amt, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if ((void *)mp == MAP_FAILED)
	goto morecore_done;
      nextf[nu] = mp;
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      CHAIN (mp) = 0;
#ifdef MALLOC_STATS
      _mstats.tmmap += sbrk_amt;
#endif
      goto morecore_done;
    }
#endif /* USE_MMAP */

#ifdef MALLOC_STATS
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8- or 16-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    _malloc_unblock_signals (&set, &oset);
}
static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}
static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < MALLOC_PAGESIZE_MIN)
    pagesz = MALLOC_PAGESIZE_MIN;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (3 == 64 bytes) with it.  Add whatever we need to curbrk
	 to make things 64-byte aligned, compute how many 64-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}
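
/* Editor's note (addition): with pagesz == 4096 the final loop in
   pagealign () stops immediately at index 7, since binsize (7) == 4096, so
   pagebucket == 7 and requests of half a page or more start their bucket
   search there rather than at STARTBUCK. */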
static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  MALLOC_SIZE_T nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
#if SIZEOF_SIZE_T == 8
  if (ALLOCATED_BYTES(n) > MAXALLOC_SIZE)
    return ((PTR_T) NULL);
#endif
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests. XXX - can increase this if HAVE_MMAP */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution.  */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

  MALLOC_MEMSET ((char *)p->mh_magic8, MAGIC1, MAGIC8_NUMBYTES);

  /* Write the size into the guard bytes just past the caller's region. */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
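
  /* Editor's note (addition): the four bytes just past the N bytes handed
     to the caller now hold N itself (via the mguard_t union), so free() and
     realloc() can compare this trailing size against mh_nbytes to detect
     overruns of the user region. */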
#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

#if defined (MALLOC_DEBUG)
  z = (char *) (p + 1);
  /* Check alignment of returned pointer */
  if ((unsigned long)z & MALIGN_MASK)
    fprintf (stderr, "malloc: %s:%d: warning: request for %d bytes not aligned on %d byte boundary\r\n",
	file ? file : _("unknown"), line, p->mh_nbytes, MALIGN_MASK+1);
#endif

  return (PTR_T) (p + 1);
}
static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register MALLOC_SIZE_T nbytes;
  MALLOC_SIZE_T ubytes;		/* caller-requested size */
  mguard_t mg;
  int i;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER) || defined (MALLOC_WATCH)
  if (malloc_trace || malloc_register || _malloc_nwatch > 0)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* The MAGIC8_NUMBYTES bytes before the memory handed to the user are now
     used for a simple check to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);

  for (i = 0, z = p->mh_magic8; i < MAGIC8_NUMBYTES; i++)
    if (*z++ != MAGIC1)
      xbotch (mem, ERR_UNDERFLOW,
	      _("free: underflow detected; magic8 corrupted"), file, line);

  /* Read the end guard back and compare it with the recorded size. */
  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

#if defined (USE_MMAP)
  if (nunits > malloc_mmap_threshold)
    {
      munmap (p, binsize (nunits));
#if defined (MALLOC_STATS)
      _mstats.nlesscore[nunits]++;
#endif
      goto free_return;
    }
#endif

#if defined (USE_LESSCORE)
  /* We take care of the mmap case and munmap above */
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }
#endif /* USE_LESSCORE */

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;	/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}
#if defined (USE_MREMAP)
/* Assume the caller (internal_realloc) has already performed the sanity and
   overflow tests. Basically we kill the old guard information, determine the
   new size, call mremap with the new size, and add the bookkeeping and guard
   information back in. */
static PTR_T
internal_remap (mem, n, nunits, flags)
     PTR_T mem;
     register size_t n;
     int nunits;
     int flags;
{
  register union mhead *p, *np;
  char *m, *z;
  mguard_t mg;
  MALLOC_SIZE_T nbytes;

  if (nunits >= NBUCKETS)	/* Uh oh */
    return ((PTR_T) NULL);

  p = (union mhead *)mem - 1;

  m = (char *)mem + p->mh_nbytes;
  *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;	/* erase guard */

  nbytes = ALLOCATED_BYTES(n);

  np = (union mhead *)mremap (p, binsize (p->mh_index), binsize (nunits), MREMAP_MAYMOVE);
  if (np == MAP_FAILED)
    return ((PTR_T) NULL);

  np->mh_alloc = ISALLOC;
  np->mh_magic2 = MAGIC2;
  MALLOC_MEMSET ((char *)np->mh_magic8, MAGIC1, MAGIC8_NUMBYTES);

  np->mh_index = nunits;
  np->mh_nbytes = n;

  /* Rewrite the end guard for the new size. */
  mg.i = n;
  z = mg.s;
  m = (char *)(np + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

  return ((PTR_T)(np + 1));
}
#endif /* USE_MREMAP */
static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register MALLOC_SIZE_T tocopy;
  register MALLOC_SIZE_T nbytes;
  register int newunits, nunits;
  register char *m, *z;
  mguard_t mg;
  int i;

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }

  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);

  for (i = 0, z = p->mh_magic8; i < MAGIC8_NUMBYTES; i++)
    if (*z++ != MAGIC1)
      xbotch (mem, ERR_UNDERFLOW,
	      _("realloc: underflow detected; magic8 corrupted"), file, line);

  /* Read the end guard and compare it with the recorded size. */
  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* If we're reallocating to the same size as previously, return now */
  if (n == p->mh_nbytes)
    return mem;

#if SIZEOF_SIZE_T == 8
  if (ALLOCATED_BYTES(n) > MAXALLOC_SIZE)
    return ((PTR_T) NULL);
#endif

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed.  */
  if (RIGHT_BUCKET(nbytes, nunits) || RIGHT_BUCKET(nbytes, nunits-1))
    {
      /* Compensate for increment above. */
      m -= 4;

      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

#if defined (USE_MREMAP)
  /* If we are using mmap and have mremap, we use it here.  Make sure that
     the old size and new size are above the threshold where we use mmap */
  if (nbytes > p->mh_nbytes)
    {
      newunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
      for ( ; newunits < NBUCKETS; newunits++)
	if (nbytes <= binsize(newunits))
	  break;

      if (nunits > malloc_mmap_threshold && newunits > malloc_mmap_threshold)
	{
	  m = internal_remap (mem, n, newunits, MALLOC_INTERNAL);
	  if (m == 0)
	    return 0;
	  return m;
	}
    }
#endif /* USE_MREMAP */

  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}
static PTR_T
internal_memalign (alignment, size, file, line, flags)
     size_t alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it. */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
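
/* Worked example (editor's addition): if internal_malloc returns ptr ==
   0x1008 and alignment == 16, then aligned == (0x1008 + 15) & ~15 == 0x1010.
   The mhead just below 0x1010 records mh_nbytes == 8 (the distance back to
   ptr) and is marked ISMEMALIGN, so free (0x1010) can step back to the true
   start of the block. */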
int
posix_memalign (memptr, alignment, size)
     void **memptr;
     size_t alignment, size;
{
  void *mem;

  /* Perform posix-mandated error checking here */
  if ((alignment % sizeof (void *) != 0) || alignment == 0)
    return EINVAL;
  else if (powerof2 (alignment) == 0)
    return EINVAL;

  mem = internal_memalign (alignment, size, (char *)0, 0, 0);
  if (mem != 0)
    {
      *memptr = mem;
      return 0;
    }
  return ENOMEM;
}
size_t
malloc_usable_size (mem)
     PTR_T mem;
{
  register union mhead *p;
  register char *ap;

  if ((ap = (char *)mem) == 0)
    return 0;

  /* Find the true start of the memory block to discover which bin */
  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

  /* return 0 if ISFREE */
  if (p->mh_alloc == ISFREE)
    return 0;

  /* Since we use bounds checking, the usable size is the last requested size. */
  return (p->mh_nbytes);
}
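
/* Editor's note (addition): unlike allocators that report the full bucket
   capacity, this returns exactly what the caller last asked for -- e.g.,
   malloc (100) yields a usable size of 100 even though the block came from
   the 128-byte bucket, because the end guard sits right after byte 100. */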
#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */
#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */
/* Return the number of blocks currently on the free list for bucket SIZE. */
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_CALLOC */

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */
/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif /* !NO_CALLOC */