/* malloc.c - dynamic memory allocation for bash. */

/*  Copyright (C) 1985-2003 Free Software Foundation, Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2, or (at your option)
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA.

In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
what you give them.  Help stamp out software-hoarding! */

/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information will
 * go in the first int of the block, and the returned pointer will point
 * to the second.
 */
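
/* Illustrative sketch (not in the original source): how an overhead-adjusted
   byte count maps to a bucket index under the 2^(i+3) rule described above.
   Compiled out with #if 0; the function name is hypothetical, and it refers
   to the binsizes[] table defined further below. */
#if 0
static int
sketch_bucket_index (siz)
     unsigned long siz;
{
  int i;

  for (i = 0; i < NBUCKETS; i++)
    if (siz <= binsizes[i])
      return i;		/* e.g. 8 -> 0, 9 -> 1, 100 -> 4, 5000 -> 10 */
  return -1;		/* larger than any bucket */
}
#endif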

/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */

#if defined (HAVE_CONFIG_H)
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#if defined (SHELL)
#  include "bashtypes.h"
#  include "stdc.h"
#else
#  include <sys/types.h>
#endif

#if defined (HAVE_UNISTD_H)
#  include <unistd.h>
#endif

/* Determine which kind of system this is. */
#include <signal.h>

#if defined (HAVE_STRING_H)
#  include <string.h>
#else
#  include <strings.h>
#endif

#include <stdio.h>

/* Define getpagesize () if the system does not. */
#ifndef HAVE_GETPAGESIZE
#  include "getpagesize.h"
#endif

#include "imalloc.h"
#ifdef MALLOC_STATS
#  include "mstats.h"
#endif
#ifdef MALLOC_REGISTER
#  include "table.h"
#endif
#ifdef MALLOC_WATCH
#  include "watch.h"
#endif

/* System-specific omissions. */
#ifdef HPUX
#  define NO_VALLOC
#endif

#define NBUCKETS	30

#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block. */


/* We have a flag indicating whether memory is allocated, an index in
   nextf[], a size field, and a sentinel value to determine whether or
   not a caller wrote before the start of allocated memory; to realloc()
   memory we either copy mh_nbytes or just change mh_nbytes if there is
   enough room in the block for the new size.  Range checking is always
   done. */
union mhead {
  bits64_t mh_align;						/* 8 */
  struct {
    char mi_alloc;		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
  } minfo;
};
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2

#define MOVERHEAD	sizeof(union mhead)
#define MALIGN_MASK	7	/* one less than desired alignment */

typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;

/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that.  */

#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))

/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the 2 bytes before the block's real space (-4 bytes) */
#define MAGIC2 0x5555
#define MSLOP  4		/* 4 bytes extra for u_bits32_t size */

/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 8 after accounting for malloc
   overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
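
/* Illustrative note (not in the original source): the resulting layout of an
   allocated block, and a worked instance of ALLOCATED_BYTES().  With
   MOVERHEAD == 8, MSLOP == 4, and MALIGN_MASK == 7, a 25-byte request needs
   (25 + 8 + 4 + 7) & ~7 == 40 bytes in all:

     |<- union mhead (8) ->|<- 25 user bytes ->|<- 4-byte mguard_t ->| pad |
                           ^-- pointer returned to the caller

   The trailing guard records mh_nbytes a second time so that free() and
   realloc() can detect writes past the end of the user region. */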

#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, __STRING(p), file, line); \
    } \
  while (0)

/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	2	/* XXX - was 3 */
#define SPLIT_MID	11
#define SPLIT_MAX	14

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	2
#define COMBINE_MAX	(pagebucket - 1)	/* XXX */

#define LESSCORE_MIN	10
#define LESSCORE_FRC	13

#define STARTBUCK	1

/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */

/* Future use. */
#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08

/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
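
/* Illustrative note (not in the original source): the difference between the
   two checks.  With binsizes[] as below, nb == 40 satisfies both
   IN_BUCKET(40, 3) and RIGHT_BUCKET(40, 3), since 32 < 40 <= 64, but only
   IN_BUCKET(40, 4) -- the looser test tolerates an allocation that was
   bumped to a larger bucket because the right-sized one was busy. */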

/* nextf[i] is free list of blocks of size 2**(i + 3) */

static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while allocation of block size i is in progress. */

static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static unsigned long binsizes[NBUCKETS] = {
	8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};
/* binsizes[x] == (1 << ((x) + 3)) for every bucket except the last: 2^32
   does not fit in a 32-bit unsigned long, so the top entry is capped at
   4294967295 (2^32 - 1). */
#define binsize(x)	binsizes[(x)]

/* Declarations for internal functions */
static PTR_T internal_malloc __P((size_t, const char *, int, int));
static PTR_T internal_realloc __P((PTR_T, size_t, const char *, int, int));
static void internal_free __P((PTR_T, const char *, int, int));
static PTR_T internal_memalign __P((unsigned int, size_t, const char *, int, int));
#ifndef NO_CALLOC
static PTR_T internal_calloc __P((size_t, size_t, const char *, int, int));
static void internal_cfree __P((PTR_T, const char *, int, int));
#endif
#ifndef NO_VALLOC
static PTR_T internal_valloc __P((size_t, const char *, int, int));
#endif

#if defined (botch)
extern void botch ();
#else
static void botch __P((const char *, const char *, int));
#endif
static void xbotch __P((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */

#ifdef SHELL
extern int interrupt_immediately;
extern int signal_is_trapped __P((int));
#endif

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */

/* Debugging variables available to applications. */
int malloc_flags = 0;		/* future use */
int malloc_trace = 0;		/* trace allocations and frees to stderr */
int malloc_register = 0;	/* future use */

#ifdef MALLOC_TRACE
char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc __P((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free __P((PTR_T, int, const char *, int));
#endif

#if !defined (botch)
static void
botch (s, file, line)
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  (void)fflush (stderr);
  abort ();
}
#endif

/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */
static void
xbotch (mem, e, s, file, line)
     PTR_T mem;
     int e;
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
		file ? file : "unknown", line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif
  (void)fflush (stderr);
  botch(s, file, line);
}

/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU - 1] is
   assumed to not be busy; the caller (morecore()) checks for this. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0)
    return;

  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }
  if (mp == 0)
    return;

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);

#ifdef MALLOC_DEBUG
  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    return;	/* not adjacent */
#endif

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}
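
/* Illustrative note (not in the original source): bcoalesce() looks for a
   block on nextf[nu - 1] whose immediate neighbor in memory is also on that
   list, then merges the pair into one block of the next size up.  For
   example, two adjacent free 64-byte blocks at addresses A and A+64 come off
   nextf[3] and re-enter the lists as a single 128-byte block at A on
   nextf[4]. */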

/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()). */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Remove the block from the chain of larger blocks. */
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}
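
/* Illustrative note (not in the original source): splitting is the inverse
   of coalescing.  A request for bucket 2 (32 bytes) that finds nextf[2]
   empty can carve one 256-byte block from nextf[5] into 256 / 32 == 8 free
   32-byte blocks, chained together and hung off nextf[2]. */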

static void
block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

static void
unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}
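
/* Illustrative sketch (not in the original source): the bracketing pattern
   morecore() uses below, compiled out with #if 0.  The function name is
   hypothetical; the saved mask in OSET is what makes the bracketing safe to
   nest inside arbitrary signal contexts. */
#if 0
static void
sketch_critical_section ()
{
  sigset_t set, oset;

  block_signals (&set, &oset);		/* mask all signals */
  /* ... manipulate the free lists ... */
  unblock_signals (&set, &oset);	/* restore the caller's mask */
}
#endif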

/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif
}

static void
morecore (nu)			/* ask system for more memory */
     register int nu;		/* size index to get more of */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
  if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#endif
    {
      block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);		/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }
  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }
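
  /* Illustrative note (not in the original source): with pagesz == 4096, a
     512-byte bucket gets sbrk_amt == 4096 and nblks == 8; a 16384-byte
     bucket is already a page multiple, so sbrk_amt == 16384 and nblks == 1.
     Since bucket sizes and the page size are both powers of two here, the
     round-up branch is a safety net rather than the common case. */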

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    unblock_signals (&set, &oset);
}

static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}

#define PREPOP_BIN	2
#define PREPOP_SIZE	32

static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < 1024)
    pagesz = 1024;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.nsbrk++;
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (2 == 32 bytes) with it.  Add whatever we need to curbrk
	 to make things 32-byte aligned, compute how many 32-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}
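
/* Illustrative note (not in the original source): with pagesz == 4096 and an
   initial break of 0x80f64, pagealign() advances the break by 0x9c (156)
   bytes to 0x81000.  Of those 156 bytes, 128 are usable as 32-byte chunks,
   so four blocks are prepopulated onto nextf[PREPOP_BIN], and pagebucket
   becomes 9 (binsize(9) == 4096). */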

static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  long nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests. */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution. */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

  /* End guard */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

  return (PTR_T) (p + 1);
}

static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */
  mguard_t mg;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
  if (malloc_trace || malloc_register)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

#if 1
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
#else
  if (((char *)p + binsize(nunits) == memtop) && nunits >= LESSCORE_MIN)
#endif
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy (in
	 which case we would punt below and leak memory) or if there's already
	 a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);
  p->mh_alloc = ISFREE;

  if (busy[nunits] == 1)
    return;	/* this is bogus, but at least it won't corrupt the chains */

  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;		/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}

static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed. */
  if (RIGHT_BUCKET(nbytes, nunits))
    {
#if 0
      m = (char *)mem + p->mh_nbytes;
#else
      /* Compensate for increment above. */
      m -= 4;
#endif
      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

  if (n < tocopy)
    tocopy = n;

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}

static PTR_T
internal_memalign (alignment, size, file, line, flags)
     unsigned int alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it. */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
#if 0
  aligned = (char *) (((long) ptr + alignment - 1) & -alignment);
#else
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));
#endif

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
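
/* Illustrative note (not in the original source): the round-up expression
   (ptr + a - 1) & -a with a power-of-two alignment a.  For ptr == 0x8010 and
   alignment == 64, aligned == (0x8010 + 63) & ~63 == 0x8040; the mhead just
   below 0x8040 records the 0x30-byte offset with mh_alloc == ISMEMALIGN so
   that free() can recover the true start of the block. */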

#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
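  /* Note (added commentary, not in the original source): this multiplication
     can overflow size_t for large n and s; the original code does not check
     for that, so a wrapped total would allocate fewer than n * s bytes. */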
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */

#ifdef MALLOC_STATS
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#endif

#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     unsigned int alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */
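
/* Illustrative sketch (not in the original source): how a caller might route
   allocations through the sh_* wrappers so that failure reports carry file
   and line information.  Compiled out with #if 0; bash's actual wrapper
   macros live in its headers, and the exact form shown here is an
   assumption. */
#if 0
#define xmalloc(n)	sh_malloc ((n), __FILE__, __LINE__)
#define xfree(p)	sh_free ((p), __FILE__, __LINE__)
#endif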

/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     unsigned int alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif
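
/* Illustrative sketch (not in the original source): a minimal exercise of
   the public entry points above, compiled out with #if 0.  The bucket
   arithmetic in the comments follows ALLOCATED_BYTES() and RIGHT_BUCKET(). */
#if 0
int
main ()
{
  char *p;

  p = malloc (25);	/* needs 40 bytes; lands in the 64-byte bucket */
  p = realloc (p, 48);	/* still needs 64 bytes; resized in place */
  p = realloc (p, 100);	/* needs 112 bytes; copied to a 128-byte block */
  free (p);
  return 0;
}
#endif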