1 /* malloc.c - dynamic memory allocation for bash. */
2
3 /* Copyright (C) 1985, 1987, 1997 Free Software Foundation, Inc.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
8 any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA.
18
19 In other words, you are welcome to use, share and improve this program.
20 You are forbidden to forbid anyone else to use, share and improve
21 what you give them. Help stamp out software-hoarding! */
22
23 /*
24 * @(#)nmalloc.c 1 (Caltech) 2/21/82
25 *
26 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
27 *
28 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
29 *
30 * This is a very fast storage allocator. It allocates blocks of a small
31 * number of different sizes, and keeps free lists of each size. Blocks
32 * that don't exactly fit are passed up to the next larger size. In this
33 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
34 * This is designed for use in a program that uses vast quantities of
35 * memory, but bombs when it runs out. To make it a little better, it
36 * warns the user when he starts to get near the end.
37 *
38 * June 84, ACT: modified rcheck code to check the range given to malloc,
39 * rather than the range determined by the 2-power used.
40 *
41 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
42 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
43 * You should call malloc_init to reinitialize after loading dumped Emacs.
44 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
45 * realloc knows how to return same block given, just changing its size,
46 * if the power of 2 is correct.
47 */
48
49 /*
50 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
51 * smallest allocatable block is 8 bytes. The overhead information will
52 * go in the first int of the block, and the returned pointer will point
53 * to the second.
54 */
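
/* For illustration (derived from the description above): nextf[0] chains
   8-byte blocks, nextf[1] 16-byte blocks, nextf[2] 32-byte blocks, and so
   on up through nextf[NBUCKETS-1].  A request is satisfied from the
   smallest bucket whose block size can hold the request plus its
   overhead. */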
55
56 /* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
57 uncover callers that refer to freed memory, and to have malloc() write 0xdf
58 into memory as it's allocated to avoid referring to previous contents. */
59
60 /* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
61 handled by configure. */
62
63 #if defined (HAVE_CONFIG_H)
64 # include <config.h>
65 #endif /* HAVE_CONFIG_H */
66
67 #if defined (SHELL)
68 # include "bashtypes.h"
69 # include "stdc.h"
70 #else
71 # include <sys/types.h>
72 #endif
73
74 #if defined (HAVE_UNISTD_H)
75 # include <unistd.h>
76 #endif
77
78 /* Determine which kind of system this is. */
79 #include <signal.h>
80
81 #if defined (HAVE_STRING_H)
82 # include <string.h>
83 #else
84 # include <strings.h>
85 #endif
86
87 #include <stdio.h>
88
89 /* Define getpagesize () if the system does not. */
90 #ifndef HAVE_GETPAGESIZE
91 # include "getpagesize.h"
92 #endif
93
94 #include "imalloc.h"
95 #ifdef MALLOC_STATS
96 # include "mstats.h"
97 #endif
98 #ifdef MALLOC_REGISTER
99 # include "table.h"
100 #endif
101 #ifdef MALLOC_WATCH
102 # include "watch.h"
103 #endif
104
105 /* System-specific omissions. */
106 #ifdef HPUX
107 # define NO_VALLOC
108 #endif
109
110 #define NBUCKETS 30
111
112 #define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */
113 #define ISFREE ((char) 0x54) /* magic byte that implies free block */
114 /* this is for error checking only */
115 #define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by
116 memalign, with the rest of the word
117 being the distance to the true
118 beginning of the block. */
119
120
121 /* We have a flag indicating whether memory is allocated, an index in
122 nextf[], a size field, and a sentinel value to determine whether or
123 not a caller wrote before the start of allocated memory; to realloc()
124 memory we either copy mh_nbytes or just change mh_nbytes if there is
125 enough room in the block for the new size. Range checking is always
126 done. */
127 union mhead {
128 bits64_t mh_align; /* 8 */
129 struct {
130 char mi_alloc; /* ISALLOC or ISFREE */ /* 1 */
131 char mi_index; /* index in nextf[] */ /* 1 */
132 /* Remainder are valid only when block is allocated */
133 u_bits16_t mi_magic2; /* should be == MAGIC2 */ /* 2 */
134 u_bits32_t mi_nbytes; /* # of bytes allocated */ /* 4 */
135 } minfo;
136 };
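/* Layout sketch (from the byte counts annotated above): the minfo struct
   packs 1 + 1 + 2 + 4 bytes into the same 8 bytes as mh_align, so the
   per-block header overhead is exactly 8 bytes (MOVERHEAD below). */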
137 #define mh_alloc minfo.mi_alloc
138 #define mh_index minfo.mi_index
139 #define mh_nbytes minfo.mi_nbytes
140 #define mh_magic2 minfo.mi_magic2
141
142 #define MOVERHEAD sizeof(union mhead)
143 #define MALIGN_MASK 7 /* one less than desired alignment */
144
145 typedef union _malloc_guard {
146 char s[4];
147 u_bits32_t i;
148 } mguard_t;
149
150 /* Access free-list pointer of a block.
151 It is stored at block + sizeof (char *).
152 This is not a field in the minfo structure member of union mhead
153 because we want sizeof (union mhead)
154 to describe the overhead for when the block is in use,
155 and we do not want the free-list pointer to count in that. */
156
157 #define CHAIN(a) \
158 (*(union mhead **) (sizeof (char *) + (char *) (a)))
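
/* Illustrative use of CHAIN -- this is how the free lists are walked
   elsewhere in this file (e.g. in malloc_free_blocks below):

     register union mhead *mp;
     for (mp = nextf[i]; mp; mp = CHAIN (mp))
       nfree++;                -- visit each free block in bucket i
*/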
159
160 /* To implement range checking, we write magic values in at the beginning
161 and end of each allocated block, and make sure they are undisturbed
162 whenever a free or a realloc occurs. */
163
164 /* Written in the 2 bytes before the block's real space (-4 bytes) */
165 #define MAGIC2 0x5555
166 #define MSLOP 4 /* 4 bytes extra for u_bits32_t size */
167
168 /* How many bytes are actually allocated for a request of size N --
169 rounded up to nearest multiple of 8 after accounting for malloc
170 overhead. */
171 #define ALLOCATED_BYTES(n) \
172 (((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
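
/* Worked example of ALLOCATED_BYTES: for a request of n = 10 bytes,
   10 + MOVERHEAD (8) + MSLOP (4) = 22, which (22 + 7) & ~7 rounds up
   to 24.  A 24-byte allocation lands in the 32-byte bucket (nextf[2]),
   not the 16-byte one. */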
173
174 #define ASSERT(p) \
175 do \
176 { \
177 if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, __STRING(p), file, line); \
178 } \
179 while (0)
180
181 /* Minimum and maximum bucket indices for block splitting (and to bound
182 the search for a block to split). */
183 #define SPLIT_MIN 2 /* XXX - was 3 */
184 #define SPLIT_MID 11
185 #define SPLIT_MAX 14
186
187 /* Minimum and maximum bucket indices for block coalescing. */
188 #define COMBINE_MIN 2
189 #define COMBINE_MAX (pagebucket - 1) /* XXX */
190
191 #define LESSCORE_MIN 10
192 #define LESSCORE_FRC 13
193
194 #define STARTBUCK 1
195
196 /* Flags for the internal functions. */
197 #define MALLOC_WRAPPER 0x01 /* wrapper function */
198 #define MALLOC_INTERNAL 0x02 /* internal function calling another */
199 #define MALLOC_NOTRACE 0x04 /* don't trace this allocation or free */
200 #define MALLOC_NOREG 0x08 /* don't register this allocation or free */
201
202 /* Error codes passed to xbotch(). */
203 #define ERR_DUPFREE 0x01
204 #define ERR_UNALLOC 0x02
205 #define ERR_UNDERFLOW 0x04
206 #define ERR_ASSERT_FAILED 0x08
207
208 /* Evaluates to true if NB is appropriate for bucket NU. NB is adjusted
209 appropriately by the caller to account for malloc overhead. This only
210 checks that the recorded size is not too big for the bucket. We
211 can't check whether or not it's in between NU and NU-1 because we
212 might have encountered a busy bucket when allocating and moved up to
213 the next size. */
214 #define IN_BUCKET(nb, nu) ((nb) <= binsizes[(nu)])
215
216 /* Use this when we want to be sure that NB is in bucket NU. */
217 #define RIGHT_BUCKET(nb, nu) \
218 (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
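
/* Example: with nb = 24 (a 10-byte request after overhead), IN_BUCKET(24, 2)
   and RIGHT_BUCKET(24, 2) both hold, since binsizes[1] = 16 < 24 <= 32 =
   binsizes[2].  IN_BUCKET(24, 3) also holds -- the block may have been
   bumped to a larger bucket by a busy one -- but RIGHT_BUCKET(24, 3) does
   not, because 24 <= binsizes[2]. */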
219
220 /* nextf[i] is free list of blocks of size 2**(i + 3) */
221
222 static union mhead *nextf[NBUCKETS];
223
224 /* busy[i] is nonzero while allocation of block size i is in progress. */
225
226 static char busy[NBUCKETS];
227
228 static int pagesz; /* system page size. */
229 static int pagebucket; /* bucket for requests a page in size */
230 static int maxbuck; /* highest bucket receiving allocation request. */
231
232 static char *memtop; /* top of heap */
233
234 static unsigned long binsizes[NBUCKETS] = {
235 8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
236 8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
237 1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
238 67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
239 2147483648UL, 4294967296UL-1
240 };
241
242 /* binsizes[x] == (1 << ((x) + 3)), except that the last entry is
    clamped to 2^32 - 1 so it still fits in a 32-bit unsigned long. */
243 #define binsize(x) binsizes[(x)]
244
245 /* Declarations for internal functions */
246 static PTR_T internal_malloc __P((size_t, const char *, int, int));
247 static PTR_T internal_realloc __P((PTR_T, size_t, const char *, int, int));
248 static void internal_free __P((PTR_T, const char *, int, int));
249 static PTR_T internal_memalign __P((unsigned int, size_t, const char *, int, int));
250 #ifndef NO_CALLOC
251 static PTR_T internal_calloc __P((size_t, size_t, const char *, int, int));
252 static void internal_cfree __P((PTR_T, const char *, int, int));
253 #endif
254 #ifndef NO_VALLOC
255 static PTR_T internal_valloc __P((size_t, const char *, int, int));
256 #endif
257
258 #if defined (botch)
259 extern void botch ();
260 #else
261 static void botch __P((const char *, const char *, int));
262 #endif
263 static void xbotch __P((PTR_T, int, const char *, const char *, int));
264
265 #if !HAVE_DECL_SBRK
266 extern char *sbrk ();
267 #endif /* !HAVE_DECL_SBRK */
268
269 #ifdef SHELL
270 extern int interrupt_immediately;
271 extern int signal_is_trapped __P((int));
272 #endif
273
274 #ifdef MALLOC_STATS
275 struct _malstats _mstats;
276 #endif /* MALLOC_STATS */
277
278 /* Debugging variables available to applications. */
279 int malloc_flags = 0; /* future use */
280 int malloc_trace = 0; /* trace allocations and frees to stderr */
281 int malloc_register = 0; /* future use */
282
283 #ifdef MALLOC_TRACE
284 char _malloc_trace_buckets[NBUCKETS];
285
286 /* These should really go into a header file. */
287 extern void mtrace_alloc __P((const char *, PTR_T, size_t, const char *, int));
288 extern void mtrace_free __P((PTR_T, int, const char *, int));
289 #endif
290
291 #if !defined (botch)
292 static void
293 botch (s, file, line)
     const char *s;
     const char *file;
     int line;
294 {
295 fprintf (stderr, "malloc: failed assertion: %s\n", s);
296 (void)fflush (stderr);
297 abort ();
298 }
299 #endif
300
301 /* print the file and line number that caused the assertion failure and
302 call botch() to do whatever the application wants with the information */
303 static void
304 xbotch (mem, e, s, file, line)
305 PTR_T mem;
306 int e;
307 const char *s;
308 const char *file;
309 int line;
310 {
311 fprintf (stderr, "\r\nmalloc: %s:%d: assertion botched\r\n",
312 file ? file : "unknown", line);
313 #ifdef MALLOC_REGISTER
314 if (mem != NULL && malloc_register)
315 mregister_describe_mem (mem, stderr);
316 #endif
317 (void)fflush (stderr);
318 botch(s, file, line);
319 }
320
321 /* Coalesce two adjacent free blocks off the free list for size NU - 1,
322    as long as we can find two adjacent free blocks.  nextf[NU - 1] is
323    assumed not to be busy; the caller (morecore()) checks for this. */
324 static void
325 bcoalesce (nu)
326 register int nu;
327 {
328 register union mhead *mp, *mp1, *mp2;
329 register int nbuck;
330 unsigned long siz;
331
332 nbuck = nu - 1;
333 if (nextf[nbuck] == 0)
334 return;
335
336 siz = binsize (nbuck);
337
338 mp2 = mp1 = nextf[nbuck];
339 mp = CHAIN (mp1);
340 while (mp && mp != (union mhead *)((char *)mp1 + siz))
341 {
342 mp2 = mp1;
343 mp1 = mp;
344 mp = CHAIN (mp);
345 }
346 if (mp == 0)
347 return;
348
349 /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
350 CHAIN(mp2) must equal mp1. Check that mp1 and mp are adjacent. */
351 if (mp2 != mp1 && CHAIN(mp2) != mp1)
352 xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
353
354 #ifdef MALLOC_DEBUG
355 if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
356 return; /* not adjacent */
357 #endif
358
359 #ifdef MALLOC_STATS
360 _mstats.tbcoalesce++;
361 _mstats.ncoalesce[nbuck]++;
362 #endif
363
364 /* Since they are adjacent, remove them from the free list */
365 if (mp1 == nextf[nbuck])
366 nextf[nbuck] = CHAIN (mp);
367 else
368 CHAIN (mp2) = CHAIN (mp);
369
370 /* And add the combined two blocks to nextf[NU]. */
371 mp1->mh_alloc = ISFREE;
372 mp1->mh_index = nu;
373 CHAIN (mp1) = nextf[nu];
374 nextf[nu] = mp1;
375 }
376
377 /* Split a block at index > NU (but less than SPLIT_MAX) into a set of
378 blocks of the correct size, and attach them to nextf[NU]. nextf[NU]
379 is assumed to be empty. Must be called with signals blocked (e.g.,
380 by morecore()). */
381 static void
382 bsplit (nu)
383 register int nu;
384 {
385 register union mhead *mp;
386 int nbuck, nblks, split_max;
387 unsigned long siz;
388
389 split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;
390
391 if (nu >= SPLIT_MID)
392 {
393 for (nbuck = split_max; nbuck > nu; nbuck--)
394 {
395 if (busy[nbuck] || nextf[nbuck] == 0)
396 continue;
397 break;
398 }
399 }
400 else
401 {
402 for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
403 {
404 if (busy[nbuck] || nextf[nbuck] == 0)
405 continue;
406 break;
407 }
408 }
409
410 if (nbuck > split_max || nbuck <= nu)
411 return;
412
413 /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
414 and nbuck is below some threshold. */
415
416 #ifdef MALLOC_STATS
417 _mstats.tbsplit++;
418 _mstats.nsplit[nbuck]++;
419 #endif
420
421 /* Figure out how many blocks we'll get. */
422 siz = binsize (nu);
423 nblks = binsize (nbuck) / siz;
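
  /* For example, splitting a 1024-byte block (bucket 7) to feed bucket 5
     (256 bytes) yields nblks = 1024 / 256 = 4 chunks. */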
424
425 /* Remove the block from the chain of larger blocks. */
426 mp = nextf[nbuck];
427 nextf[nbuck] = CHAIN (mp);
428
429 /* Split the block and put it on the requested chain. */
430 nextf[nu] = mp;
431 while (1)
432 {
433 mp->mh_alloc = ISFREE;
434 mp->mh_index = nu;
435 if (--nblks <= 0) break;
436 CHAIN (mp) = (union mhead *)((char *)mp + siz);
437 mp = (union mhead *)((char *)mp + siz);
438 }
439 CHAIN (mp) = 0;
440 }
441
442 static void
443 block_signals (setp, osetp)
444 sigset_t *setp, *osetp;
445 {
446 #ifdef HAVE_POSIX_SIGNALS
447 sigfillset (setp);
448 sigemptyset (osetp);
449 sigprocmask (SIG_BLOCK, setp, osetp);
450 #else
451 # if defined (HAVE_BSD_SIGNALS)
452 *osetp = sigsetmask (-1);
453 # endif
454 #endif
455 }
456
457 static void
458 unblock_signals (setp, osetp)
459 sigset_t *setp, *osetp;
460 {
461 #ifdef HAVE_POSIX_SIGNALS
462 sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
463 #else
464 # if defined (HAVE_BSD_SIGNALS)
465 sigsetmask (*osetp);
466 # endif
467 #endif
468 }
469
470 /* Return some memory to the system by reducing the break. This is only
471 called with NU > pagebucket, so we're always assured of giving back
472 more than one page of memory. */
473 static void
474 lesscore (nu) /* give system back some memory */
475 register int nu; /* size index we're discarding */
476 {
477 long siz;
478
479 siz = binsize (nu);
480 /* Should check for errors here, I guess. */
481 sbrk (-siz);
482 memtop -= siz;
483
484 #ifdef MALLOC_STATS
485 _mstats.nsbrk++;
486 _mstats.tsbrk -= siz;
487 _mstats.nlesscore[nu]++;
488 #endif
489 }
490
491 static void
492 morecore (nu) /* ask system for more memory */
493 register int nu; /* size index to get more of */
494 {
495 register union mhead *mp;
496 register int nblks;
497 register long siz;
498 long sbrk_amt; /* amount to get via sbrk() */
499 sigset_t set, oset;
500 int blocked_sigs;
501
502 /* Block all signals in case we are executed from a signal handler. */
503 blocked_sigs = 0;
504 #ifdef SHELL
505 if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
506 #endif
507 {
508 block_signals (&set, &oset);
509 blocked_sigs = 1;
510 }
511
512 siz = binsize (nu); /* size of desired block for nextf[nu] */
513
514 if (siz < 0)
515 goto morecore_done; /* oops */
516
517 #ifdef MALLOC_STATS
518 _mstats.nmorecore[nu]++;
519 #endif
520
521 /* Try to split a larger block here, if we're within the range of sizes
522 to split. */
523 if (nu >= SPLIT_MIN)
524 {
525 bsplit (nu);
526 if (nextf[nu] != 0)
527 goto morecore_done;
528 }
529
530 /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
531    if we can, and we're within the range of the block coalescing limits. */
532 if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
533 {
534 bcoalesce (nu);
535 if (nextf[nu] != 0)
536 goto morecore_done;
537 }
538
539 /* Take at least a page, and figure out how many blocks of the requested
540 size we're getting. */
541 if (siz <= pagesz)
542 {
543 sbrk_amt = pagesz;
544 nblks = sbrk_amt / siz;
545 }
546 else
547 {
548 /* We always want to request an integral multiple of the page size
549 from the kernel, so let's compute whether or not `siz' is such
550 an amount. If it is, we can just request it. If not, we want
551 the smallest integral multiple of pagesize that is larger than
552 `siz' and will satisfy the request. */
553 sbrk_amt = siz & (pagesz - 1);
554 if (sbrk_amt == 0)
555 sbrk_amt = siz;
556 else
557 sbrk_amt = siz + pagesz - sbrk_amt;
558 nblks = 1;
559 }
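
  /* Worked example, assuming pagesz = 4096: a request for siz = 8192 is
     already a page multiple (8192 & 4095 == 0), so sbrk_amt = 8192; for
     siz = 12000, sbrk_amt = 12000 + 4096 - (12000 & 4095) = 12288, the
     next page multiple.  Either way only one block is carved out. */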
560
561 #ifdef MALLOC_STATS
562 _mstats.nsbrk++;
563 _mstats.tsbrk += sbrk_amt;
564 #endif
565
566 mp = (union mhead *) sbrk (sbrk_amt);
567
568 /* Totally out of memory. */
569 if ((long)mp == -1)
570 goto morecore_done;
571
572 memtop += sbrk_amt;
573
574 /* shouldn't happen, but just in case -- require 8-byte alignment */
575 if ((long)mp & MALIGN_MASK)
576 {
577 mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
578 nblks--;
579 }
580
581 /* save new header and link the nblks blocks together */
582 nextf[nu] = mp;
583 while (1)
584 {
585 mp->mh_alloc = ISFREE;
586 mp->mh_index = nu;
587 if (--nblks <= 0) break;
588 CHAIN (mp) = (union mhead *)((char *)mp + siz);
589 mp = (union mhead *)((char *)mp + siz);
590 }
591 CHAIN (mp) = 0;
592
593 morecore_done:
594 if (blocked_sigs)
595 unblock_signals (&set, &oset);
596 }
597
598 static void
599 malloc_debug_dummy ()
600 {
601 write (1, "malloc_debug_dummy\n", 19);
602 }
603
604 #define PREPOP_BIN 2
605 #define PREPOP_SIZE 32
606
607 static int
608 pagealign ()
609 {
610 register int nunits;
611 register union mhead *mp;
612 long sbrk_needed;
613 char *curbrk;
614
615 pagesz = getpagesize ();
616 if (pagesz < 1024)
617 pagesz = 1024;
618
619 /* OK, how much do we need to allocate to make things page-aligned?
620 Some of this partial page will be wasted space, but we'll use as
621 much as we can. Once we figure out how much to advance the break
622 pointer, go ahead and do it. */
623 memtop = curbrk = sbrk (0);
624 sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1)); /* sbrk(0) % pagesz */
625 if (sbrk_needed < 0)
626 sbrk_needed += pagesz;
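
  /* For example, with pagesz = 4096 and a current break of 0x1234, the
     break sits 0x234 bytes past a page boundary, so sbrk_needed =
     4096 - 0x234 = 0xdcc bytes brings the next sbrk() to a boundary. */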
627
628 /* Now allocate the wasted space. */
629 if (sbrk_needed)
630 {
631 #ifdef MALLOC_STATS
632 _mstats.nsbrk++;
633 _mstats.tsbrk += sbrk_needed;
634 #endif
635 curbrk = sbrk (sbrk_needed);
636 if ((long)curbrk == -1)
637 return -1;
638 memtop += sbrk_needed;
639
640 /* Take the memory which would otherwise be wasted and populate the most
641 popular bin (2 == 32 bytes) with it. Add whatever we need to curbrk
642 to make things 32-byte aligned, compute how many 32-byte chunks we're
643 going to get, and set up the bin. */
644 curbrk += sbrk_needed & (PREPOP_SIZE - 1);
645 sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
646 nunits = sbrk_needed / PREPOP_SIZE;
647
648 if (nunits > 0)
649 {
650 mp = (union mhead *)curbrk;
651
652 nextf[PREPOP_BIN] = mp;
653 while (1)
654 {
655 mp->mh_alloc = ISFREE;
656 mp->mh_index = PREPOP_BIN;
657 if (--nunits <= 0) break;
658 CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
659 mp = (union mhead *)((char *)mp + PREPOP_SIZE);
660 }
661 CHAIN(mp) = 0;
662 }
663 }
664
665 /* compute which bin corresponds to the page size. */
666 for (nunits = 7; nunits < NBUCKETS; nunits++)
667 if (pagesz <= binsize(nunits))
668 break;
669 pagebucket = nunits;
670
671 return 0;
672 }
673
674 static PTR_T
675 internal_malloc (n, file, line, flags) /* get a block */
676 size_t n;
677 const char *file;
678 int line, flags;
679 {
680 register union mhead *p;
681 register int nunits;
682 register char *m, *z;
683 long nbytes;
684 mguard_t mg;
685
686 /* Get the system page size and align break pointer so future sbrks will
687 be page-aligned. The page size must be at least 1K -- anything
688 smaller is increased. */
689 if (pagesz == 0)
690 if (pagealign () < 0)
691 return ((PTR_T)NULL);
692
693 /* Figure out how many bytes are required, rounding up to the nearest
694 multiple of 8, then figure out which nextf[] area to use. Try to
695 be smart about where to start searching -- if the number of bytes
696 needed is greater than the page size, we can start at pagebucket. */
697 nbytes = ALLOCATED_BYTES(n);
698 nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
699 for ( ; nunits < NBUCKETS; nunits++)
700 if (nbytes <= binsize(nunits))
701 break;
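
  /* E.g., a 100-byte request: ALLOCATED_BYTES(100) = 112, so the scan
     starts at STARTBUCK and stops at nunits = 4, since binsize(4) = 128
     is the first bucket size >= 112. */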
702
703 /* Silently reject too-large requests. */
704 if (nunits >= NBUCKETS)
705 return ((PTR_T) NULL);
706
707 /* In case this is reentrant use of malloc from signal handler,
708 pick a block size that no other malloc level is currently
709 trying to allocate. That's the easiest harmless way not to
710 interfere with the other level of execution. */
711 #ifdef MALLOC_STATS
712 if (busy[nunits]) _mstats.nrecurse++;
713 #endif
714 while (busy[nunits]) nunits++;
715 busy[nunits] = 1;
716
717 if (nunits > maxbuck)
718 maxbuck = nunits;
719
720 /* If there are no blocks of the appropriate size, go get some */
721 if (nextf[nunits] == 0)
722 morecore (nunits);
723
724 /* Get one block off the list, and set the new list head */
725 if ((p = nextf[nunits]) == NULL)
726 {
727 busy[nunits] = 0;
728 return NULL;
729 }
730 nextf[nunits] = CHAIN (p);
731 busy[nunits] = 0;
732
733 /* Check for free block clobbered */
734 /* If not for this check, we would gobble a clobbered free chain ptr
735 and bomb out on the NEXT allocate of this size block */
736 if (p->mh_alloc != ISFREE || p->mh_index != nunits)
737 xbotch ((PTR_T)(p+1), 0, "malloc: block on free list clobbered", file, line);
738
739 /* Fill in the info, and set up the magic numbers for range checking. */
740 p->mh_alloc = ISALLOC;
741 p->mh_magic2 = MAGIC2;
742 p->mh_nbytes = n;
743
744 /* End guard */
745 mg.i = n;
746 z = mg.s;
747 m = (char *) (p + 1) + n;
748 *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
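
  /* The chunk now looks like (sizes for a request of n bytes):
     [union mhead: 8][user data: n][4-byte guard holding n][padding]
     free() and realloc() re-read the trailing copy of n and compare it
     with mh_nbytes to detect writes past the end of the block. */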
749
750 #ifdef MEMSCRAMBLE
751 if (n)
752 MALLOC_MEMSET ((char *)(p + 1), 0xdf, n); /* scramble previous contents */
753 #endif
754 #ifdef MALLOC_STATS
755 _mstats.nmalloc[nunits]++;
756 _mstats.tmalloc[nunits]++;
757 _mstats.nmal++;
758 _mstats.bytesreq += n;
759 #endif /* MALLOC_STATS */
760
761 #ifdef MALLOC_TRACE
762 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
763 mtrace_alloc ("malloc", p + 1, n, file, line);
764 else if (_malloc_trace_buckets[nunits])
765 mtrace_alloc ("malloc", p + 1, n, file, line);
766 #endif
767
768 #ifdef MALLOC_REGISTER
769 if (malloc_register && (flags & MALLOC_NOREG) == 0)
770 mregister_alloc ("malloc", p + 1, n, file, line);
771 #endif
772
773 #ifdef MALLOC_WATCH
774 if (_malloc_nwatch > 0)
775 _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
776 #endif
777
778 return (PTR_T) (p + 1);
779 }
780
781 static void
782 internal_free (mem, file, line, flags)
783 PTR_T mem;
784 const char *file;
785 int line, flags;
786 {
787 register union mhead *p;
788 register char *ap, *z;
789 register int nunits;
790 register unsigned int nbytes;
791 int ubytes; /* caller-requested size */
792 mguard_t mg;
793
794 if ((ap = (char *)mem) == 0)
795 return;
796
797 p = (union mhead *) ap - 1;
798
799 if (p->mh_alloc == ISMEMALIGN)
800 {
801 ap -= p->mh_nbytes;
802 p = (union mhead *) ap - 1;
803 }
804
805 #if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
806 if (malloc_trace || malloc_register)
807 ubytes = p->mh_nbytes;
808 #endif
809
810 if (p->mh_alloc != ISALLOC)
811 {
812 if (p->mh_alloc == ISFREE)
813 xbotch (mem, ERR_DUPFREE,
814 "free: called with already freed block argument", file, line);
815 else
816 xbotch (mem, ERR_UNALLOC,
817 "free: called with unallocated block argument", file, line);
818 }
819
820 ASSERT (p->mh_magic2 == MAGIC2);
821
822 nunits = p->mh_index;
823 nbytes = ALLOCATED_BYTES(p->mh_nbytes);
824 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
825 are now used for the number of bytes allocated, a simple check of
826 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
827 We sanity-check the value of mh_nbytes against the size of the blocks
828 in the appropriate bucket before we use it. This can still cause problems
829 and obscure errors if mh_nbytes is wrong but still within range; the
830 checks against the size recorded at the end of the chunk will probably
831 fail then. Using MALLOC_REGISTER will help here, since it saves the
832 original number of bytes requested. */
833
834 if (IN_BUCKET(nbytes, nunits) == 0)
835 xbotch (mem, ERR_UNDERFLOW,
836 "free: underflow detected; mh_nbytes out of range", file, line);
837
838 ap += p->mh_nbytes;
839 z = mg.s;
840 *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
841 if (mg.i != p->mh_nbytes)
842 xbotch (mem, ERR_ASSERT_FAILED, "free: start and end chunk sizes differ", file, line);
843
844 #if 1
845 if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
846 #else
847 if (((char *)p + binsize(nunits) == memtop) && nunits >= LESSCORE_MIN)
848 #endif
849 {
850 /* If above LESSCORE_FRC, give back unconditionally. This should be set
851 high enough to be infrequently encountered. If between LESSCORE_MIN
852 and LESSCORE_FRC, call lesscore if the bucket is marked as busy (in
853 which case we would punt below and leak memory) or if there's already
854 a block on the free list. */
855 if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
856 {
857 lesscore (nunits);
858 /* keeps the tracing and registering code in one place */
859 goto free_return;
860 }
861 }
862
863 #ifdef MEMSCRAMBLE
864 if (p->mh_nbytes)
865 MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
866 #endif
867
868 ASSERT (nunits < NBUCKETS);
869 p->mh_alloc = ISFREE;
870
871 if (busy[nunits] == 1)
872 return; /* this is bogus, but at least it won't corrupt the chains */
873
874 /* Protect against signal handlers calling malloc. */
875 busy[nunits] = 1;
876 /* Put this block on the free list. */
877 CHAIN (p) = nextf[nunits];
878 nextf[nunits] = p;
879 busy[nunits] = 0;
880
881 free_return:
882
883 #ifdef MALLOC_STATS
884 _mstats.nmalloc[nunits]--;
885 _mstats.nfre++;
886 #endif /* MALLOC_STATS */
887
888 #ifdef MALLOC_TRACE
889 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
890 mtrace_free (mem, ubytes, file, line);
891 else if (_malloc_trace_buckets[nunits])
892 mtrace_free (mem, ubytes, file, line);
893 #endif
894
895 #ifdef MALLOC_REGISTER
896 if (malloc_register && (flags & MALLOC_NOREG) == 0)
897 mregister_free (mem, ubytes, file, line);
898 #endif
899
900 #ifdef MALLOC_WATCH
901 if (_malloc_nwatch > 0)
902 _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
903 #endif
904 }
905
906 static PTR_T
907 internal_realloc (mem, n, file, line, flags)
908 PTR_T mem;
909 register size_t n;
910 const char *file;
911 int line, flags;
912 {
913 register union mhead *p;
914 register u_bits32_t tocopy;
915 register unsigned int nbytes;
916 register int nunits;
917 register char *m, *z;
918 mguard_t mg;
919
920 #ifdef MALLOC_STATS
921 _mstats.nrealloc++;
922 #endif
923
924 if (n == 0)
925 {
926 internal_free (mem, file, line, MALLOC_INTERNAL);
927 return (NULL);
928 }
929 if ((p = (union mhead *) mem) == 0)
930 return internal_malloc (n, file, line, MALLOC_INTERNAL);
931
932 p--;
933 nunits = p->mh_index;
934 ASSERT (nunits < NBUCKETS);
935
936 if (p->mh_alloc != ISALLOC)
937 xbotch (mem, ERR_UNALLOC,
938 "realloc: called with unallocated block argument", file, line);
939
940 ASSERT (p->mh_magic2 == MAGIC2);
941 nbytes = ALLOCATED_BYTES(p->mh_nbytes);
942 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
943 are now used for the number of bytes allocated, a simple check of
944 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
945 We sanity-check the value of mh_nbytes against the size of the blocks
946 in the appropriate bucket before we use it. This can still cause problems
947 and obscure errors if mh_nbytes is wrong but still within range; the
948 checks against the size recorded at the end of the chunk will probably
949 fail then. Using MALLOC_REGISTER will help here, since it saves the
950 original number of bytes requested. */
951 if (IN_BUCKET(nbytes, nunits) == 0)
952 xbotch (mem, ERR_UNDERFLOW,
953 "realloc: underflow detected; mh_nbytes out of range", file, line);
954
955 m = (char *)mem + (tocopy = p->mh_nbytes);
956 z = mg.s;
957 *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
958 if (mg.i != p->mh_nbytes)
959 xbotch (mem, ERR_ASSERT_FAILED, "realloc: start and end chunk sizes differ", file, line);
960
961 #ifdef MALLOC_WATCH
962 if (_malloc_nwatch > 0)
963 _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
964 #endif
965 #ifdef MALLOC_STATS
966 _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
967 #endif
968
969 /* See if desired size rounds to same power of 2 as actual size. */
970 nbytes = ALLOCATED_BYTES(n);
971
972 /* If ok, use the same block, just marking its size as changed. */
973 if (RIGHT_BUCKET(nbytes, nunits))
974 {
975 #if 0
976 m = (char *)mem + p->mh_nbytes;
977 #else
978 /* Compensate for increment above. */
979 m -= 4;
980 #endif
981 *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;
982 m = (char *)mem + (p->mh_nbytes = n);
983
984 mg.i = n;
985 z = mg.s;
986 *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
987
988 return mem;
989 }
990
991 if (n < tocopy)
992 tocopy = n;
993
994 #ifdef MALLOC_STATS
995 _mstats.nrcopy++;
996 #endif
997
998 if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
999 return 0;
1000 FASTCOPY (mem, m, tocopy);
1001 internal_free (mem, file, line, MALLOC_INTERNAL);
1002
1003 #ifdef MALLOC_TRACE
1004 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
1005 mtrace_alloc ("realloc", m, n, file, line);
1006 else if (_malloc_trace_buckets[nunits])
1007 mtrace_alloc ("realloc", m, n, file, line);
1008 #endif
1009
1010 #ifdef MALLOC_REGISTER
1011 if (malloc_register && (flags & MALLOC_NOREG) == 0)
1012 mregister_alloc ("realloc", m, n, file, line);
1013 #endif
1014
1015 #ifdef MALLOC_WATCH
1016 if (_malloc_nwatch > 0)
1017 _malloc_ckwatch (m, file, line, W_RESIZED, n);
1018 #endif
1019
1020 return m;
1021 }
1022
1023 static PTR_T
1024 internal_memalign (alignment, size, file, line, flags)
1025 unsigned int alignment;
1026 size_t size;
1027 const char *file;
1028 int line, flags;
1029 {
1030 register char *ptr;
1031 register char *aligned;
1032 register union mhead *p;
1033
1034 ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);
1035
1036 if (ptr == 0)
1037 return 0;
1038 /* If entire block has the desired alignment, just accept it. */
1039 if (((long) ptr & (alignment - 1)) == 0)
1040 return ptr;
1041 /* Otherwise, get address of byte in the block that has that alignment. */
1042 #if 0
1043 aligned = (char *) (((long) ptr + alignment - 1) & -alignment);
1044 #else
1045 aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));
1046 #endif
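
  /* (~alignment + 1) is two's-complement -alignment, so for a power-of-two
     alignment this rounds ptr up: e.g. ptr = 0x100c, alignment = 64 gives
     aligned = (0x100c + 63) & ~63 = 0x1040. */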
1047
1048 /* Store a suitable indication of how to free the block,
1049 so that free can find the true beginning of it. */
1050 p = (union mhead *) aligned - 1;
1051 p->mh_nbytes = aligned - ptr;
1052 p->mh_alloc = ISMEMALIGN;
1053
1054 return aligned;
1055 }
1056
1057 #if !defined (NO_VALLOC)
1058 /* This runs into trouble with getpagesize on HPUX, and Multimax machines.
1059 Patching out seems cleaner than the ugly fix needed. */
1060 static PTR_T
1061 internal_valloc (size, file, line, flags)
1062 size_t size;
1063 const char *file;
1064 int line, flags;
1065 {
1066 return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
1067 }
1068 #endif /* !NO_VALLOC */
1069
1070 #ifndef NO_CALLOC
1071 static PTR_T
1072 internal_calloc (n, s, file, line, flags)
1073 size_t n, s;
1074 const char *file;
1075 int line, flags;
1076 {
1077 size_t total;
1078 PTR_T result;
1079
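  /* Note: n * s can wrap for very large arguments; this allocator, like
     many of its era, does not check for that overflow. */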
1080 total = n * s;
1081 result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
1082 if (result)
1083 memset (result, 0, total);
1084 return result;
1085 }
1086
1087 static void
1088 internal_cfree (p, file, line, flags)
1089 PTR_T p;
1090 const char *file;
1091 int line, flags;
1092 {
1093 internal_free (p, file, line, flags|MALLOC_INTERNAL);
1094 }
1095 #endif /* !NO_CALLOC */
1096
1097 #ifdef MALLOC_STATS
1098 int
1099 malloc_free_blocks (size)
1100 int size;
1101 {
1102 int nfree;
1103 register union mhead *p;
1104
1105 nfree = 0;
1106 for (p = nextf[size]; p; p = CHAIN (p))
1107 nfree++;
1108
1109 return nfree;
1110 }
1111 #endif
1112
1113 #if defined (MALLOC_WRAPFUNCS)
1114 PTR_T
1115 sh_malloc (bytes, file, line)
1116 size_t bytes;
1117 const char *file;
1118 int line;
1119 {
1120 return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
1121 }
1122
1123 PTR_T
1124 sh_realloc (ptr, size, file, line)
1125 PTR_T ptr;
1126 size_t size;
1127 const char *file;
1128 int line;
1129 {
1130 return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
1131 }
1132
1133 void
1134 sh_free (mem, file, line)
1135 PTR_T mem;
1136 const char *file;
1137 int line;
1138 {
1139 internal_free (mem, file, line, MALLOC_WRAPPER);
1140 }
1141
1142 PTR_T
1143 sh_memalign (alignment, size, file, line)
1144 unsigned int alignment;
1145 size_t size;
1146 const char *file;
1147 int line;
1148 {
1149 return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
1150 }
1151
1152 #ifndef NO_CALLOC
1153 PTR_T
1154 sh_calloc (n, s, file, line)
1155 size_t n, s;
1156 const char *file;
1157 int line;
1158 {
1159 return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
1160 }
1161
1162 void
1163 sh_cfree (mem, file, line)
1164 PTR_T mem;
1165 const char *file;
1166 int line;
1167 {
1168 internal_cfree (mem, file, line, MALLOC_WRAPPER);
1169 }
1170 #endif
1171
1172 #ifndef NO_VALLOC
1173 PTR_T
1174 sh_valloc (size, file, line)
1175 size_t size;
1176 const char *file;
1177 int line;
1178 {
1179 return internal_valloc (size, file, line, MALLOC_WRAPPER);
1180 }
1181 #endif /* !NO_VALLOC */
1182
1183 #endif /* MALLOC_WRAPFUNCS */
1184
1185 /* Externally-available functions that call their internal counterparts. */
1186
1187 PTR_T
1188 malloc (size)
1189 size_t size;
1190 {
1191 return internal_malloc (size, (char *)NULL, 0, 0);
1192 }
1193
1194 PTR_T
1195 realloc (mem, nbytes)
1196 PTR_T mem;
1197 size_t nbytes;
1198 {
1199 return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
1200 }
1201
1202 void
1203 free (mem)
1204 PTR_T mem;
1205 {
1206 internal_free (mem, (char *)NULL, 0, 0);
1207 }
1208
1209 PTR_T
1210 memalign (alignment, size)
1211 unsigned int alignment;
1212 size_t size;
1213 {
1214 return internal_memalign (alignment, size, (char *)NULL, 0, 0);
1215 }
1216
1217 #ifndef NO_VALLOC
1218 PTR_T
1219 valloc (size)
1220 size_t size;
1221 {
1222 return internal_valloc (size, (char *)NULL, 0, 0);
1223 }
1224 #endif
1225
1226 #ifndef NO_CALLOC
1227 PTR_T
1228 calloc (n, s)
1229 size_t n, s;
1230 {
1231 return internal_calloc (n, s, (char *)NULL, 0, 0);
1232 }
1233
1234 void
1235 cfree (mem)
1236 PTR_T mem;
1237 {
1238 internal_cfree (mem, (char *)NULL, 0, 0);
1239 }
1240 #endif