/* lib/malloc/malloc.c -- imported from bash-2.05
   (scraped from the git.ipfire.org mirror of the bash repository). */
1 /* malloc.c - dynamic memory allocation for bash. */
2
3 /* Copyright (C) 1985, 1987, 1997 Free Software Foundation, Inc.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
8 any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA.
18
19 In other words, you are welcome to use, share and improve this program.
20 You are forbidden to forbid anyone else to use, share and improve
21 what you give them. Help stamp out software-hoarding! */
22
23 /*
24 * @(#)nmalloc.c 1 (Caltech) 2/21/82
25 *
26 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
27 *
28 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
29 *
30 * This is a very fast storage allocator. It allocates blocks of a small
31 * number of different sizes, and keeps free lists of each size. Blocks
32 * that don't exactly fit are passed up to the next larger size. In this
33 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
34 * This is designed for use in a program that uses vast quantities of
35 * memory, but bombs when it runs out. To make it a little better, it
36 * warns the user when he starts to get near the end.
37 *
38 * June 84, ACT: modified rcheck code to check the range given to malloc,
39 * rather than the range determined by the 2-power used.
40 *
41 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
42 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
43 * You should call malloc_init to reinitialize after loading dumped Emacs.
44 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
45 * realloc knows how to return same block given, just changing its size,
46 * if the power of 2 is correct.
47 */
48 #define MALLOC_STATS /* for the time being */
49
50 /*
51 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
52 * smallest allocatable block is 8 bytes. The overhead information will
53 * go in the first int of the block, and the returned pointer will point
54 * to the second.
55 */
56
57 /* Define this to have free() write 0xcf into memory as it's freed, to
58 uncover callers that refer to freed memory. */
59 /* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE */
60 #if !defined (NO_MEMSCRAMBLE)
61 # define MEMSCRAMBLE
62 #endif
63
64 #if defined (HAVE_CONFIG_H)
65 # include <config.h>
66 #endif /* HAVE_CONFIG_H */
67
68 #if defined (SHELL)
69 # include "bashtypes.h"
70 #else
71 # include <sys/types.h>
72 #endif
73
74 #if defined (HAVE_UNISTD_H)
75 # include <unistd.h>
76 #endif
77
78 /* Determine which kind of system this is. */
79 #include <signal.h>
80
81 #if defined (HAVE_STRING_H)
82 # include <string.h>
83 #else
84 # include <strings.h>
85 #endif
86
87 #if defined (MALLOC_STATS) || !defined (botch)
88 # include <stdio.h>
89 #endif /* MALLOC_STATS || !botch */
90
91 /* Define getpagesize () if the system does not. */
92 #ifndef HAVE_GETPAGESIZE
93 # include "getpagesize.h"
94 #endif
95
96 #if __GNUC__ > 1
97 # define FASTCOPY(s, d, n) __builtin_memcpy (d, s, n)
98 #else /* !__GNUC__ */
99 # if !defined (HAVE_BCOPY)
100 # if !defined (HAVE_MEMMOVE)
101 # define FASTCOPY(s, d, n) memcpy (d, s, n)
102 # else
103 # define FASTCOPY(s, d, n) memmove (d, s, n)
104 # endif /* !HAVE_MEMMOVE */
105 # else /* HAVE_BCOPY */
106 # define FASTCOPY(s, d, n) bcopy (s, d, n)
107 # endif /* HAVE_BCOPY */
108 #endif /* !__GNUC__ */
109
110 /* Generic pointer type. */
111 #ifndef PTR_T
112 # if defined (__STDC__)
113 # define PTR_T void *
114 # else
115 # define PTR_T char *
116 # endif
117 #endif
118
119 #if !defined (NULL)
120 # define NULL 0
121 #endif
122
123 #define NBUCKETS 30
124
125 #define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */
126 #define ISFREE ((char) 0x54) /* magic byte that implies free block */
127 /* this is for error checking only */
128 #define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by
129 memalign, with the rest of the word
130 being the distance to the true
131 beginning of the block. */
132
133 #if !defined (SBRK_DECLARED)
134 extern char *sbrk ();
135 #endif /* !SBRK_DECLARED */
136
137 #ifdef MALLOC_STATS
138 /*
139 * NMALLOC[i] is the difference between the number of mallocs and frees
140 * for a given block size. TMALLOC[i] is the total number of mallocs for
141 * a given block size. NMORECORE[i] is the total number of calls to
142 * morecore(i). NMAL and NFRE are counts of the number of calls to malloc()
143 * and free(), respectively. NREALLOC is the total number of calls to
144 * realloc(); NRCOPY is the number of times realloc() had to allocate new
145 * memory and copy to it. NRECURSE is a count of the number of recursive
146 * calls to malloc() for the same bucket size, which can be caused by calls
147 * to malloc() from a signal handler. NSBRK is the number of calls to sbrk()
148 * (whether by morecore() or for alignment); TSBRK is the total number of
149 * bytes requested from the kernel with sbrk(). BYTESUSED is the total
150 * number of bytes consumed by blocks currently in use; BYTESFREE is the
151 * total number of bytes currently on all of the free lists. TBSPLIT is
152 * the number of times a larger block was split to satisfy a smaller request.
153 * NSPLIT[i] is the number of times a block of size I was split.
154 * TBCOALESCE is the number of times two adjacent smaller blocks off the free
155 * list were combined to satisfy a larger request.
156 */
157 struct _malstats {
158 int nmalloc[NBUCKETS];
159 int tmalloc[NBUCKETS];
160 int nmorecore[NBUCKETS];
161 int nmal;
162 int nfre;
163 int nrealloc;
164 int nrcopy;
165 int nrecurse;
166 int nsbrk;
167 bits32_t tsbrk;
168 bits32_t bytesused;
169 bits32_t bytesfree;
170 int tbsplit;
171 int nsplit[NBUCKETS];
172 int tbcoalesce;
173 };
174
175 static struct _malstats _mstats;
176
177 /* Return statistics describing allocation of blocks of size BLOCKSIZE.
178 NFREE is the number of free blocks for this allocation size. NUSED
179 is the number of blocks in use. NMAL is the number of requests for
180 blocks of size BLOCKSIZE. NMORECORE is the number of times we had
181 to call MORECORE to repopulate the free list for this bucket. NSPLIT
182 is the number of times a block of this size was split to satisfy a
183 smaller request. */
184 struct bucket_stats {
185 u_bits32_t blocksize;
186 int nfree;
187 int nused;
188 int nmal;
189 int nmorecore;
190 int nsplit;
191 };
192 #endif /* MALLOC_STATS */
193
194 /* We have a flag indicating whether memory is allocated, an index in
195 nextf[], a size field, and a sentinel value to determine whether or
196 not a caller wrote before the start of allocated memory; to realloc()
197 memory we either copy mh_nbytes or just change mh_nbytes if there is
198 enough room in the block for the new size. Range checking is always
199 done. */
/* Block header stored immediately before the pointer returned to the
   caller.  The union with a 64-bit member forces the header to occupy
   (and be aligned to) 8 bytes. */
union mhead {
  bits64_t mh_align;						/* 8 */
  struct {
    char     mi_alloc;		/* ISALLOC or ISFREE */		/* 1 */
    char     mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
  } minfo;
};
/* Convenience accessors for the minfo fields. */
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2
214
215 /* Access free-list pointer of a block.
216 It is stored at block + sizeof (char *).
217 This is not a field in the minfo structure member of union mhead
218 because we want sizeof (union mhead)
219 to describe the overhead for when the block is in use,
220 and we do not want the free-list pointer to count in that. */
221
222 #define CHAIN(a) \
223 (*(union mhead **) (sizeof (char *) + (char *) (a)))
224
#if defined (botch)
extern void botch ();
#else
/* Report the failed internal-consistency check S on stderr and abort.
   Used by the ASSERT macro and the corruption checks in malloc()/free(). */
static void
botch (s)
     char *s;
{
  fprintf (stderr, "\r\nmalloc: assertion botched: %s\r\n", s);
  (void)fflush (stderr);
  abort ();
}
#endif /* !botch */
237
238 #if !defined (__STRING)
239 # if defined (__STDC__)
240 # define __STRING(x) #x
241 # else
242 # define __STRING(x) "x"
243 # endif
244 #endif /* !__STRING */
245
246 /* To implement range checking, we write magic values in at the beginning
247 and end of each allocated block, and make sure they are undisturbed
248 whenever a free or a realloc occurs. */
249
250 /* Written in each of the 4 bytes following the block's real space */
251 #define MAGIC1 0x55
252 /* Written in the 2 bytes before the block's real space */
253 #define MAGIC2 0x5555
254 #define ASSERT(p) do { if (!(p)) botch(__STRING(p)); } while (0)
255 #define MSLOP 4 /* 4 bytes extra for MAGIC1s */
256
257 /* Minimum and maximum bucket indices for block splitting (and to bound
258 the search for a block to split). */
259 #define SPLIT_MIN 3
260 #define SPLIT_MID 11 /* XXX - was 9 */
261 #define SPLIT_MAX 14 /* XXX - was 12 */
262
263 /* Minimum and maximum bucket indices for block coalescing. */
264 #define COMBINE_MIN 6
265 #define COMBINE_MAX (pagebucket - 1)
266
267 #define MIN_COMBINE_FREE 4
268
269 /* nextf[i] is free list of blocks of size 2**(i + 3) */
270
271 static union mhead *nextf[NBUCKETS];
272
273 /* busy[i] is nonzero while allocation of block size i is in progress. */
274
275 static char busy[NBUCKETS];
276
277 static int pagesz; /* system page size. */
278 static int pagebucket; /* bucket for requests a page in size */
279 static int maxbuck; /* highest bucket receiving allocation request. */
280
281 #ifdef SHELL
282 extern int interrupt_immediately;
283 extern int signal_is_trapped ();
284 #endif
285
#if 0
/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as there are at least MIN_COMBINE_FREE free blocks and we
   can find two adjacent free blocks.  nextf[NU -1] is assumed to not
   be busy; the caller (morecore()) checks for this.
   (Currently compiled out; the matching call site in morecore() is
   also inside "#if 0".) */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nfree, nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0)
    return;

  /* Walk to the end of the smaller size's free chain.  On exit MP is
     the last block, MP1 the next-to-last, MP2 the one before MP1. */
  nfree = 1;
  mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  mp2 = (union mhead *)0;
  while (CHAIN (mp))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
      nfree++;
      /* We may not want to run all the way through the free list here;
	 if we do not, we need to check a threshold value here and break
	 if nfree exceeds it. */
    }
  if (nfree < MIN_COMBINE_FREE)
    return;
  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (CHAIN(mp2) != mp1)
    botch ("bcoalesce: CHAIN(mp2) != mp1");
  siz = 1 << (nbuck + 3);
  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    return;	/* not adjacent */

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
#endif

  /* Since they are adjacent, remove them from the free list */
  CHAIN (mp2) = CHAIN (mp);

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}
#endif
341
342 /* Split a block at index > NU (but less than SPLIT_MAX) into a set of
343 blocks of the correct size, and attach them to nextf[NU]. nextf[NU]
344 is assumed to be empty. Must be called with signals blocked (e.g.,
345 by morecore()). */
346 static void
347 bsplit (nu)
348 register int nu;
349 {
350 register union mhead *mp;
351 int nbuck, nblks, split_max;
352 unsigned long siz;
353
354 split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;
355
356 if (nu >= SPLIT_MID)
357 {
358 for (nbuck = split_max; nbuck > nu; nbuck--)
359 {
360 if (busy[nbuck] || nextf[nbuck] == 0)
361 continue;
362 break;
363 }
364 }
365 else
366 {
367 for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
368 {
369 if (busy[nbuck] || nextf[nbuck] == 0)
370 continue;
371 break;
372 }
373 }
374
375 if (nbuck > split_max || nbuck <= nu)
376 return;
377
378 /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
379 and nbuck is below some threshold. */
380
381 #ifdef MALLOC_STATS
382 _mstats.tbsplit++;
383 _mstats.nsplit[nbuck]++;
384 #endif
385
386 /* Figure out how many blocks we'll get. */
387 siz = (1 << (nu + 3));
388 nblks = (1 << (nbuck + 3)) / siz;
389
390 /* Remove the block from the chain of larger blocks. */
391 mp = nextf[nbuck];
392 nextf[nbuck] = CHAIN (mp);
393
394 /* Split the block and put it on the requested chain. */
395 nextf[nu] = mp;
396 while (1)
397 {
398 mp->mh_alloc = ISFREE;
399 mp->mh_index = nu;
400 if (--nblks <= 0) break;
401 CHAIN (mp) = (union mhead *)((char *)mp + siz);
402 mp = (union mhead *)((char *)mp + siz);
403 }
404 CHAIN (mp) = 0;
405 }
406
/* Block all signals, saving the previous signal mask in *OSETP so that
   unblock_signals() can restore it.  *SETP receives the full signal set.
   Used so free-list manipulation cannot be interrupted by a signal
   handler that itself calls malloc()/free(). */
static void
block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
# if defined (HAVE_BSD_SIGNALS)
  /* BSD interface: sigsetmask(-1) blocks everything and returns the
     old mask. */
  *osetp = sigsetmask (-1);
# endif
#endif
}
421
/* Restore the signal mask saved by block_signals() in *OSETP.
   SETP is unused here; it is kept for symmetry with block_signals(). */
static void
unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
# if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
# endif
#endif
}
434
/* Replenish nextf[NU], first by trying to split a larger free block
   (bsplit), then by asking the kernel for more memory with sbrk().
   On any failure nextf[NU] is simply left empty; the caller (malloc)
   detects that and returns NULL. */
static void
morecore (nu)			/* ask system for more memory */
     register int nu;		/* size index to get more of  */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler.
     Note: when SHELL is not defined the `if' below disappears and the
     compound statement always runs, so signals are always blocked. */
  blocked_sigs = 0;
#ifdef SHELL
  if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#endif
    {
      block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = 1 << (nu + 3);		/* size of desired block for nextf[nu] */

  /* The shift overflowed into the sign bit -- request is too large. */
  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
#if 0
  if (nu >= SPLIT_MIN && nu < SPLIT_MAX)
#else
  if (nu >= SPLIT_MIN)
#endif
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

#if 0
  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're withing the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }
#endif

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz % pagesz;
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  /* shouldn't happen, but just in case -- require 8-byte alignment */
  if ((long)mp & 7)
    {
      mp = (union mhead *) (((long)mp + 8) & ~7);
      nblks--;		/* the alignment slack costs us one block */
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    unblock_signals (&set, &oset);
}
545
#if defined (MEMSCRAMBLE) || !defined (NO_CALLOC)
/* Fill the N bytes starting at S with the byte value C and return S.
   Local stand-in for memset() so this allocator does not rely on the
   C library's version. */
static char *
zmemset (s, c, n)
     char *s;
     int c;
     register int n;
{
  register char *cp;

  for (cp = s; n > 0; n--)
    *cp++ = c;
  return (s);
}
#endif /* MEMSCRAMBLE || !NO_CALLOC */
561
/* Debugging hook: emits a marker string on fd 1.  Not called from this
   file; presumably a breakpoint/trace aid -- confirm before removing. */
static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}
567
/* Allocate at least N bytes and return a pointer to the user area.
   The real block is a power of two large enough for N plus the header
   and the MSLOP guard bytes; MAGIC2/MAGIC1 sentinels are written so
   free()/realloc() can detect corruption.  Returns NULL if the break
   cannot be advanced. */
PTR_T
malloc (n)		/* get a block */
     size_t n;
{
  register union mhead *p;
  register long nbytes;
  register int nunits;

  /* Get the system page size and align break pointer so everything will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    {
      register long sbrk_needed;

      pagesz = getpagesize ();
      if (pagesz < 1024)
	pagesz = 1024;
      /* OK, how much do we need to allocate to make things page-aligned?
	 This partial page is wasted space.  Once we figure out how much
	 to advance the break pointer, go ahead and do it. */
      sbrk_needed = pagesz - ((long)sbrk (0) & (pagesz - 1));	/* sbrk(0) % pagesz */
      if (sbrk_needed < 0)
	sbrk_needed += pagesz;
      /* Now allocate the wasted space. */
      if (sbrk_needed)
	{
#ifdef MALLOC_STATS
	  _mstats.nsbrk++;
	  _mstats.tsbrk += sbrk_needed;
#endif
	  if ((long)sbrk (sbrk_needed) == -1)
	    return (NULL);
	}
      /* Compute the bucket index whose block size is one page; used
	 below to short-cut the bucket search for large requests. */
      nunits = 0;
      nbytes = 8;
      while (pagesz > nbytes)
	{
	  nbytes <<= 1;
	  nunits++;
	}
      pagebucket = nunits;
    }

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 4, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = (n + sizeof *p + MSLOP + 3) & ~3;
  nunits = 0;
  if (nbytes <= (pagesz >> 1))
    {
      register unsigned int shiftr;

      shiftr = (nbytes - 1) >> 2;	/* == (nbytes - 1) / 4 */
      while (shiftr >>= 1)		/* == (nbytes - 1) / {8,16,32,...} */
	nunits++;
    }
  else
    {
      register u_bits32_t amt;

      nunits = pagebucket;
      amt = pagesz;
      while (nbytes > amt)
	{
	  amt <<= 1;
	  nunits++;
	}
    }

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution. */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    botch ("malloc: block on free list clobbered");

  /* Fill in the info, and if range checking, set up the magic numbers */
  p->mh_alloc = ISALLOC;
  p->mh_nbytes = n;
  p->mh_magic2 = MAGIC2;
  {
    /* Write the four MAGIC1 guard bytes just past the user's N bytes;
       free() and realloc() verify them to detect overruns. */
    register char *m = (char *) (p + 1) + n;

    *m++ = MAGIC1, *m++ = MAGIC1, *m++ = MAGIC1, *m = MAGIC1;
  }

#ifdef MEMSCRAMBLE
  zmemset ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
#endif /* MALLOC_STATS */
  return (char *) (p + 1);	/* XXX - should be cast to PTR_T? */
}
691
/* Return MEM, previously obtained from malloc()/realloc()/memalign(),
   to the appropriate free list.  Verifies the header and the trailing
   MAGIC1 guard bytes first; botches on corruption, double free, or a
   pointer not from this allocator.  free (NULL) is a no-op. */
void
free (mem)
     PTR_T mem;
{
  register union mhead *p;
  register char *ap;
  register int nunits;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  /* A memalign()ed block stores the offset back to the real start in
     mh_nbytes; step back to the true header first. */
  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	botch ("free: called with already freed block argument");
      else
	botch ("free: called with unallocated block argument");
    }

  /* Validate the trailing guard bytes written by malloc(). */
  ASSERT (p->mh_magic2 == MAGIC2);
  ap += p->mh_nbytes;
  ASSERT (*ap++ == MAGIC1); ASSERT (*ap++ == MAGIC1);
  ASSERT (*ap++ == MAGIC1); ASSERT (*ap == MAGIC1);

#ifdef MEMSCRAMBLE
  zmemset (mem, 0xcf, p->mh_nbytes);	/* catch references to freed memory */
#endif

  nunits = p->mh_index;

  ASSERT (nunits < NBUCKETS);
  p->mh_alloc = ISFREE;

#if 0
  if (busy[nunits] == 1)
    botch ("calling free %d while in malloc for %d", nunits, nunits);
#endif

  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */
}
750
/* Resize MEM to N bytes.  realloc (NULL, n) behaves like malloc (n);
   realloc (mem, 0) frees MEM and returns NULL.  If the new size still
   fits the block's power-of-two bucket, the block is reused in place;
   otherwise a new block is allocated, the contents copied over, and
   the old block freed. */
PTR_T
realloc (mem, n)
     PTR_T mem;
     register size_t n;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      free (mem);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return malloc (n);
  p--;
  nunits = p->mh_index;
  ASSERT (p->mh_alloc == ISALLOC);
  ASSERT (p->mh_magic2 == MAGIC2);

  /* Verify the trailing guard bytes before touching anything. */
  m = (char *)mem + (tocopy = p->mh_nbytes);
  ASSERT (*m++ == MAGIC1); ASSERT (*m++ == MAGIC1);
  ASSERT (*m++ == MAGIC1); ASSERT (*m == MAGIC1);

  /* See if desired size rounds to same power of 2 as actual size. */
  /* NOTE(review): this rounds to a multiple of 8 while malloc() rounds
     to a multiple of 4; the stricter rounding is merely conservative
     (it can only force a copy where in-place reuse might have been
     possible) -- confirm before "fixing". */
  nbytes = (n + sizeof *p + MSLOP + 7) & ~7;

  /* If ok, use the same block, just marking its size as changed.
     A bucket-NUNITS block holds 8<<nunits bytes in total. */
  if (nbytes > (4 << nunits) && nbytes <= (8 << nunits))
    {
      /* Erase the old guard bytes, record the new size, and write
	 fresh guard bytes just past the new user area. */
      m = (char *)mem + tocopy;
      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      p->mh_nbytes = n;
      m = (char *)mem + n;
      *m++ = MAGIC1;  *m++ = MAGIC1;  *m++ = MAGIC1;  *m++ = MAGIC1;
      return mem;
    }

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  if (n < tocopy)
    tocopy = n;

  if ((m = malloc (n)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  free (mem);
  return m;
}
809
810 PTR_T
811 memalign (alignment, size)
812 unsigned int alignment;
813 size_t size;
814 {
815 register char *ptr;
816 register char *aligned;
817 register union mhead *p;
818
819 ptr = malloc (size + alignment);
820
821 if (ptr == 0)
822 return 0;
823 /* If entire block has the desired alignment, just accept it. */
824 if (((int) ptr & (alignment - 1)) == 0)
825 return ptr;
826 /* Otherwise, get address of byte in the block that has that alignment. */
827 aligned = (char *) (((int) ptr + alignment - 1) & -alignment);
828
829 /* Store a suitable indication of how to free the block,
830 so that free can find the true beginning of it. */
831 p = (union mhead *) aligned - 1;
832 p->mh_nbytes = aligned - ptr;
833 p->mh_alloc = ISMEMALIGN;
834 return aligned;
835 }
836
#if !defined (HPUX)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
/* Return a block of SIZE bytes aligned on a page boundary. */
PTR_T
valloc (size)
     size_t size;
{
  return memalign (getpagesize (), size);
}
#endif /* !HPUX */
847
848 #ifndef NO_CALLOC
849 PTR_T
850 calloc (n, s)
851 size_t n, s;
852 {
853 size_t total;
854 char *result;
855
856 total = n * s;
857 result = malloc (total);
858 if (result)
859 zmemset (result, 0, total);
860 return result;
861 }
862
/* Compatibility synonym for free(), paired with the traditional
   calloc()/cfree() interface. */
void
cfree (p)
     PTR_T p;
{
  free (p);
}
869 #endif /* !NO_CALLOC */
870
871 #ifdef MALLOC_STATS
872
873 struct bucket_stats
874 malloc_bucket_stats (size)
875 int size;
876 {
877 struct bucket_stats v;
878 register union mhead *p;
879
880 v.nfree = 0;
881
882 if (size < 0 || size >= NBUCKETS)
883 {
884 v.blocksize = 0;
885 v.nused = v.nmal = v.nmorecore = v.nsplit = 0;
886 return v;
887 }
888
889 v.blocksize = 1 << (size + 3);
890 v.nused = _mstats.nmalloc[size];
891 v.nmal = _mstats.tmalloc[size];
892 v.nmorecore = _mstats.nmorecore[size];
893 v.nsplit = _mstats.nsplit[size];
894
895 for (p = nextf[size]; p; p = CHAIN (p))
896 v.nfree++;
897
898 return v;
899 }
900
901 /* Return a copy of _MSTATS, with two additional fields filled in:
902 BYTESFREE is the total number of bytes on free lists. BYTESUSED
903 is the total number of bytes in use. These two fields are fairly
904 expensive to compute, so we do it only when asked to. */
905 struct _malstats
906 malloc_stats ()
907 {
908 struct _malstats result;
909 struct bucket_stats v;
910 register int i;
911
912 result = _mstats;
913 result.bytesused = result.bytesfree = 0;
914 for (i = 0; i < NBUCKETS; i++)
915 {
916 v = malloc_bucket_stats (i);
917 result.bytesfree += v.nfree * v.blocksize;
918 result.bytesused += v.nused * v.blocksize;
919 }
920 return (result);
921 }
922
/* Write a per-bucket allocation table followed by the global counters
   from _mstats to stream FP.  S is an identifying tag printed in the
   header line (may be NULL). */
static void
_print_malloc_stats (s, fp)
     char *s;
     FILE *fp;
{
  register int i;
  int totused, totfree;
  struct bucket_stats v;

  fprintf (fp, "Memory allocation statistics: %s\n\tsize\tfree\tin use\ttotal\tmorecore\tsplit\n", s ? s : "");
  for (i = totused = totfree = 0; i < NBUCKETS; i++)
    {
      v = malloc_bucket_stats (i);
      fprintf (fp, "%12lu\t%4d\t%6d\t%5d\t%8d\t%5d\n", v.blocksize, v.nfree, v.nused, v.nmal, v.nmorecore, v.nsplit);
      totfree += v.nfree * v.blocksize;
      totused += v.nused * v.blocksize;
    }
  fprintf (fp, "\nTotal bytes in use: %d, total bytes free: %d\n",
	   totused, totfree);
  fprintf (fp, "Total mallocs: %d, total frees: %d, total reallocs: %d (%d copies)\n",
	   _mstats.nmal, _mstats.nfre, _mstats.nrealloc, _mstats.nrcopy);
  fprintf (fp, "Total sbrks: %d, total bytes via sbrk: %d\n",
	   _mstats.nsbrk, _mstats.tsbrk);
  fprintf (fp, "Total blocks split: %d, total block coalesces: %d\n",
	   _mstats.tbsplit, _mstats.tbcoalesce);
}
949
/* Print the allocation statistics report, tagged with S, to stderr. */
void
print_malloc_stats (s)
     char *s;
{
  _print_malloc_stats (s, stderr);
}
956
#define TRACEROOT "/var/tmp/maltrace/trace."
extern char *inttostr ();

/* Write the allocation statistics report, tagged with S, to the file
   TRACEROOT<pid>.  A failure to open the file is silently ignored. */
void
trace_malloc_stats (s)
     char *s;
{
  char ibuf[32], *ip;
  char fname[64];
  int p;
  FILE *fp;

  /* Build "/var/tmp/maltrace/trace.<pid>".  fname comfortably holds
     TRACEROOT plus any pid string inttostr can produce. */
  p = (int)getpid();
  ip = inttostr(p, ibuf, sizeof(ibuf));
  strcpy (fname, TRACEROOT);
  strcat (fname, ip);
  fp = fopen(fname, "w");
  if (fp)
    {
      _print_malloc_stats (s, fp);
      fflush(fp);
      fclose(fp);
    }
}
981 #endif /* MALLOC_STATS */