/* lib/malloc/malloc.c -- bash's allocator (imported from bash-2.05).
   NOTE: this copy was recovered from a git-blame web view; blame
   annotations have been stripped. */
cce855bc 1/* malloc.c - dynamic memory allocation for bash. */
726f6388 2
cce855bc 3/* Copyright (C) 1985, 1987, 1997 Free Software Foundation, Inc.
726f6388
JA
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
bb70624e 7 the Free Software Foundation; either version 2, or (at your option)
726f6388
JA
8 any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
bb70624e 17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA.
726f6388
JA
18
19In other words, you are welcome to use, share and improve this program.
20You are forbidden to forbid anyone else to use, share and improve
21what you give them. Help stamp out software-hoarding! */
22
23/*
24 * @(#)nmalloc.c 1 (Caltech) 2/21/82
25 *
26 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
27 *
28 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
29 *
30 * This is a very fast storage allocator. It allocates blocks of a small
31 * number of different sizes, and keeps free lists of each size. Blocks
32 * that don't exactly fit are passed up to the next larger size. In this
33 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
34 * This is designed for use in a program that uses vast quantities of
35 * memory, but bombs when it runs out. To make it a little better, it
36 * warns the user when he starts to get near the end.
37 *
38 * June 84, ACT: modified rcheck code to check the range given to malloc,
39 * rather than the range determined by the 2-power used.
40 *
41 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
42 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
43 * You should call malloc_init to reinitialize after loading dumped Emacs.
cce855bc 44 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
726f6388
JA
45 * realloc knows how to return same block given, just changing its size,
46 * if the power of 2 is correct.
47 */
cce855bc 48#define MALLOC_STATS /* for the time being */
726f6388
JA
49
50/*
51 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
52 * smallest allocatable block is 8 bytes. The overhead information will
53 * go in the first int of the block, and the returned pointer will point
54 * to the second.
726f6388
JA
55 */
56
ccc6cda3
JA
57/* Define this to have free() write 0xcf into memory as it's freed, to
58 uncover callers that refer to freed memory. */
59/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE */
60#if !defined (NO_MEMSCRAMBLE)
61# define MEMSCRAMBLE
62#endif
63
cce855bc 64#if defined (HAVE_CONFIG_H)
ccc6cda3 65# include <config.h>
cce855bc
JA
66#endif /* HAVE_CONFIG_H */
67
68#if defined (SHELL)
69# include "bashtypes.h"
70#else
71# include <sys/types.h>
72#endif
726f6388 73
ccc6cda3
JA
74#if defined (HAVE_UNISTD_H)
75# include <unistd.h>
76#endif
726f6388
JA
77
78/* Determine which kind of system this is. */
cce855bc
JA
79#include <signal.h>
80
81#if defined (HAVE_STRING_H)
82# include <string.h>
d166f048 83#else
cce855bc 84# include <strings.h>
d166f048 85#endif
cce855bc
JA
86
87#if defined (MALLOC_STATS) || !defined (botch)
88# include <stdio.h>
89#endif /* MALLOC_STATS || !botch */
726f6388 90
ccc6cda3
JA
91/* Define getpagesize () if the system does not. */
92#ifndef HAVE_GETPAGESIZE
726f6388
JA
93# include "getpagesize.h"
94#endif
95
d166f048
JA
96#if __GNUC__ > 1
97# define FASTCOPY(s, d, n) __builtin_memcpy (d, s, n)
98#else /* !__GNUC__ */
99# if !defined (HAVE_BCOPY)
100# if !defined (HAVE_MEMMOVE)
101# define FASTCOPY(s, d, n) memcpy (d, s, n)
102# else
103# define FASTCOPY(s, d, n) memmove (d, s, n)
104# endif /* !HAVE_MEMMOVE */
105# else /* HAVE_BCOPY */
106# define FASTCOPY(s, d, n) bcopy (s, d, n)
107# endif /* HAVE_BCOPY */
108#endif /* !__GNUC__ */
109
bb70624e
JA
110/* Generic pointer type. */
111#ifndef PTR_T
112# if defined (__STDC__)
113# define PTR_T void *
114# else
115# define PTR_T char *
116# endif
117#endif
118
ccc6cda3
JA
119#if !defined (NULL)
120# define NULL 0
121#endif
122
cce855bc 123#define NBUCKETS 30
726f6388
JA
124
125#define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */
126#define ISFREE ((char) 0x54) /* magic byte that implies free block */
127 /* this is for error checking only */
128#define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by
129 memalign, with the rest of the word
130 being the distance to the true
131 beginning of the block. */
726f6388 132
ccc6cda3 133#if !defined (SBRK_DECLARED)
726f6388 134extern char *sbrk ();
ccc6cda3 135#endif /* !SBRK_DECLARED */
726f6388 136
cce855bc
JA
137#ifdef MALLOC_STATS
138/*
139 * NMALLOC[i] is the difference between the number of mallocs and frees
140 * for a given block size. TMALLOC[i] is the total number of mallocs for
141 * a given block size. NMORECORE[i] is the total number of calls to
142 * morecore(i). NMAL and NFRE are counts of the number of calls to malloc()
143 * and free(), respectively. NREALLOC is the total number of calls to
144 * realloc(); NRCOPY is the number of times realloc() had to allocate new
145 * memory and copy to it. NRECURSE is a count of the number of recursive
146 * calls to malloc() for the same bucket size, which can be caused by calls
147 * to malloc() from a signal handler. NSBRK is the number of calls to sbrk()
148 * (whether by morecore() or for alignment); TSBRK is the total number of
149 * bytes requested from the kernel with sbrk(). BYTESUSED is the total
150 * number of bytes consumed by blocks currently in use; BYTESFREE is the
bb70624e 151 * total number of bytes currently on all of the free lists. TBSPLIT is
cce855bc 152 * the number of times a larger block was split to satisfy a smaller request.
bb70624e
JA
153 * NSPLIT[i] is the number of times a block of size I was split.
154 * TBCOALESCE is the number of times two adjacent smaller blocks off the free
cce855bc
JA
155 * list were combined to satisfy a larger request.
156 */
157struct _malstats {
158 int nmalloc[NBUCKETS];
159 int tmalloc[NBUCKETS];
160 int nmorecore[NBUCKETS];
161 int nmal;
162 int nfre;
163 int nrealloc;
164 int nrcopy;
165 int nrecurse;
166 int nsbrk;
bb70624e
JA
167 bits32_t tsbrk;
168 bits32_t bytesused;
169 bits32_t bytesfree;
170 int tbsplit;
171 int nsplit[NBUCKETS];
172 int tbcoalesce;
cce855bc
JA
173};
174
175static struct _malstats _mstats;
176
177/* Return statistics describing allocation of blocks of size BLOCKSIZE.
178 NFREE is the number of free blocks for this allocation size. NUSED
179 is the number of blocks in use. NMAL is the number of requests for
180 blocks of size BLOCKSIZE. NMORECORE is the number of times we had
bb70624e
JA
181 to call MORECORE to repopulate the free list for this bucket. NSPLIT
182 is the number of times a block of this size was split to satisfy a
183 smaller request. */
cce855bc 184struct bucket_stats {
bb70624e 185 u_bits32_t blocksize;
cce855bc
JA
186 int nfree;
187 int nused;
188 int nmal;
189 int nmorecore;
bb70624e 190 int nsplit;
cce855bc
JA
191};
192#endif /* MALLOC_STATS */
193
194/* We have a flag indicating whether memory is allocated, an index in
195 nextf[], a size field, and a sentinel value to determine whether or
196 not a caller wrote before the start of allocated memory; to realloc()
197 memory we either copy mh_nbytes or just change mh_nbytes if there is
198 enough room in the block for the new size. Range checking is always
199 done. */
200union mhead {
b72432fd 201 bits64_t mh_align; /* 8 */
cce855bc
JA
202 struct {
203 char mi_alloc; /* ISALLOC or ISFREE */ /* 1 */
204 char mi_index; /* index in nextf[] */ /* 1 */
205 /* Remainder are valid only when block is allocated */
bb70624e
JA
206 u_bits32_t mi_nbytes; /* # of bytes allocated */ /* 4 */
207 u_bits16_t mi_magic2;/* should be == MAGIC2 */ /* 2 */
cce855bc 208 } minfo;
726f6388 209};
cce855bc
JA
210#define mh_alloc minfo.mi_alloc
211#define mh_index minfo.mi_index
212#define mh_nbytes minfo.mi_nbytes
213#define mh_magic2 minfo.mi_magic2
726f6388
JA
214
215/* Access free-list pointer of a block.
cce855bc 216 It is stored at block + sizeof (char *).
b72432fd
JA
217 This is not a field in the minfo structure member of union mhead
218 because we want sizeof (union mhead)
cce855bc
JA
219 to describe the overhead for when the block is in use,
220 and we do not want the free-list pointer to count in that. */
726f6388
JA
221
222#define CHAIN(a) \
cce855bc 223 (*(union mhead **) (sizeof (char *) + (char *) (a)))
726f6388 224
cce855bc
JA
225#if defined (botch)
226extern void botch ();
227#else
228static void
229botch (s)
230 char *s;
231{
232 fprintf (stderr, "\r\nmalloc: assertion botched: %s\r\n", s);
233 (void)fflush (stderr);
234 abort ();
235}
236#endif /* !botch */
726f6388 237
cce855bc
JA
238#if !defined (__STRING)
239# if defined (__STDC__)
240# define __STRING(x) #x
241# else
242# define __STRING(x) "x"
726f6388 243# endif
cce855bc 244#endif /* !__STRING */
726f6388 245
cce855bc
JA
246/* To implement range checking, we write magic values in at the beginning
247 and end of each allocated block, and make sure they are undisturbed
248 whenever a free or a realloc occurs. */
249
250/* Written in each of the 4 bytes following the block's real space */
251#define MAGIC1 0x55
252/* Written in the 2 bytes before the block's real space */
253#define MAGIC2 0x5555
254#define ASSERT(p) do { if (!(p)) botch(__STRING(p)); } while (0)
255#define MSLOP 4 /* 4 bytes extra for MAGIC1s */
256
257/* Minimum and maximum bucket indices for block splitting (and to bound
258 the search for a block to split). */
259#define SPLIT_MIN 3
bb70624e
JA
260#define SPLIT_MID 11 /* XXX - was 9 */
261#define SPLIT_MAX 14 /* XXX - was 12 */
cce855bc
JA
262
263/* Minimum and maximum bucket indices for block coalescing. */
264#define COMBINE_MIN 6
265#define COMBINE_MAX (pagebucket - 1)
266
267#define MIN_COMBINE_FREE 4
726f6388
JA
268
269/* nextf[i] is free list of blocks of size 2**(i + 3) */
270
cce855bc 271static union mhead *nextf[NBUCKETS];
726f6388
JA
272
273/* busy[i] is nonzero while allocation of block size i is in progress. */
274
cce855bc 275static char busy[NBUCKETS];
726f6388 276
cce855bc
JA
277static int pagesz; /* system page size. */
278static int pagebucket; /* bucket for requests a page in size */
bb70624e 279static int maxbuck; /* highest bucket receiving allocation request. */
726f6388 280
28ef6c31
JA
281#ifdef SHELL
282extern int interrupt_immediately;
283extern int signal_is_trapped ();
284#endif
285
cce855bc
JA
286#if 0
287/* Coalesce two adjacent free blocks off the free list for size NU - 1,
288 as long as there are at least MIN_COMBINE_FREE free blocks and we
289 can find two adjacent free blocks. nextf[NU -1] is assumed to not
290 be busy; the caller (morecore()) checks for this. */
291static void
292bcoalesce (nu)
293 register int nu;
294{
295 register union mhead *mp, *mp1, *mp2;
296 register int nfree, nbuck;
297 unsigned long siz;
726f6388 298
cce855bc
JA
299 nbuck = nu - 1;
300 if (nextf[nbuck] == 0)
301 return;
726f6388 302
cce855bc
JA
303 nfree = 1;
304 mp1 = nextf[nbuck];
305 mp = CHAIN (mp1);
306 mp2 = (union mhead *)0;
307 while (CHAIN (mp))
308 {
309 mp2 = mp1;
310 mp1 = mp;
311 mp = CHAIN (mp);
312 nfree++;
313 /* We may not want to run all the way through the free list here;
314 if we do not, we need to check a threshold value here and break
315 if nfree exceeds it. */
316 }
317 if (nfree < MIN_COMBINE_FREE)
318 return;
319 /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
320 CHAIN(mp2) must equal mp1. Check that mp1 and mp are adjacent. */
321 if (CHAIN(mp2) != mp1)
322 botch ("bcoalesce: CHAIN(mp2) != mp1");
323 siz = 1 << (nbuck + 3);
324 if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
325 return; /* not adjacent */
326
327#ifdef MALLOC_STATS
bb70624e 328 _mstats.tbcoalesce++;
cce855bc 329#endif
726f6388 330
cce855bc
JA
331 /* Since they are adjacent, remove them from the free list */
332 CHAIN (mp2) = CHAIN (mp);
726f6388 333
cce855bc
JA
334 /* And add the combined two blocks to nextf[NU]. */
335 mp1->mh_alloc = ISFREE;
336 mp1->mh_index = nu;
337 CHAIN (mp1) = nextf[nu];
338 nextf[nu] = mp1;
339}
340#endif
726f6388 341
cce855bc
JA
342/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
343 blocks of the correct size, and attach them to nextf[NU]. nextf[NU]
344 is assumed to be empty. Must be called with signals blocked (e.g.,
345 by morecore()). */
346static void
347bsplit (nu)
348 register int nu;
726f6388 349{
cce855bc 350 register union mhead *mp;
bb70624e 351 int nbuck, nblks, split_max;
cce855bc 352 unsigned long siz;
726f6388 353
bb70624e
JA
354 split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;
355
cce855bc
JA
356 if (nu >= SPLIT_MID)
357 {
bb70624e 358 for (nbuck = split_max; nbuck > nu; nbuck--)
cce855bc
JA
359 {
360 if (busy[nbuck] || nextf[nbuck] == 0)
361 continue;
362 break;
363 }
364 }
365 else
366 {
bb70624e 367 for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
cce855bc
JA
368 {
369 if (busy[nbuck] || nextf[nbuck] == 0)
370 continue;
371 break;
372 }
373 }
726f6388 374
bb70624e 375 if (nbuck > split_max || nbuck <= nu)
cce855bc
JA
376 return;
377
378 /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
379 and nbuck is below some threshold. */
380
381#ifdef MALLOC_STATS
bb70624e
JA
382 _mstats.tbsplit++;
383 _mstats.nsplit[nbuck]++;
cce855bc
JA
384#endif
385
386 /* Figure out how many blocks we'll get. */
387 siz = (1 << (nu + 3));
388 nblks = (1 << (nbuck + 3)) / siz;
726f6388 389
cce855bc
JA
390 /* Remove the block from the chain of larger blocks. */
391 mp = nextf[nbuck];
392 nextf[nbuck] = CHAIN (mp);
393
394 /* Split the block and put it on the requested chain. */
395 nextf[nu] = mp;
396 while (1)
397 {
398 mp->mh_alloc = ISFREE;
399 mp->mh_index = nu;
400 if (--nblks <= 0) break;
401 CHAIN (mp) = (union mhead *)((char *)mp + siz);
402 mp = (union mhead *)((char *)mp + siz);
403 }
404 CHAIN (mp) = 0;
726f6388 405}
ccc6cda3 406
28ef6c31
JA
407static void
408block_signals (setp, osetp)
409 sigset_t *setp, *osetp;
410{
411#ifdef HAVE_POSIX_SIGNALS
412 sigfillset (setp);
413 sigemptyset (osetp);
414 sigprocmask (SIG_BLOCK, setp, osetp);
415#else
416# if defined (HAVE_BSD_SIGNALS)
417 *osetp = sigsetmask (-1);
418# endif
419#endif
420}
421
422static void
423unblock_signals (setp, osetp)
424 sigset_t *setp, *osetp;
425{
426#ifdef HAVE_POSIX_SIGNALS
427 sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
428#else
429# if defined (HAVE_BSD_SIGNALS)
430 sigsetmask (*osetp);
431# endif
432#endif
433}
434
726f6388
JA
435static void
436morecore (nu) /* ask system for more memory */
437 register int nu; /* size index to get more of */
438{
cce855bc 439 register union mhead *mp;
726f6388 440 register int nblks;
cce855bc
JA
441 register long siz;
442 long sbrk_amt; /* amount to get via sbrk() */
28ef6c31
JA
443 sigset_t set, oset;
444 int blocked_sigs;
726f6388 445
ccc6cda3 446 /* Block all signals in case we are executed from a signal handler. */
28ef6c31
JA
447 blocked_sigs = 0;
448#ifdef SHELL
449 if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
450#endif
451 {
452 block_signals (&set, &oset);
453 blocked_sigs = 1;
454 }
726f6388 455
cce855bc
JA
456 siz = 1 << (nu + 3); /* size of desired block for nextf[nu] */
457
458 if (siz < 0)
bb70624e 459 goto morecore_done; /* oops */
cce855bc
JA
460
461#ifdef MALLOC_STATS
462 _mstats.nmorecore[nu]++;
463#endif
464
465 /* Try to split a larger block here, if we're within the range of sizes
466 to split. */
bb70624e 467#if 0
cce855bc 468 if (nu >= SPLIT_MIN && nu < SPLIT_MAX)
bb70624e
JA
469#else
470 if (nu >= SPLIT_MIN)
471#endif
cce855bc
JA
472 {
473 bsplit (nu);
474 if (nextf[nu] != 0)
475 goto morecore_done;
476 }
477
478#if 0
479 /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
480 if we can, and we're withing the range of the block coalescing limits. */
481 if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
482 {
483 bcoalesce (nu);
484 if (nextf[nu] != 0)
28ef6c31 485 goto morecore_done;
cce855bc
JA
486 }
487#endif
488
489 /* Take at least a page, and figure out how many blocks of the requested
490 size we're getting. */
491 if (siz <= pagesz)
726f6388 492 {
cce855bc
JA
493 sbrk_amt = pagesz;
494 nblks = sbrk_amt / siz;
726f6388 495 }
cce855bc
JA
496 else
497 {
498 /* We always want to request an integral multiple of the page size
499 from the kernel, so let's compute whether or not `siz' is such
500 an amount. If it is, we can just request it. If not, we want
501 the smallest integral multiple of pagesize that is larger than
502 `siz' and will satisfy the request. */
503 sbrk_amt = siz % pagesz;
504 if (sbrk_amt == 0)
505 sbrk_amt = siz;
506 else
507 sbrk_amt = siz + pagesz - sbrk_amt;
508 nblks = 1;
509 }
510
511#ifdef MALLOC_STATS
512 _mstats.nsbrk++;
513 _mstats.tsbrk += sbrk_amt;
514#endif
515
516 mp = (union mhead *) sbrk (sbrk_amt);
726f6388 517
cce855bc
JA
518 /* Totally out of memory. */
519 if ((long)mp == -1)
bb70624e 520 goto morecore_done;
cce855bc
JA
521
522 /* shouldn't happen, but just in case -- require 8-byte alignment */
523 if ((long)mp & 7)
524 {
525 mp = (union mhead *) (((long)mp + 8) & ~7);
726f6388
JA
526 nblks--;
527 }
528
cce855bc
JA
529 /* save new header and link the nblks blocks together */
530 nextf[nu] = mp;
726f6388
JA
531 while (1)
532 {
cce855bc
JA
533 mp->mh_alloc = ISFREE;
534 mp->mh_index = nu;
726f6388 535 if (--nblks <= 0) break;
cce855bc
JA
536 CHAIN (mp) = (union mhead *)((char *)mp + siz);
537 mp = (union mhead *)((char *)mp + siz);
726f6388 538 }
cce855bc 539 CHAIN (mp) = 0;
726f6388 540
cce855bc 541morecore_done:
28ef6c31
JA
542 if (blocked_sigs)
543 unblock_signals (&set, &oset);
726f6388
JA
544}
545
ccc6cda3
JA
#if defined (MEMSCRAMBLE) || !defined (NO_CALLOC)
/* Fill the first N bytes of S with the byte value C and return S.
   A private memset so the allocator never calls back into libc. */
static char *
zmemset (s, c, n)
     char *s;
     int c;
     register int n;
{
  register char *sp;

  for (sp = s; n-- > 0; )
    *sp++ = c;
  return (s);
}
#endif /* MEMSCRAMBLE || !NO_CALLOC */
561
cce855bc
JA
/* Hook for debuggers: a known symbol to breakpoint on.  Writes a marker
   to stdout so its invocation is visible in traces. */
static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}
567
bb70624e 568PTR_T
726f6388 569malloc (n) /* get a block */
cce855bc 570 size_t n;
726f6388 571{
cce855bc
JA
572 register union mhead *p;
573 register long nbytes;
574 register int nunits;
726f6388 575
cce855bc
JA
576 /* Get the system page size and align break pointer so everything will
577 be page-aligned. The page size must be at least 1K -- anything
578 smaller is increased. */
579 if (pagesz == 0)
580 {
581 register long sbrk_needed;
582
583 pagesz = getpagesize ();
584 if (pagesz < 1024)
28ef6c31 585 pagesz = 1024;
cce855bc 586 /* OK, how much do we need to allocate to make things page-aligned?
28ef6c31
JA
587 This partial page is wasted space. Once we figure out how much
588 to advance the break pointer, go ahead and do it. */
cce855bc
JA
589 sbrk_needed = pagesz - ((long)sbrk (0) & (pagesz - 1)); /* sbrk(0) % pagesz */
590 if (sbrk_needed < 0)
28ef6c31 591 sbrk_needed += pagesz;
cce855bc
JA
592 /* Now allocate the wasted space. */
593 if (sbrk_needed)
28ef6c31 594 {
cce855bc
JA
595#ifdef MALLOC_STATS
596 _mstats.nsbrk++;
597 _mstats.tsbrk += sbrk_needed;
598#endif
28ef6c31
JA
599 if ((long)sbrk (sbrk_needed) == -1)
600 return (NULL);
601 }
cce855bc
JA
602 nunits = 0;
603 nbytes = 8;
604 while (pagesz > nbytes)
28ef6c31
JA
605 {
606 nbytes <<= 1;
607 nunits++;
608 }
cce855bc
JA
609 pagebucket = nunits;
610 }
611
726f6388 612 /* Figure out how many bytes are required, rounding up to the nearest
cce855bc
JA
613 multiple of 4, then figure out which nextf[] area to use. Try to
614 be smart about where to start searching -- if the number of bytes
615 needed is greater than the page size, we can start at pagebucket. */
616 nbytes = (n + sizeof *p + MSLOP + 3) & ~3;
617 nunits = 0;
618 if (nbytes <= (pagesz >> 1))
619 {
620 register unsigned int shiftr;
726f6388 621
cce855bc
JA
622 shiftr = (nbytes - 1) >> 2; /* == (nbytes - 1) / 4 */
623 while (shiftr >>= 1) /* == (nbytes - 1) / {8,16,32,...} */
624 nunits++;
625 }
626 else
627 {
bb70624e 628 register u_bits32_t amt;
cce855bc
JA
629
630 nunits = pagebucket;
631 amt = pagesz;
632 while (nbytes > amt)
28ef6c31
JA
633 {
634 amt <<= 1;
635 nunits++;
636 }
cce855bc 637 }
726f6388
JA
638
639 /* In case this is reentrant use of malloc from signal handler,
640 pick a block size that no other malloc level is currently
641 trying to allocate. That's the easiest harmless way not to
642 interfere with the other level of execution. */
cce855bc
JA
643#ifdef MALLOC_STATS
644 if (busy[nunits]) _mstats.nrecurse++;
645#endif
726f6388
JA
646 while (busy[nunits]) nunits++;
647 busy[nunits] = 1;
648
bb70624e
JA
649 if (nunits > maxbuck)
650 maxbuck = nunits;
651
726f6388 652 /* If there are no blocks of the appropriate size, go get some */
726f6388
JA
653 if (nextf[nunits] == 0)
654 morecore (nunits);
655
656 /* Get one block off the list, and set the new list head */
cce855bc 657 if ((p = nextf[nunits]) == NULL)
726f6388
JA
658 {
659 busy[nunits] = 0;
cce855bc 660 return NULL;
726f6388
JA
661 }
662 nextf[nunits] = CHAIN (p);
663 busy[nunits] = 0;
664
665 /* Check for free block clobbered */
cce855bc
JA
666 /* If not for this check, we would gobble a clobbered free chain ptr
667 and bomb out on the NEXT allocate of this size block */
668 if (p->mh_alloc != ISFREE || p->mh_index != nunits)
669 botch ("malloc: block on free list clobbered");
726f6388
JA
670
671 /* Fill in the info, and if range checking, set up the magic numbers */
cce855bc
JA
672 p->mh_alloc = ISALLOC;
673 p->mh_nbytes = n;
674 p->mh_magic2 = MAGIC2;
726f6388
JA
675 {
676 register char *m = (char *) (p + 1) + n;
677
678 *m++ = MAGIC1, *m++ = MAGIC1, *m++ = MAGIC1, *m = MAGIC1;
679 }
cce855bc 680
ccc6cda3
JA
681#ifdef MEMSCRAMBLE
682 zmemset ((char *)(p + 1), 0xdf, n); /* scramble previous contents */
683#endif
cce855bc
JA
684#ifdef MALLOC_STATS
685 _mstats.nmalloc[nunits]++;
686 _mstats.tmalloc[nunits]++;
687 _mstats.nmal++;
688#endif /* MALLOC_STATS */
bb70624e 689 return (char *) (p + 1); /* XXX - should be cast to PTR_T? */
726f6388
JA
690}
691
692void
693free (mem)
bb70624e 694 PTR_T mem;
726f6388 695{
cce855bc
JA
696 register union mhead *p;
697 register char *ap;
698 register int nunits;
699
bb70624e 700 if ((ap = (char *)mem) == 0)
cce855bc 701 return;
726f6388 702
cce855bc 703 p = (union mhead *) ap - 1;
726f6388 704
cce855bc
JA
705 if (p->mh_alloc == ISMEMALIGN)
706 {
707 ap -= p->mh_nbytes;
708 p = (union mhead *) ap - 1;
709 }
710
711 if (p->mh_alloc != ISALLOC)
712 {
713 if (p->mh_alloc == ISFREE)
714 botch ("free: called with already freed block argument");
715 else
716 botch ("free: called with unallocated block argument");
717 }
718
719 ASSERT (p->mh_magic2 == MAGIC2);
720 ap += p->mh_nbytes;
721 ASSERT (*ap++ == MAGIC1); ASSERT (*ap++ == MAGIC1);
722 ASSERT (*ap++ == MAGIC1); ASSERT (*ap == MAGIC1);
726f6388 723
ccc6cda3 724#ifdef MEMSCRAMBLE
cce855bc 725 zmemset (mem, 0xcf, p->mh_nbytes);
ccc6cda3 726#endif
cce855bc
JA
727
728 nunits = p->mh_index;
729
730 ASSERT (nunits < NBUCKETS);
731 p->mh_alloc = ISFREE;
732
bb70624e
JA
733#if 0
734 if (busy[nunits] == 1)
735 botch ("calling free %d while in malloc for %d", nunits, nunits);
736#endif
737
cce855bc
JA
738 /* Protect against signal handlers calling malloc. */
739 busy[nunits] = 1;
740 /* Put this block on the free list. */
741 CHAIN (p) = nextf[nunits];
742 nextf[nunits] = p;
743 busy[nunits] = 0;
744
745#ifdef MALLOC_STATS
746 _mstats.nmalloc[nunits]--;
747 _mstats.nfre++;
748#endif /* MALLOC_STATS */
726f6388
JA
749}
750
bb70624e 751PTR_T
726f6388 752realloc (mem, n)
bb70624e 753 PTR_T mem;
cce855bc 754 register size_t n;
726f6388 755{
cce855bc 756 register union mhead *p;
bb70624e 757 register u_bits32_t tocopy;
726f6388
JA
758 register unsigned int nbytes;
759 register int nunits;
cce855bc
JA
760 register char *m;
761
762#ifdef MALLOC_STATS
763 _mstats.nrealloc++;
764#endif
726f6388 765
cce855bc
JA
766 if (n == 0)
767 {
768 free (mem);
769 return (NULL);
770 }
771 if ((p = (union mhead *) mem) == 0)
726f6388
JA
772 return malloc (n);
773 p--;
cce855bc
JA
774 nunits = p->mh_index;
775 ASSERT (p->mh_alloc == ISALLOC);
776 ASSERT (p->mh_magic2 == MAGIC2);
777
bb70624e 778 m = (char *)mem + (tocopy = p->mh_nbytes);
cce855bc
JA
779 ASSERT (*m++ == MAGIC1); ASSERT (*m++ == MAGIC1);
780 ASSERT (*m++ == MAGIC1); ASSERT (*m == MAGIC1);
726f6388
JA
781
782 /* See if desired size rounds to same power of 2 as actual size. */
cce855bc 783 nbytes = (n + sizeof *p + MSLOP + 7) & ~7;
726f6388
JA
784
785 /* If ok, use the same block, just marking its size as changed. */
786 if (nbytes > (4 << nunits) && nbytes <= (8 << nunits))
787 {
bb70624e 788 m = (char *)mem + tocopy;
726f6388 789 *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;
cce855bc 790 p->mh_nbytes = n;
bb70624e 791 m = (char *)mem + n;
726f6388 792 *m++ = MAGIC1; *m++ = MAGIC1; *m++ = MAGIC1; *m++ = MAGIC1;
726f6388
JA
793 return mem;
794 }
795
cce855bc
JA
796#ifdef MALLOC_STATS
797 _mstats.nrcopy++;
798#endif
799
726f6388
JA
800 if (n < tocopy)
801 tocopy = n;
726f6388 802
cce855bc
JA
803 if ((m = malloc (n)) == 0)
804 return 0;
805 FASTCOPY (mem, m, tocopy);
806 free (mem);
807 return m;
726f6388
JA
808}
809
bb70624e 810PTR_T
726f6388 811memalign (alignment, size)
cce855bc
JA
812 unsigned int alignment;
813 size_t size;
726f6388 814{
ccc6cda3 815 register char *ptr;
726f6388 816 register char *aligned;
cce855bc 817 register union mhead *p;
726f6388 818
ccc6cda3
JA
819 ptr = malloc (size + alignment);
820
726f6388
JA
821 if (ptr == 0)
822 return 0;
823 /* If entire block has the desired alignment, just accept it. */
824 if (((int) ptr & (alignment - 1)) == 0)
825 return ptr;
826 /* Otherwise, get address of byte in the block that has that alignment. */
827 aligned = (char *) (((int) ptr + alignment - 1) & -alignment);
828
829 /* Store a suitable indication of how to free the block,
830 so that free can find the true beginning of it. */
cce855bc
JA
831 p = (union mhead *) aligned - 1;
832 p->mh_nbytes = aligned - ptr;
833 p->mh_alloc = ISMEMALIGN;
726f6388
JA
834 return aligned;
835}
836
ccc6cda3 837#if !defined (HPUX)
726f6388
JA
838/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
839 Patching out seems cleaner than the ugly fix needed. */
bb70624e 840PTR_T
726f6388 841valloc (size)
ccc6cda3 842 size_t size;
726f6388
JA
843{
844 return memalign (getpagesize (), size);
845}
ccc6cda3
JA
846#endif /* !HPUX */
847
848#ifndef NO_CALLOC
bb70624e 849PTR_T
ccc6cda3
JA
850calloc (n, s)
851 size_t n, s;
852{
853 size_t total;
854 char *result;
855
856 total = n * s;
857 result = malloc (total);
858 if (result)
859 zmemset (result, 0, total);
860 return result;
861}
862
863void
864cfree (p)
bb70624e 865 PTR_T p;
ccc6cda3
JA
866{
867 free (p);
868}
869#endif /* !NO_CALLOC */
870
cce855bc 871#ifdef MALLOC_STATS
726f6388 872
cce855bc
JA
873struct bucket_stats
874malloc_bucket_stats (size)
726f6388
JA
875 int size;
876{
cce855bc
JA
877 struct bucket_stats v;
878 register union mhead *p;
726f6388
JA
879
880 v.nfree = 0;
881
cce855bc 882 if (size < 0 || size >= NBUCKETS)
726f6388
JA
883 {
884 v.blocksize = 0;
bb70624e 885 v.nused = v.nmal = v.nmorecore = v.nsplit = 0;
726f6388
JA
886 return v;
887 }
888
889 v.blocksize = 1 << (size + 3);
cce855bc
JA
890 v.nused = _mstats.nmalloc[size];
891 v.nmal = _mstats.tmalloc[size];
892 v.nmorecore = _mstats.nmorecore[size];
bb70624e 893 v.nsplit = _mstats.nsplit[size];
726f6388
JA
894
895 for (p = nextf[size]; p; p = CHAIN (p))
896 v.nfree++;
897
898 return v;
899}
ccc6cda3 900
cce855bc
JA
901/* Return a copy of _MSTATS, with two additional fields filled in:
902 BYTESFREE is the total number of bytes on free lists. BYTESUSED
903 is the total number of bytes in use. These two fields are fairly
904 expensive to compute, so we do it only when asked to. */
905struct _malstats
906malloc_stats ()
907{
908 struct _malstats result;
909 struct bucket_stats v;
910 register int i;
726f6388 911
cce855bc
JA
912 result = _mstats;
913 result.bytesused = result.bytesfree = 0;
914 for (i = 0; i < NBUCKETS; i++)
915 {
916 v = malloc_bucket_stats (i);
917 result.bytesfree += v.nfree * v.blocksize;
918 result.bytesused += v.nused * v.blocksize;
919 }
920 return (result);
726f6388
JA
921}
922
bb70624e
JA
923static void
924_print_malloc_stats (s, fp)
cce855bc 925 char *s;
bb70624e 926 FILE *fp;
726f6388 927{
cce855bc
JA
928 register int i;
929 int totused, totfree;
930 struct bucket_stats v;
726f6388 931
bb70624e 932 fprintf (fp, "Memory allocation statistics: %s\n\tsize\tfree\tin use\ttotal\tmorecore\tsplit\n", s ? s : "");
cce855bc
JA
933 for (i = totused = totfree = 0; i < NBUCKETS; i++)
934 {
935 v = malloc_bucket_stats (i);
bb70624e 936 fprintf (fp, "%12lu\t%4d\t%6d\t%5d\t%8d\t%5d\n", v.blocksize, v.nfree, v.nused, v.nmal, v.nmorecore, v.nsplit);
cce855bc
JA
937 totfree += v.nfree * v.blocksize;
938 totused += v.nused * v.blocksize;
939 }
bb70624e 940 fprintf (fp, "\nTotal bytes in use: %d, total bytes free: %d\n",
cce855bc 941 totused, totfree);
bb70624e 942 fprintf (fp, "Total mallocs: %d, total frees: %d, total reallocs: %d (%d copies)\n",
cce855bc 943 _mstats.nmal, _mstats.nfre, _mstats.nrealloc, _mstats.nrcopy);
bb70624e 944 fprintf (fp, "Total sbrks: %d, total bytes via sbrk: %d\n",
cce855bc 945 _mstats.nsbrk, _mstats.tsbrk);
bb70624e
JA
946 fprintf (fp, "Total blocks split: %d, total block coalesces: %d\n",
947 _mstats.tbsplit, _mstats.tbcoalesce);
948}
949
/* Public entry point: dump allocation statistics to stderr with the
   label S. */
void
print_malloc_stats (s)
     char *s;
{
  _print_malloc_stats (s, stderr);
}
956
#define TRACEROOT "/var/tmp/maltrace/trace."
extern char *inttostr ();

/* Dump allocation statistics (labelled S) to a per-process trace file
   named TRACEROOT<pid>.  Silently does nothing if the file cannot be
   opened. */
void
trace_malloc_stats (s)
     char *s;
{
  char ibuf[32], *ip;
  char fname[64];
  int p;
  FILE *fp;

  /* Build "/var/tmp/maltrace/trace.<pid>". */
  p = (int)getpid();
  ip = inttostr(p, ibuf, sizeof(ibuf));
  strcpy (fname, TRACEROOT);
  strcat (fname, ip);

  fp = fopen(fname, "w");
  if (fp)
    {
      _print_malloc_stats (s, fp);
      fflush(fp);
      fclose(fp);
    }
}
cce855bc 981#endif /* MALLOC_STATS */