]> git.ipfire.org Git - thirdparty/bash.git/blame - lib/malloc/malloc.c
Imported from ../bash-3.1.tar.gz.
[thirdparty/bash.git] / lib / malloc / malloc.c
CommitLineData
cce855bc 1/* malloc.c - dynamic memory allocation for bash. */
726f6388 2
95732b49 3/* Copyright (C) 1985-2005 Free Software Foundation, Inc.
726f6388
JA
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
bb70624e 7 the Free Software Foundation; either version 2, or (at your option)
726f6388
JA
8 any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
bb70624e 17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA.
726f6388
JA
18
19In other words, you are welcome to use, share and improve this program.
20You are forbidden to forbid anyone else to use, share and improve
21what you give them. Help stamp out software-hoarding! */
22
23/*
24 * @(#)nmalloc.c 1 (Caltech) 2/21/82
25 *
26 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
27 *
28 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
29 *
30 * This is a very fast storage allocator. It allocates blocks of a small
31 * number of different sizes, and keeps free lists of each size. Blocks
32 * that don't exactly fit are passed up to the next larger size. In this
33 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
34 * This is designed for use in a program that uses vast quantities of
35 * memory, but bombs when it runs out. To make it a little better, it
36 * warns the user when he starts to get near the end.
37 *
38 * June 84, ACT: modified rcheck code to check the range given to malloc,
39 * rather than the range determined by the 2-power used.
40 *
41 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
42 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
43 * You should call malloc_init to reinitialize after loading dumped Emacs.
cce855bc 44 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
726f6388
JA
45 * realloc knows how to return same block given, just changing its size,
46 * if the power of 2 is correct.
47 */
48
49/*
50 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
51 * smallest allocatable block is 8 bytes. The overhead information will
52 * go in the first int of the block, and the returned pointer will point
53 * to the second.
726f6388
JA
54 */
55
7117c2d2
JA
56/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
57 uncover callers that refer to freed memory, and to have malloc() write 0xdf
58 into memory as it's allocated to avoid referring to previous contents. */
59
60/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
61 handled by configure. */
ccc6cda3 62
cce855bc 63#if defined (HAVE_CONFIG_H)
ccc6cda3 64# include <config.h>
cce855bc
JA
65#endif /* HAVE_CONFIG_H */
66
67#if defined (SHELL)
68# include "bashtypes.h"
f73dda09 69# include "stdc.h"
cce855bc
JA
70#else
71# include <sys/types.h>
72#endif
726f6388 73
ccc6cda3
JA
74#if defined (HAVE_UNISTD_H)
75# include <unistd.h>
76#endif
726f6388
JA
77
78/* Determine which kind of system this is. */
cce855bc
JA
79#include <signal.h>
80
81#if defined (HAVE_STRING_H)
82# include <string.h>
d166f048 83#else
cce855bc 84# include <strings.h>
d166f048 85#endif
cce855bc 86
f73dda09 87#include <stdio.h>
726f6388 88
ccc6cda3
JA
89/* Define getpagesize () if the system does not. */
90#ifndef HAVE_GETPAGESIZE
726f6388
JA
91# include "getpagesize.h"
92#endif
93
f73dda09
JA
94#include "imalloc.h"
95#ifdef MALLOC_STATS
96# include "mstats.h"
97#endif
98#ifdef MALLOC_REGISTER
99# include "table.h"
bb70624e 100#endif
7117c2d2
JA
101#ifdef MALLOC_WATCH
102# include "watch.h"
103#endif
bb70624e 104
f73dda09
JA
105/* System-specific omissions. */
106#ifdef HPUX
107# define NO_VALLOC
ccc6cda3
JA
108#endif
109
cce855bc 110#define NBUCKETS 30
726f6388
JA
111
112#define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */
113#define ISFREE ((char) 0x54) /* magic byte that implies free block */
114 /* this is for error checking only */
115#define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by
116 memalign, with the rest of the word
117 being the distance to the true
118 beginning of the block. */
726f6388 119
cce855bc
JA
120
121/* We have a flag indicating whether memory is allocated, an index in
122 nextf[], a size field, and a sentinel value to determine whether or
123 not a caller wrote before the start of allocated memory; to realloc()
124 memory we either copy mh_nbytes or just change mh_nbytes if there is
125 enough room in the block for the new size. Range checking is always
126 done. */
127union mhead {
f73dda09 128 bits64_t mh_align; /* 8 */
cce855bc 129 struct {
f73dda09
JA
130 char mi_alloc; /* ISALLOC or ISFREE */ /* 1 */
131 char mi_index; /* index in nextf[] */ /* 1 */
cce855bc 132 /* Remainder are valid only when block is allocated */
f73dda09
JA
133 u_bits16_t mi_magic2; /* should be == MAGIC2 */ /* 2 */
134 u_bits32_t mi_nbytes; /* # of bytes allocated */ /* 4 */
cce855bc 135 } minfo;
726f6388 136};
cce855bc
JA
137#define mh_alloc minfo.mi_alloc
138#define mh_index minfo.mi_index
139#define mh_nbytes minfo.mi_nbytes
140#define mh_magic2 minfo.mi_magic2
726f6388 141
7117c2d2
JA
142#define MOVERHEAD sizeof(union mhead)
143#define MALIGN_MASK 7 /* one less than desired alignment */
144
145typedef union _malloc_guard {
146 char s[4];
147 u_bits32_t i;
148} mguard_t;
149
726f6388 150/* Access free-list pointer of a block.
cce855bc 151 It is stored at block + sizeof (char *).
b72432fd
JA
152 This is not a field in the minfo structure member of union mhead
153 because we want sizeof (union mhead)
cce855bc
JA
154 to describe the overhead for when the block is in use,
155 and we do not want the free-list pointer to count in that. */
726f6388
JA
156
157#define CHAIN(a) \
cce855bc 158 (*(union mhead **) (sizeof (char *) + (char *) (a)))
726f6388 159
cce855bc
JA
160/* To implement range checking, we write magic values in at the beginning
161 and end of each allocated block, and make sure they are undisturbed
162 whenever a free or a realloc occurs. */
163
f73dda09 164/* Written in the 2 bytes before the block's real space (-4 bytes) */
cce855bc 165#define MAGIC2 0x5555
7117c2d2 166#define MSLOP 4 /* 4 bytes extra for u_bits32_t size */
cce855bc 167
f73dda09
JA
168/* How many bytes are actually allocated for a request of size N --
169 rounded up to nearest multiple of 8 after accounting for malloc
170 overhead. */
7117c2d2
JA
171#define ALLOCATED_BYTES(n) \
172 (((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
f73dda09
JA
173
174#define ASSERT(p) \
175 do \
176 { \
177 if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, __STRING(p), file, line); \
178 } \
179 while (0)
180
cce855bc
JA
181/* Minimum and maximum bucket indices for block splitting (and to bound
182 the search for a block to split). */
7117c2d2
JA
183#define SPLIT_MIN 2 /* XXX - was 3 */
184#define SPLIT_MID 11
185#define SPLIT_MAX 14
cce855bc
JA
186
187/* Minimum and maximum bucket indices for block coalescing. */
7117c2d2
JA
188#define COMBINE_MIN 2
189#define COMBINE_MAX (pagebucket - 1) /* XXX */
cce855bc 190
7117c2d2
JA
191#define LESSCORE_MIN 10
192#define LESSCORE_FRC 13
193
194#define STARTBUCK 1
726f6388 195
f73dda09
JA
196/* Flags for the internal functions. */
197#define MALLOC_WRAPPER 0x01 /* wrapper function */
198#define MALLOC_INTERNAL 0x02 /* internal function calling another */
199#define MALLOC_NOTRACE 0x04 /* don't trace this allocation or free */
200#define MALLOC_NOREG 0x08 /* don't register this allocation or free */
201
202/* Future use. */
203#define ERR_DUPFREE 0x01
204#define ERR_UNALLOC 0x02
205#define ERR_UNDERFLOW 0x04
206#define ERR_ASSERT_FAILED 0x08
207
208/* Evaluates to true if NB is appropriate for bucket NU. NB is adjusted
7117c2d2
JA
209 appropriately by the caller to account for malloc overhead. This only
210 checks that the recorded size is not too big for the bucket. We
211 can't check whether or not it's in between NU and NU-1 because we
212 might have encountered a busy bucket when allocating and moved up to
213 the next size. */
214#define IN_BUCKET(nb, nu) ((nb) <= binsizes[(nu)])
215
216/* Use this when we want to be sure that NB is in bucket NU. */
217#define RIGHT_BUCKET(nb, nu) \
218 (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
f73dda09 219
726f6388
JA
220/* nextf[i] is free list of blocks of size 2**(i + 3) */
221
cce855bc 222static union mhead *nextf[NBUCKETS];
726f6388 223
95732b49 224/* busy[i] is nonzero while allocation or free of block size i is in progress. */
726f6388 225
cce855bc 226static char busy[NBUCKETS];
726f6388 227
cce855bc
JA
228static int pagesz; /* system page size. */
229static int pagebucket; /* bucket for requests a page in size */
bb70624e 230static int maxbuck; /* highest bucket receiving allocation request. */
726f6388 231
7117c2d2
JA
232static char *memtop; /* top of heap */
233
234static unsigned long binsizes[NBUCKETS] = {
235 8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
236 8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
237 1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
238 67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
b80f6443 239 2147483648UL, 4294967295UL
7117c2d2
JA
240};
241
242/* binsizes[x] == (1 << ((x) + 3)) */
243#define binsize(x) binsizes[(x)]
244
f73dda09
JA
245/* Declarations for internal functions */
246static PTR_T internal_malloc __P((size_t, const char *, int, int));
247static PTR_T internal_realloc __P((PTR_T, size_t, const char *, int, int));
248static void internal_free __P((PTR_T, const char *, int, int));
95732b49 249static PTR_T internal_memalign __P((size_t, size_t, const char *, int, int));
f73dda09
JA
250#ifndef NO_CALLOC
251static PTR_T internal_calloc __P((size_t, size_t, const char *, int, int));
252static void internal_cfree __P((PTR_T, const char *, int, int));
253#endif
254#ifndef NO_VALLOC
255static PTR_T internal_valloc __P((size_t, const char *, int, int));
256#endif
257
258#if defined (botch)
259extern void botch ();
260#else
261static void botch __P((const char *, const char *, int));
262#endif
263static void xbotch __P((PTR_T, int, const char *, const char *, int));
264
f73dda09
JA
265#if !HAVE_DECL_SBRK
266extern char *sbrk ();
267#endif /* !HAVE_DECL_SBRK */
268
28ef6c31
JA
269#ifdef SHELL
270extern int interrupt_immediately;
f73dda09 271extern int signal_is_trapped __P((int));
28ef6c31
JA
272#endif
273
7117c2d2
JA
274#ifdef MALLOC_STATS
275struct _malstats _mstats;
276#endif /* MALLOC_STATS */
277
f73dda09
JA
278/* Debugging variables available to applications. */
279int malloc_flags = 0; /* future use */
280int malloc_trace = 0; /* trace allocations and frees to stderr */
281int malloc_register = 0; /* future use */
282
7117c2d2
JA
283#ifdef MALLOC_TRACE
284char _malloc_trace_buckets[NBUCKETS];
285
286/* These should really go into a header file. */
287extern void mtrace_alloc __P((const char *, PTR_T, size_t, const char *, int));
288extern void mtrace_free __P((PTR_T, int, const char *, int));
289#endif
290
f73dda09
JA
#if !defined (botch)
/* Default fatal-error handler: report the failed assertion text S on
   stderr and abort.  Only compiled in when the application has not
   supplied its own botch() (see the #if defined (botch) declaration
   above).  FILE and LINE are accepted for interface compatibility with
   an application-provided botch() but are not printed here; xbotch()
   has already reported them. */
static void
botch (s, file, line)
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  /* Make sure the message is out before we die. */
  (void)fflush (stderr);
  abort ();
}
#endif
303
304/* print the file and line number that caused the assertion failure and
305 call botch() to do whatever the application wants with the information */
306static void
307xbotch (mem, e, s, file, line)
308 PTR_T mem;
309 int e;
310 const char *s;
311 const char *file;
312 int line;
313{
b80f6443 314 fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
f73dda09
JA
315 file ? file : "unknown", line);
316#ifdef MALLOC_REGISTER
317 if (mem != NULL && malloc_register)
318 mregister_describe_mem (mem, stderr);
319#endif
320 (void)fflush (stderr);
321 botch(s, file, line);
322}
323
cce855bc 324/* Coalesce two adjacent free blocks off the free list for size NU - 1,
7117c2d2 325 as long as we can find two adjacent free blocks. nextf[NU -1] is
95732b49
JA
326 assumed to not be busy; the caller (morecore()) checks for this.
327 BUSY[NU] must be set to 1. */
cce855bc
JA
328static void
329bcoalesce (nu)
330 register int nu;
331{
332 register union mhead *mp, *mp1, *mp2;
7117c2d2 333 register int nbuck;
cce855bc 334 unsigned long siz;
726f6388 335
cce855bc 336 nbuck = nu - 1;
95732b49 337 if (nextf[nbuck] == 0 || busy[nbuck])
cce855bc 338 return;
726f6388 339
95732b49 340 busy[nbuck] = 1;
7117c2d2
JA
341 siz = binsize (nbuck);
342
343 mp2 = mp1 = nextf[nbuck];
cce855bc 344 mp = CHAIN (mp1);
7117c2d2 345 while (mp && mp != (union mhead *)((char *)mp1 + siz))
cce855bc
JA
346 {
347 mp2 = mp1;
348 mp1 = mp;
349 mp = CHAIN (mp);
cce855bc 350 }
95732b49 351
7117c2d2 352 if (mp == 0)
95732b49
JA
353 {
354 busy[nbuck] = 0;
355 return;
356 }
7117c2d2 357
cce855bc
JA
358 /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
359 CHAIN(mp2) must equal mp1. Check that mp1 and mp are adjacent. */
7117c2d2 360 if (mp2 != mp1 && CHAIN(mp2) != mp1)
95732b49
JA
361 {
362 busy[nbuck] = 0;
363 xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
364 }
7117c2d2
JA
365
366#ifdef MALLOC_DEBUG
cce855bc 367 if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
95732b49
JA
368 {
369 busy[nbuck] = 0;
370 return; /* not adjacent */
371 }
cce855bc 372#endif
726f6388 373
cce855bc 374 /* Since they are adjacent, remove them from the free list */
7117c2d2
JA
375 if (mp1 == nextf[nbuck])
376 nextf[nbuck] = CHAIN (mp);
377 else
378 CHAIN (mp2) = CHAIN (mp);
95732b49
JA
379 busy[nbuck] = 0;
380
381#ifdef MALLOC_STATS
382 _mstats.tbcoalesce++;
383 _mstats.ncoalesce[nbuck]++;
384#endif
726f6388 385
cce855bc
JA
386 /* And add the combined two blocks to nextf[NU]. */
387 mp1->mh_alloc = ISFREE;
388 mp1->mh_index = nu;
389 CHAIN (mp1) = nextf[nu];
390 nextf[nu] = mp1;
391}
726f6388 392
cce855bc
JA
393/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
394 blocks of the correct size, and attach them to nextf[NU]. nextf[NU]
395 is assumed to be empty. Must be called with signals blocked (e.g.,
95732b49 396 by morecore()). BUSY[NU] must be set to 1. */
cce855bc
JA
397static void
398bsplit (nu)
399 register int nu;
726f6388 400{
cce855bc 401 register union mhead *mp;
bb70624e 402 int nbuck, nblks, split_max;
cce855bc 403 unsigned long siz;
726f6388 404
bb70624e
JA
405 split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;
406
cce855bc
JA
407 if (nu >= SPLIT_MID)
408 {
bb70624e 409 for (nbuck = split_max; nbuck > nu; nbuck--)
cce855bc
JA
410 {
411 if (busy[nbuck] || nextf[nbuck] == 0)
412 continue;
413 break;
414 }
415 }
416 else
417 {
bb70624e 418 for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
cce855bc
JA
419 {
420 if (busy[nbuck] || nextf[nbuck] == 0)
421 continue;
422 break;
423 }
424 }
726f6388 425
bb70624e 426 if (nbuck > split_max || nbuck <= nu)
cce855bc
JA
427 return;
428
429 /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
430 and nbuck is below some threshold. */
431
95732b49
JA
432 /* Remove the block from the chain of larger blocks. */
433 busy[nbuck] = 1;
434 mp = nextf[nbuck];
435 nextf[nbuck] = CHAIN (mp);
436 busy[nbuck] = 0;
437
cce855bc 438#ifdef MALLOC_STATS
bb70624e
JA
439 _mstats.tbsplit++;
440 _mstats.nsplit[nbuck]++;
cce855bc
JA
441#endif
442
443 /* Figure out how many blocks we'll get. */
7117c2d2
JA
444 siz = binsize (nu);
445 nblks = binsize (nbuck) / siz;
726f6388 446
cce855bc
JA
447 /* Split the block and put it on the requested chain. */
448 nextf[nu] = mp;
449 while (1)
450 {
451 mp->mh_alloc = ISFREE;
452 mp->mh_index = nu;
453 if (--nblks <= 0) break;
454 CHAIN (mp) = (union mhead *)((char *)mp + siz);
455 mp = (union mhead *)((char *)mp + siz);
456 }
457 CHAIN (mp) = 0;
726f6388 458}
ccc6cda3 459
95732b49
JA
460/* Take the memory block MP and add it to a chain < NU. NU is the right bucket,
461 but is busy. This avoids memory orphaning. */
462static void
463xsplit (mp, nu)
464 union mhead *mp;
465 int nu;
466{
467 union mhead *nh;
468 int nbuck, nblks, split_max;
469 unsigned long siz;
470
471 nbuck = nu - 1;
472 while (nbuck >= SPLIT_MIN && busy[nbuck])
473 nbuck--;
474 if (nbuck < SPLIT_MIN)
475 return;
476
477#ifdef MALLOC_STATS
478 _mstats.tbsplit++;
479 _mstats.nsplit[nu]++;
480#endif
481
482 /* Figure out how many blocks we'll get. */
483 siz = binsize (nu); /* original block size */
484 nblks = siz / binsize (nbuck); /* should be 2 most of the time */
485
486 /* And add it to nextf[nbuck] */
487 siz = binsize (nbuck); /* XXX - resetting here */
488 nh = mp;
489 while (1)
490 {
491 mp->mh_alloc = ISFREE;
492 mp->mh_index = nbuck;
493 if (--nblks <= 0) break;
494 CHAIN (mp) = (union mhead *)((char *)mp + siz);
495 mp = (union mhead *)((char *)mp + siz);
496 }
497 busy[nbuck] = 1;
498 CHAIN (mp) = nextf[nbuck];
499 nextf[nbuck] = nh;
500 busy[nbuck] = 0;
501}
502
28ef6c31
JA
503static void
504block_signals (setp, osetp)
505 sigset_t *setp, *osetp;
506{
507#ifdef HAVE_POSIX_SIGNALS
508 sigfillset (setp);
509 sigemptyset (osetp);
510 sigprocmask (SIG_BLOCK, setp, osetp);
511#else
512# if defined (HAVE_BSD_SIGNALS)
513 *osetp = sigsetmask (-1);
514# endif
515#endif
516}
517
518static void
519unblock_signals (setp, osetp)
520 sigset_t *setp, *osetp;
521{
522#ifdef HAVE_POSIX_SIGNALS
523 sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
524#else
525# if defined (HAVE_BSD_SIGNALS)
526 sigsetmask (*osetp);
527# endif
528#endif
529}
7117c2d2
JA
530
531/* Return some memory to the system by reducing the break. This is only
532 called with NU > pagebucket, so we're always assured of giving back
533 more than one page of memory. */
534static void
535lesscore (nu) /* give system back some memory */
536 register int nu; /* size index we're discarding */
537{
538 long siz;
539
540 siz = binsize (nu);
541 /* Should check for errors here, I guess. */
542 sbrk (-siz);
543 memtop -= siz;
544
545#ifdef MALLOC_STATS
546 _mstats.nsbrk++;
547 _mstats.tsbrk -= siz;
548 _mstats.nlesscore[nu]++;
549#endif
550}
95732b49
JA
551
552/* Ask system for more memory; add to NEXTF[NU]. BUSY[NU] must be set to 1. */
726f6388 553static void
95732b49 554morecore (nu)
726f6388
JA
555 register int nu; /* size index to get more of */
556{
cce855bc 557 register union mhead *mp;
726f6388 558 register int nblks;
cce855bc
JA
559 register long siz;
560 long sbrk_amt; /* amount to get via sbrk() */
28ef6c31
JA
561 sigset_t set, oset;
562 int blocked_sigs;
726f6388 563
ccc6cda3 564 /* Block all signals in case we are executed from a signal handler. */
28ef6c31
JA
565 blocked_sigs = 0;
566#ifdef SHELL
567 if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
568#endif
569 {
570 block_signals (&set, &oset);
571 blocked_sigs = 1;
572 }
726f6388 573
7117c2d2 574 siz = binsize (nu); /* size of desired block for nextf[nu] */
cce855bc
JA
575
576 if (siz < 0)
bb70624e 577 goto morecore_done; /* oops */
cce855bc
JA
578
579#ifdef MALLOC_STATS
580 _mstats.nmorecore[nu]++;
581#endif
582
583 /* Try to split a larger block here, if we're within the range of sizes
584 to split. */
bb70624e 585 if (nu >= SPLIT_MIN)
cce855bc
JA
586 {
587 bsplit (nu);
588 if (nextf[nu] != 0)
589 goto morecore_done;
590 }
591
cce855bc 592 /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
95732b49 593 if we can, and we're within the range of the block coalescing limits. */
cce855bc
JA
594 if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
595 {
596 bcoalesce (nu);
597 if (nextf[nu] != 0)
28ef6c31 598 goto morecore_done;
cce855bc 599 }
cce855bc
JA
600
601 /* Take at least a page, and figure out how many blocks of the requested
602 size we're getting. */
603 if (siz <= pagesz)
726f6388 604 {
cce855bc
JA
605 sbrk_amt = pagesz;
606 nblks = sbrk_amt / siz;
726f6388 607 }
cce855bc
JA
608 else
609 {
610 /* We always want to request an integral multiple of the page size
611 from the kernel, so let's compute whether or not `siz' is such
612 an amount. If it is, we can just request it. If not, we want
613 the smallest integral multiple of pagesize that is larger than
614 `siz' and will satisfy the request. */
7117c2d2 615 sbrk_amt = siz & (pagesz - 1);
cce855bc
JA
616 if (sbrk_amt == 0)
617 sbrk_amt = siz;
618 else
619 sbrk_amt = siz + pagesz - sbrk_amt;
620 nblks = 1;
621 }
622
623#ifdef MALLOC_STATS
624 _mstats.nsbrk++;
625 _mstats.tsbrk += sbrk_amt;
626#endif
627
628 mp = (union mhead *) sbrk (sbrk_amt);
726f6388 629
cce855bc
JA
630 /* Totally out of memory. */
631 if ((long)mp == -1)
bb70624e 632 goto morecore_done;
cce855bc 633
7117c2d2
JA
634 memtop += sbrk_amt;
635
cce855bc 636 /* shouldn't happen, but just in case -- require 8-byte alignment */
7117c2d2 637 if ((long)mp & MALIGN_MASK)
cce855bc 638 {
7117c2d2 639 mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
726f6388
JA
640 nblks--;
641 }
642
cce855bc
JA
643 /* save new header and link the nblks blocks together */
644 nextf[nu] = mp;
726f6388
JA
645 while (1)
646 {
cce855bc
JA
647 mp->mh_alloc = ISFREE;
648 mp->mh_index = nu;
726f6388 649 if (--nblks <= 0) break;
cce855bc
JA
650 CHAIN (mp) = (union mhead *)((char *)mp + siz);
651 mp = (union mhead *)((char *)mp + siz);
726f6388 652 }
cce855bc 653 CHAIN (mp) = 0;
726f6388 654
cce855bc 655morecore_done:
28ef6c31
JA
656 if (blocked_sigs)
657 unblock_signals (&set, &oset);
726f6388
JA
658}
659
cce855bc
JA
/* Hook for debuggers to set a breakpoint on; writes a marker string to
   stdout so it is also visible in traces. */
static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}
665
7117c2d2
JA
666#define PREPOP_BIN 2
667#define PREPOP_SIZE 32
668
669static int
670pagealign ()
671{
672 register int nunits;
673 register union mhead *mp;
674 long sbrk_needed;
675 char *curbrk;
676
677 pagesz = getpagesize ();
678 if (pagesz < 1024)
679 pagesz = 1024;
680
681 /* OK, how much do we need to allocate to make things page-aligned?
682 Some of this partial page will be wasted space, but we'll use as
683 much as we can. Once we figure out how much to advance the break
684 pointer, go ahead and do it. */
685 memtop = curbrk = sbrk (0);
686 sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1)); /* sbrk(0) % pagesz */
687 if (sbrk_needed < 0)
688 sbrk_needed += pagesz;
689
690 /* Now allocate the wasted space. */
691 if (sbrk_needed)
692 {
693#ifdef MALLOC_STATS
694 _mstats.nsbrk++;
695 _mstats.tsbrk += sbrk_needed;
696#endif
697 curbrk = sbrk (sbrk_needed);
698 if ((long)curbrk == -1)
699 return -1;
700 memtop += sbrk_needed;
701
702 /* Take the memory which would otherwise be wasted and populate the most
703 popular bin (2 == 32 bytes) with it. Add whatever we need to curbrk
704 to make things 32-byte aligned, compute how many 32-byte chunks we're
705 going to get, and set up the bin. */
706 curbrk += sbrk_needed & (PREPOP_SIZE - 1);
707 sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
708 nunits = sbrk_needed / PREPOP_SIZE;
709
710 if (nunits > 0)
711 {
712 mp = (union mhead *)curbrk;
713
714 nextf[PREPOP_BIN] = mp;
715 while (1)
716 {
717 mp->mh_alloc = ISFREE;
718 mp->mh_index = PREPOP_BIN;
719 if (--nunits <= 0) break;
720 CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
721 mp = (union mhead *)((char *)mp + PREPOP_SIZE);
722 }
723 CHAIN(mp) = 0;
724 }
725 }
726
727 /* compute which bin corresponds to the page size. */
728 for (nunits = 7; nunits < NBUCKETS; nunits++)
729 if (pagesz <= binsize(nunits))
730 break;
731 pagebucket = nunits;
732
733 return 0;
734}
735
f73dda09
JA
736static PTR_T
737internal_malloc (n, file, line, flags) /* get a block */
cce855bc 738 size_t n;
f73dda09
JA
739 const char *file;
740 int line, flags;
726f6388 741{
cce855bc 742 register union mhead *p;
cce855bc 743 register int nunits;
7117c2d2
JA
744 register char *m, *z;
745 long nbytes;
746 mguard_t mg;
726f6388 747
7117c2d2 748 /* Get the system page size and align break pointer so future sbrks will
cce855bc
JA
749 be page-aligned. The page size must be at least 1K -- anything
750 smaller is increased. */
751 if (pagesz == 0)
7117c2d2
JA
752 if (pagealign () < 0)
753 return ((PTR_T)NULL);
cce855bc 754
726f6388 755 /* Figure out how many bytes are required, rounding up to the nearest
f73dda09 756 multiple of 8, then figure out which nextf[] area to use. Try to
cce855bc
JA
757 be smart about where to start searching -- if the number of bytes
758 needed is greater than the page size, we can start at pagebucket. */
f73dda09 759 nbytes = ALLOCATED_BYTES(n);
7117c2d2
JA
760 nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
761 for ( ; nunits < NBUCKETS; nunits++)
762 if (nbytes <= binsize(nunits))
763 break;
726f6388 764
f73dda09
JA
765 /* Silently reject too-large requests. */
766 if (nunits >= NBUCKETS)
767 return ((PTR_T) NULL);
768
726f6388
JA
769 /* In case this is reentrant use of malloc from signal handler,
770 pick a block size that no other malloc level is currently
771 trying to allocate. That's the easiest harmless way not to
772 interfere with the other level of execution. */
cce855bc
JA
773#ifdef MALLOC_STATS
774 if (busy[nunits]) _mstats.nrecurse++;
775#endif
726f6388
JA
776 while (busy[nunits]) nunits++;
777 busy[nunits] = 1;
778
bb70624e
JA
779 if (nunits > maxbuck)
780 maxbuck = nunits;
781
726f6388 782 /* If there are no blocks of the appropriate size, go get some */
726f6388
JA
783 if (nextf[nunits] == 0)
784 morecore (nunits);
785
786 /* Get one block off the list, and set the new list head */
cce855bc 787 if ((p = nextf[nunits]) == NULL)
726f6388
JA
788 {
789 busy[nunits] = 0;
cce855bc 790 return NULL;
726f6388
JA
791 }
792 nextf[nunits] = CHAIN (p);
793 busy[nunits] = 0;
794
795 /* Check for free block clobbered */
cce855bc
JA
796 /* If not for this check, we would gobble a clobbered free chain ptr
797 and bomb out on the NEXT allocate of this size block */
798 if (p->mh_alloc != ISFREE || p->mh_index != nunits)
b80f6443 799 xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);
726f6388 800
f73dda09 801 /* Fill in the info, and set up the magic numbers for range checking. */
cce855bc 802 p->mh_alloc = ISALLOC;
cce855bc 803 p->mh_magic2 = MAGIC2;
f73dda09 804 p->mh_nbytes = n;
726f6388 805
7117c2d2
JA
806 /* End guard */
807 mg.i = n;
808 z = mg.s;
809 m = (char *) (p + 1) + n;
810 *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
cce855bc 811
ccc6cda3 812#ifdef MEMSCRAMBLE
7117c2d2
JA
813 if (n)
814 MALLOC_MEMSET ((char *)(p + 1), 0xdf, n); /* scramble previous contents */
ccc6cda3 815#endif
cce855bc
JA
816#ifdef MALLOC_STATS
817 _mstats.nmalloc[nunits]++;
818 _mstats.tmalloc[nunits]++;
819 _mstats.nmal++;
7117c2d2 820 _mstats.bytesreq += n;
cce855bc 821#endif /* MALLOC_STATS */
f73dda09
JA
822
823#ifdef MALLOC_TRACE
824 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
825 mtrace_alloc ("malloc", p + 1, n, file, line);
7117c2d2
JA
826 else if (_malloc_trace_buckets[nunits])
827 mtrace_alloc ("malloc", p + 1, n, file, line);
f73dda09
JA
828#endif
829
830#ifdef MALLOC_REGISTER
831 if (malloc_register && (flags & MALLOC_NOREG) == 0)
832 mregister_alloc ("malloc", p + 1, n, file, line);
833#endif
834
7117c2d2
JA
835#ifdef MALLOC_WATCH
836 if (_malloc_nwatch > 0)
837 _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
838#endif
839
840 return (PTR_T) (p + 1);
726f6388
JA
841}
842
f73dda09
JA
843static void
844internal_free (mem, file, line, flags)
bb70624e 845 PTR_T mem;
f73dda09
JA
846 const char *file;
847 int line, flags;
726f6388 848{
cce855bc 849 register union mhead *p;
7117c2d2 850 register char *ap, *z;
cce855bc 851 register int nunits;
f73dda09
JA
852 register unsigned int nbytes;
853 int ubytes; /* caller-requested size */
7117c2d2 854 mguard_t mg;
cce855bc 855
bb70624e 856 if ((ap = (char *)mem) == 0)
cce855bc 857 return;
726f6388 858
cce855bc 859 p = (union mhead *) ap - 1;
726f6388 860
cce855bc
JA
861 if (p->mh_alloc == ISMEMALIGN)
862 {
863 ap -= p->mh_nbytes;
864 p = (union mhead *) ap - 1;
865 }
866
f73dda09
JA
867#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
868 if (malloc_trace || malloc_register)
869 ubytes = p->mh_nbytes;
870#endif
871
cce855bc
JA
872 if (p->mh_alloc != ISALLOC)
873 {
874 if (p->mh_alloc == ISFREE)
f73dda09 875 xbotch (mem, ERR_DUPFREE,
b80f6443 876 _("free: called with already freed block argument"), file, line);
cce855bc 877 else
f73dda09 878 xbotch (mem, ERR_UNALLOC,
b80f6443 879 _("free: called with unallocated block argument"), file, line);
cce855bc
JA
880 }
881
882 ASSERT (p->mh_magic2 == MAGIC2);
f73dda09
JA
883
884 nunits = p->mh_index;
885 nbytes = ALLOCATED_BYTES(p->mh_nbytes);
886 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
887 are now used for the number of bytes allocated, a simple check of
888 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
889 We sanity-check the value of mh_nbytes against the size of the blocks
890 in the appropriate bucket before we use it. This can still cause problems
891 and obscure errors if mh_nbytes is wrong but still within range; the
7117c2d2
JA
892 checks against the size recorded at the end of the chunk will probably
893 fail then. Using MALLOC_REGISTER will help here, since it saves the
894 original number of bytes requested. */
895
f73dda09
JA
896 if (IN_BUCKET(nbytes, nunits) == 0)
897 xbotch (mem, ERR_UNDERFLOW,
b80f6443 898 _("free: underflow detected; mh_nbytes out of range"), file, line);
f73dda09 899
cce855bc 900 ap += p->mh_nbytes;
7117c2d2
JA
901 z = mg.s;
902 *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
903 if (mg.i != p->mh_nbytes)
b80f6443 904 xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);
7117c2d2
JA
905
906#if 1
907 if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
908#else
909 if (((char *)p + binsize(nunits) == memtop) && nunits >= LESSCORE_MIN)
910#endif
911 {
912 /* If above LESSCORE_FRC, give back unconditionally. This should be set
913 high enough to be infrequently encountered. If between LESSCORE_MIN
95732b49
JA
914 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
915 there's already a block on the free list. */
7117c2d2
JA
916 if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
917 {
918 lesscore (nunits);
919 /* keeps the tracing and registering code in one place */
920 goto free_return;
921 }
922 }
726f6388 923
ccc6cda3 924#ifdef MEMSCRAMBLE
7117c2d2
JA
925 if (p->mh_nbytes)
926 MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
ccc6cda3 927#endif
cce855bc 928
cce855bc 929 ASSERT (nunits < NBUCKETS);
cce855bc 930
bb70624e 931 if (busy[nunits] == 1)
95732b49
JA
932 {
933 xsplit (p, nunits); /* split block and add to different chain */
934 goto free_return;
935 }
bb70624e 936
95732b49 937 p->mh_alloc = ISFREE;
cce855bc
JA
938 /* Protect against signal handlers calling malloc. */
939 busy[nunits] = 1;
940 /* Put this block on the free list. */
941 CHAIN (p) = nextf[nunits];
942 nextf[nunits] = p;
943 busy[nunits] = 0;
944
7117c2d2 945free_return:
b80f6443 946 ; /* Empty statement in case this is the end of the function */
7117c2d2 947
cce855bc
JA
948#ifdef MALLOC_STATS
949 _mstats.nmalloc[nunits]--;
950 _mstats.nfre++;
951#endif /* MALLOC_STATS */
f73dda09
JA
952
953#ifdef MALLOC_TRACE
954 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
955 mtrace_free (mem, ubytes, file, line);
7117c2d2
JA
956 else if (_malloc_trace_buckets[nunits])
957 mtrace_free (mem, ubytes, file, line);
f73dda09
JA
958#endif
959
960#ifdef MALLOC_REGISTER
961 if (malloc_register && (flags & MALLOC_NOREG) == 0)
962 mregister_free (mem, ubytes, file, line);
963#endif
7117c2d2
JA
964
965#ifdef MALLOC_WATCH
966 if (_malloc_nwatch > 0)
967 _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
968#endif
726f6388
JA
969}
970
f73dda09
JA
971static PTR_T
972internal_realloc (mem, n, file, line, flags)
bb70624e 973 PTR_T mem;
cce855bc 974 register size_t n;
f73dda09
JA
975 const char *file;
976 int line, flags;
726f6388 977{
cce855bc 978 register union mhead *p;
bb70624e 979 register u_bits32_t tocopy;
726f6388
JA
980 register unsigned int nbytes;
981 register int nunits;
7117c2d2
JA
982 register char *m, *z;
983 mguard_t mg;
cce855bc
JA
984
985#ifdef MALLOC_STATS
986 _mstats.nrealloc++;
987#endif
726f6388 988
cce855bc
JA
989 if (n == 0)
990 {
f73dda09 991 internal_free (mem, file, line, MALLOC_INTERNAL);
cce855bc
JA
992 return (NULL);
993 }
994 if ((p = (union mhead *) mem) == 0)
f73dda09
JA
995 return internal_malloc (n, file, line, MALLOC_INTERNAL);
996
726f6388 997 p--;
cce855bc 998 nunits = p->mh_index;
f73dda09
JA
999 ASSERT (nunits < NBUCKETS);
1000
1001 if (p->mh_alloc != ISALLOC)
1002 xbotch (mem, ERR_UNALLOC,
b80f6443 1003 _("realloc: called with unallocated block argument"), file, line);
f73dda09 1004
cce855bc 1005 ASSERT (p->mh_magic2 == MAGIC2);
f73dda09
JA
1006 nbytes = ALLOCATED_BYTES(p->mh_nbytes);
1007 /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
1008 are now used for the number of bytes allocated, a simple check of
1009 mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
1010 We sanity-check the value of mh_nbytes against the size of the blocks
1011 in the appropriate bucket before we use it. This can still cause problems
1012 and obscure errors if mh_nbytes is wrong but still within range; the
7117c2d2
JA
1013 checks against the size recorded at the end of the chunk will probably
1014 fail then. Using MALLOC_REGISTER will help here, since it saves the
1015 original number of bytes requested. */
f73dda09
JA
1016 if (IN_BUCKET(nbytes, nunits) == 0)
1017 xbotch (mem, ERR_UNDERFLOW,
b80f6443 1018 _("realloc: underflow detected; mh_nbytes out of range"), file, line);
cce855bc 1019
bb70624e 1020 m = (char *)mem + (tocopy = p->mh_nbytes);
7117c2d2
JA
1021 z = mg.s;
1022 *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
1023 if (mg.i != p->mh_nbytes)
b80f6443 1024 xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);
7117c2d2
JA
1025
1026#ifdef MALLOC_WATCH
1027 if (_malloc_nwatch > 0)
1028 _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
1029#endif
1030#ifdef MALLOC_STATS
1031 _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
1032#endif
726f6388
JA
1033
1034 /* See if desired size rounds to same power of 2 as actual size. */
f73dda09 1035 nbytes = ALLOCATED_BYTES(n);
726f6388
JA
1036
1037 /* If ok, use the same block, just marking its size as changed. */
7117c2d2 1038 if (RIGHT_BUCKET(nbytes, nunits))
726f6388 1039 {
7117c2d2
JA
1040#if 0
1041 m = (char *)mem + p->mh_nbytes;
1042#else
1043 /* Compensate for increment above. */
1044 m -= 4;
1045#endif
726f6388 1046 *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;
7117c2d2
JA
1047 m = (char *)mem + (p->mh_nbytes = n);
1048
1049 mg.i = n;
1050 z = mg.s;
1051 *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
1052
726f6388
JA
1053 return mem;
1054 }
1055
7117c2d2
JA
1056 if (n < tocopy)
1057 tocopy = n;
1058
cce855bc
JA
1059#ifdef MALLOC_STATS
1060 _mstats.nrcopy++;
1061#endif
1062
f73dda09 1063 if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
cce855bc
JA
1064 return 0;
1065 FASTCOPY (mem, m, tocopy);
f73dda09
JA
1066 internal_free (mem, file, line, MALLOC_INTERNAL);
1067
1068#ifdef MALLOC_TRACE
1069 if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
1070 mtrace_alloc ("realloc", m, n, file, line);
7117c2d2
JA
1071 else if (_malloc_trace_buckets[nunits])
1072 mtrace_alloc ("realloc", m, n, file, line);
f73dda09
JA
1073#endif
1074
1075#ifdef MALLOC_REGISTER
1076 if (malloc_register && (flags & MALLOC_NOREG) == 0)
1077 mregister_alloc ("realloc", m, n, file, line);
1078#endif
1079
7117c2d2
JA
1080#ifdef MALLOC_WATCH
1081 if (_malloc_nwatch > 0)
1082 _malloc_ckwatch (m, file, line, W_RESIZED, n);
1083#endif
1084
cce855bc 1085 return m;
726f6388
JA
1086}
1087
f73dda09
JA
1088static PTR_T
1089internal_memalign (alignment, size, file, line, flags)
95732b49 1090 size_t alignment;
cce855bc 1091 size_t size;
f73dda09
JA
1092 const char *file;
1093 int line, flags;
726f6388 1094{
ccc6cda3 1095 register char *ptr;
726f6388 1096 register char *aligned;
cce855bc 1097 register union mhead *p;
726f6388 1098
f73dda09 1099 ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);
ccc6cda3 1100
726f6388
JA
1101 if (ptr == 0)
1102 return 0;
1103 /* If entire block has the desired alignment, just accept it. */
f73dda09 1104 if (((long) ptr & (alignment - 1)) == 0)
726f6388
JA
1105 return ptr;
1106 /* Otherwise, get address of byte in the block that has that alignment. */
f73dda09
JA
1107#if 0
1108 aligned = (char *) (((long) ptr + alignment - 1) & -alignment);
1109#else
1110 aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));
1111#endif
726f6388
JA
1112
1113 /* Store a suitable indication of how to free the block,
1114 so that free can find the true beginning of it. */
cce855bc
JA
1115 p = (union mhead *) aligned - 1;
1116 p->mh_nbytes = aligned - ptr;
1117 p->mh_alloc = ISMEMALIGN;
f73dda09 1118
726f6388
JA
1119 return aligned;
1120}
1121
f73dda09 1122#if !defined (NO_VALLOC)
726f6388
JA
1123/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
1124 Patching out seems cleaner than the ugly fix needed. */
f73dda09
JA
1125static PTR_T
1126internal_valloc (size, file, line, flags)
ccc6cda3 1127 size_t size;
f73dda09
JA
1128 const char *file;
1129 int line, flags;
726f6388 1130{
f73dda09 1131 return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
726f6388 1132}
f73dda09 1133#endif /* !NO_VALLOC */
ccc6cda3
JA
1134
1135#ifndef NO_CALLOC
f73dda09
JA
1136static PTR_T
1137internal_calloc (n, s, file, line, flags)
ccc6cda3 1138 size_t n, s;
f73dda09
JA
1139 const char *file;
1140 int line, flags;
ccc6cda3
JA
1141{
1142 size_t total;
f73dda09 1143 PTR_T result;
ccc6cda3
JA
1144
1145 total = n * s;
f73dda09 1146 result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
ccc6cda3 1147 if (result)
7117c2d2 1148 memset (result, 0, total);
ccc6cda3
JA
1149 return result;
1150}
1151
f73dda09
JA
1152static void
1153internal_cfree (p, file, line, flags)
bb70624e 1154 PTR_T p;
f73dda09
JA
1155 const char *file;
1156 int line, flags;
ccc6cda3 1157{
f73dda09 1158 internal_free (p, file, line, flags|MALLOC_INTERNAL);
ccc6cda3
JA
1159}
1160#endif /* !NO_CALLOC */
1161
cce855bc 1162#ifdef MALLOC_STATS
f73dda09
JA
1163int
1164malloc_free_blocks (size)
726f6388
JA
1165 int size;
1166{
f73dda09 1167 int nfree;
cce855bc 1168 register union mhead *p;
726f6388 1169
f73dda09
JA
1170 nfree = 0;
1171 for (p = nextf[size]; p; p = CHAIN (p))
1172 nfree++;
726f6388 1173
f73dda09
JA
1174 return nfree;
1175}
1176#endif
726f6388 1177
7117c2d2 1178#if defined (MALLOC_WRAPFUNCS)
f73dda09
JA
1179PTR_T
1180sh_malloc (bytes, file, line)
1181 size_t bytes;
1182 const char *file;
1183 int line;
1184{
1185 return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
1186}
726f6388 1187
f73dda09
JA
1188PTR_T
1189sh_realloc (ptr, size, file, line)
1190 PTR_T ptr;
1191 size_t size;
1192 const char *file;
1193 int line;
1194{
1195 return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
1196}
726f6388 1197
f73dda09
JA
1198void
1199sh_free (mem, file, line)
1200 PTR_T mem;
1201 const char *file;
1202 int line;
1203{
1204 internal_free (mem, file, line, MALLOC_WRAPPER);
726f6388 1205}
ccc6cda3 1206
f73dda09
JA
1207PTR_T
1208sh_memalign (alignment, size, file, line)
95732b49 1209 size_t alignment;
f73dda09
JA
1210 size_t size;
1211 const char *file;
1212 int line;
cce855bc 1213{
f73dda09
JA
1214 return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
1215}
726f6388 1216
f73dda09
JA
1217#ifndef NO_CALLOC
1218PTR_T
1219sh_calloc (n, s, file, line)
1220 size_t n, s;
1221 const char *file;
1222 int line;
1223{
1224 return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
726f6388
JA
1225}
1226
f73dda09
JA
1227void
1228sh_cfree (mem, file, line)
1229 PTR_T mem;
1230 const char *file;
1231 int line;
726f6388 1232{
f73dda09
JA
1233 internal_cfree (mem, file, line, MALLOC_WRAPPER);
1234}
1235#endif
726f6388 1236
f73dda09
JA
1237#ifndef NO_VALLOC
1238PTR_T
1239sh_valloc (size, file, line)
1240 size_t size;
1241 const char *file;
1242 int line;
1243{
1244 return internal_valloc (size, file, line, MALLOC_WRAPPER);
1245}
7117c2d2 1246#endif /* !NO_VALLOC */
f73dda09 1247
7117c2d2 1248#endif /* MALLOC_WRAPFUNCS */
f73dda09
JA
1249
1250/* Externally-available functions that call their internal counterparts. */
1251
1252PTR_T
1253malloc (size)
1254 size_t size;
1255{
1256 return internal_malloc (size, (char *)NULL, 0, 0);
1257}
1258
1259PTR_T
1260realloc (mem, nbytes)
1261 PTR_T mem;
1262 size_t nbytes;
1263{
1264 return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
bb70624e
JA
1265}
1266
1267void
f73dda09
JA
1268free (mem)
1269 PTR_T mem;
bb70624e 1270{
f73dda09 1271 internal_free (mem, (char *)NULL, 0, 0);
bb70624e
JA
1272}
1273
f73dda09
JA
1274PTR_T
1275memalign (alignment, size)
95732b49 1276 size_t alignment;
f73dda09
JA
1277 size_t size;
1278{
1279 return internal_memalign (alignment, size, (char *)NULL, 0, 0);
1280}
1281
1282#ifndef NO_VALLOC
1283PTR_T
1284valloc (size)
1285 size_t size;
1286{
1287 return internal_valloc (size, (char *)NULL, 0, 0);
1288}
1289#endif
1290
1291#ifndef NO_CALLOC
1292PTR_T
1293calloc (n, s)
1294 size_t n, s;
1295{
1296 return internal_calloc (n, s, (char *)NULL, 0, 0);
1297}
bb70624e
JA
1298
1299void
f73dda09
JA
1300cfree (mem)
1301 PTR_T mem;
bb70624e 1302{
f73dda09 1303 internal_cfree (mem, (char *)NULL, 0, 0);
726f6388 1304}
f73dda09 1305#endif