Commit | Line | Data |
---|---|---|
cce855bc | 1 | /* malloc.c - dynamic memory allocation for bash. */ |
726f6388 | 2 | |
74091dd4 | 3 | /* Copyright (C) 1985-2021 Free Software Foundation, Inc. |
726f6388 | 4 | |
3185942a JA |
5 | This file is part of GNU Bash, the Bourne-Again SHell. |
6 | ||
7 | Bash is free software: you can redistribute it and/or modify | |
8 | it under the terms of the GNU General Public License as published by | |
9 | the Free Software Foundation, either version 3 of the License, or | |
10 | (at your option) any later version. | |
11 | ||
12 | Bash is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with Bash. If not, see <http://www.gnu.org/licenses/>. | |
19 | */ | |
726f6388 JA |
20 | |
21 | /* | |
22 | * @(#)nmalloc.c 1 (Caltech) 2/21/82 | |
23 | * | |
24 | * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs | |
25 | * | |
26 | * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD. | |
27 | * | |
8868edaf CR |
28 | * [VERY] old explanation: |
29 | * | |
726f6388 JA |
30 | * This is a very fast storage allocator. It allocates blocks of a small |
31 | * number of different sizes, and keeps free lists of each size. Blocks |
32 | * that don't exactly fit are passed up to the next larger size. In this |
33 | * implementation, the available sizes are (2^n)-4 (or -16) bytes long. |
34 | * This is designed for use in a program that uses vast quantities of |
35 | * memory, but bombs when it runs out. To make it a little better, it |
36 | * warns the user when he starts to get near the end. |
37 | * | |
38 | * June 84, ACT: modified rcheck code to check the range given to malloc, | |
39 | * rather than the range determined by the 2-power used. |
40 | * | |
41 | * Jan 85, RMS: calls malloc_warning to issue warning on nearly full. | |
42 | * No longer Emacs-specific; can serve as all-purpose malloc for GNU. |
43 | * You should call malloc_init to reinitialize after loading dumped Emacs. |
cce855bc | 44 | * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on. |
726f6388 JA |
45 | * realloc knows how to return same block given, just changing its size, |
46 | * if the power of 2 is correct. |
47 | */ | |
48 | ||
49 | /* | |
74091dd4 CR |
50 | * nextf[i] is the pointer to the next free block of size 2^(i+5). The |
51 | * smallest allocatable block is 32 bytes. The overhead information will |
52 | * go in the first 16 bytes of the block, and the returned pointer will point |
53 | * to the rest. |
726f6388 JA |
54 | */ |
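To make the sizing scheme above concrete, here is a small standalone sketch (illustrative only, not part of malloc.c) that prints the chunk size of the first few buckets; per the comment, the allocator's 16-byte header sits at the front of each chunk, so the usable portion is correspondingly smaller.

```c
/* Illustrative sketch only -- not part of malloc.c.  Bucket i holds chunks
   of 1 << (i + 5) bytes (32, 64, 128, ...), matching binsizes[] further down;
   the first 16 bytes of each chunk are the allocator's header. */
#include <stdio.h>

int
main (void)
{
  int i;

  for (i = 0; i < 8; i++)
    printf ("bucket %d: chunk size %lu bytes\n", i, 1UL << (i + 5));
  return 0;
}
```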
55 | ||
7117c2d2 JA |
56 | /* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to |
57 | uncover callers that refer to freed memory, and to have malloc() write 0xdf | |
58 | into memory as it's allocated to avoid referring to previous contents. */ | |
59 | ||
60 | /* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE; | |
61 | handled by configure. */ | |
ccc6cda3 | 62 | |
cce855bc | 63 | #if defined (HAVE_CONFIG_H) |
ccc6cda3 | 64 | # include <config.h> |
cce855bc JA |
65 | #endif /* HAVE_CONFIG_H */ |
66 | ||
67 | #if defined (SHELL) | |
68 | # include "bashtypes.h" | |
f73dda09 | 69 | # include "stdc.h" |
cce855bc JA |
70 | #else |
71 | # include <sys/types.h> | |
72 | #endif | |
726f6388 | 73 | |
ccc6cda3 JA |
74 | #if defined (HAVE_UNISTD_H) |
75 | # include <unistd.h> | |
76 | #endif | |
726f6388 JA |
77 | |
78 | /* Determine which kind of system this is. */ | |
cce855bc JA |
79 | #include <signal.h> |
80 | ||
81 | #if defined (HAVE_STRING_H) | |
82 | # include <string.h> | |
d166f048 | 83 | #else |
cce855bc | 84 | # include <strings.h> |
d166f048 | 85 | #endif |
d233b485 | 86 | #include <errno.h> |
f73dda09 | 87 | #include <stdio.h> |
726f6388 | 88 | |
8868edaf CR |
89 | #if !defined (botch) |
90 | #include <stdlib.h> | |
91 | #endif | |
92 | ||
d233b485 CR |
93 | #if defined (HAVE_MMAP) |
94 | #include <sys/mman.h> | |
95 | #endif | |
96 | ||
ccc6cda3 JA |
97 | /* Define getpagesize () if the system does not. */ |
98 | #ifndef HAVE_GETPAGESIZE | |
726f6388 JA |
99 | # include "getpagesize.h" |
100 | #endif | |
101 | ||
f73dda09 JA |
102 | #include "imalloc.h" |
103 | #ifdef MALLOC_STATS | |
104 | # include "mstats.h" | |
105 | #endif | |
106 | #ifdef MALLOC_REGISTER | |
107 | # include "table.h" | |
bb70624e | 108 | #endif |
7117c2d2 JA |
109 | #ifdef MALLOC_WATCH |
110 | # include "watch.h" | |
111 | #endif | |
bb70624e | 112 | |
d233b485 CR |
113 | #ifdef powerof2 |
114 | # undef powerof2 | |
115 | #endif | |
116 | /* Could also use (((x) & -(x)) == (x)) */ | |
117 | #define powerof2(x) ((((x) - 1) & (x)) == 0) | |
118 | ||
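A quick standalone check (illustrative only) that the powerof2() macro above and the alternative formulation mentioned in the comment agree; `powerof2_alt` is a name invented here just for the comparison.

```c
/* Illustrative check only: both power-of-two tests give the same answer
   for nonzero values (both also return true for 0). */
#include <stdio.h>

#define powerof2(x)     ((((x) - 1) & (x)) == 0)
#define powerof2_alt(x) (((x) & -(x)) == (x))   /* the variant from the comment */

int
main (void)
{
  unsigned int x;

  for (x = 1; x < 1000; x++)
    if (powerof2 (x) != powerof2_alt (x))
      printf ("formulations disagree at %u\n", x);
  printf ("checked 1..999\n");
  return 0;
}
```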
f73dda09 JA |
119 | /* System-specific omissions. */ |
120 | #ifdef HPUX | |
121 | # define NO_VALLOC | |
ccc6cda3 JA |
122 | #endif |
123 | ||
74091dd4 CR |
124 | #define MALLOC_PAGESIZE_MIN 4096 |
125 | #define MALLOC_INCR_PAGES 8192 | |
726f6388 JA |
126 | |
127 | #define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */ | |
128 | #define ISFREE ((char) 0x54) /* magic byte that implies free block */ | |
129 | /* this is for error checking only */ | |
130 | #define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by | |
131 | memalign, with the rest of the word | |
132 | being the distance to the true | |
133 | beginning of the block. */ | |
726f6388 | 134 | |
cce855bc JA |
135 | |
136 | /* We have a flag indicating whether memory is allocated, an index in | |
137 | nextf[], a size field, and a sentinel value to determine whether or | |
138 | not a caller wrote before the start of allocated memory; to realloc() | |
139 | memory we either copy mh_nbytes or just change mh_nbytes if there is | |
140 | enough room in the block for the new size. Range checking is always | |
141 | done. */ | |
142 | union mhead { | |
8868edaf | 143 | bits64_t mh_align[2]; /* 16 */ |
cce855bc | 144 | struct { |
f73dda09 JA |
145 | char mi_alloc; /* ISALLOC or ISFREE */ /* 1 */ |
146 | char mi_index; /* index in nextf[] */ /* 1 */ | |
cce855bc | 147 | /* Remainder are valid only when block is allocated */ |
f73dda09 JA |
148 | u_bits16_t mi_magic2; /* should be == MAGIC2 */ /* 2 */ |
149 | u_bits32_t mi_nbytes; /* # of bytes allocated */ /* 4 */ | |
8868edaf | 150 | char mi_magic8[8]; /* MAGIC1 guard bytes */ /* 8 */ |
cce855bc | 151 | } minfo; |
726f6388 | 152 | }; |
cce855bc JA |
153 | #define mh_alloc minfo.mi_alloc |
154 | #define mh_index minfo.mi_index | |
155 | #define mh_nbytes minfo.mi_nbytes | |
156 | #define mh_magic2 minfo.mi_magic2 | |
8868edaf | 157 | #define mh_magic8 minfo.mi_magic8 |
726f6388 | 158 | |
74091dd4 CR |
159 | #define MAGIC8_NUMBYTES 8 |
160 | #define MALLOC_SIZE_T u_bits32_t | |
161 | ||
7117c2d2 | 162 | #define MOVERHEAD sizeof(union mhead) |
8868edaf | 163 | |
74091dd4 | 164 | #define MALIGN_MASK 15 /* one less than desired alignment */ |
7117c2d2 | 165 | |
74091dd4 | 166 | /* Guard bytes we write at the end of the allocation, encoding the size. */ |
7117c2d2 JA |
167 | typedef union _malloc_guard { |
168 | char s[4]; | |
169 | u_bits32_t i; | |
170 | } mguard_t; | |
171 | ||
726f6388 | 172 | /* Access free-list pointer of a block. |
cce855bc | 173 | It is stored at block + sizeof (char *). |
b72432fd JA |
174 | This is not a field in the minfo structure member of union mhead |
175 | because we want sizeof (union mhead) | |
cce855bc JA |
176 | to describe the overhead for when the block is in use, |
177 | and we do not want the free-list pointer to count in that. */ | |
74091dd4 CR |
178 | /* If we have mmap, this is not used for chunks larger than mmap_threshold, |
179 | since we munmap immediately on free(). */ | |
726f6388 | 180 | |
8868edaf CR |
181 | /* If SIZEOF_CHAR_P == 8, this goes into the mh_magic8 buffer at the end of |
182 | the rest of the struct. This may need adjusting. */ | |
726f6388 | 183 | #define CHAIN(a) \ |
cce855bc | 184 | (*(union mhead **) (sizeof (char *) + (char *) (a))) |
726f6388 | 185 | |
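A minimal standalone sketch of the CHAIN() trick described above (illustrative only); the stripped-down 16-byte header here is an assumption standing in for the real union mhead, just to show how free chunks get threaded onto and walked off a bucket's list.

```c
/* Illustrative sketch only -- not part of malloc.c.  Shows how free chunks
   are linked into a bucket's free list via CHAIN(); the header below is a
   simplified stand-in for the real union mhead. */
#include <stdio.h>

union mhead {
  long long mh_align[2];        /* 16 bytes, like the real header */
};

#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))

int
main (void)
{
  static union mhead blk1[4], blk2[4];  /* two fake 64-byte free chunks */
  union mhead *nextf1 = 0, *p;

  /* push both chunks onto the free list, newest first, as internal_free does */
  CHAIN (blk1) = nextf1; nextf1 = blk1;
  CHAIN (blk2) = nextf1; nextf1 = blk2;

  /* walk the list the way internal_malloc() and morecore() do */
  for (p = nextf1; p; p = CHAIN (p))
    printf ("free chunk at %p\n", (void *) p);
  return 0;
}
```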
cce855bc JA |
186 | /* To implement range checking, we write magic values in at the beginning |
187 | and end of each allocated block, and make sure they are undisturbed | |
188 | whenever a free or a realloc occurs. */ | |
189 | ||
8868edaf CR |
190 | /* Written in the bytes before the block's real space (-SIZEOF_CHAR_P bytes) */ |
191 | #define MAGIC1 0x55 | |
cce855bc | 192 | #define MAGIC2 0x5555 |
74091dd4 CR |
193 | |
194 | #define MSLOP 4 /* 4 bytes extra for u_bits32_t end guard size */ | |
cce855bc | 195 | |
f73dda09 | 196 | /* How many bytes are actually allocated for a request of size N -- |
74091dd4 | 197 | rounded up to nearest multiple of 16 (alignment) after accounting for |
8868edaf | 198 | malloc overhead. */ |
7117c2d2 JA |
199 | #define ALLOCATED_BYTES(n) \ |
200 | (((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK) | |
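A worked example of the overhead accounting (illustrative only, reusing the constants above): with the 16-byte header, 4-byte end guard, and 16-byte alignment, a 100-byte request consumes 128 bytes and therefore lands in the 128-byte bucket.

```c
/* Illustrative sketch only: what ALLOCATED_BYTES() yields for a few request
   sizes, using the same constants as above (MOVERHEAD 16, MSLOP 4,
   MALIGN_MASK 15).  E.g. ALLOCATED_BYTES(100) == 128. */
#include <stdio.h>

#define MOVERHEAD   16
#define MSLOP        4
#define MALIGN_MASK 15

#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)

int
main (void)
{
  unsigned long sizes[] = { 1UL, 12UL, 13UL, 100UL, 1000UL };
  unsigned long i;

  for (i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++)
    printf ("request %4lu -> %4lu bytes allocated\n",
	    sizes[i], (unsigned long) ALLOCATED_BYTES (sizes[i]));
  return 0;
}
```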
f73dda09 JA |
201 | |
202 | #define ASSERT(p) \ | |
203 | do \ | |
204 | { \ | |
ac50fbac | 205 | if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \ |
f73dda09 JA |
206 | } \ |
207 | while (0) | |
208 | ||
cce855bc JA |
209 | /* Minimum and maximum bucket indices for block splitting (and to bound |
210 | the search for a block to split). */ | |
74091dd4 CR |
211 | #define SPLIT_MIN 1 /* 64 */ |
212 | #define SPLIT_MID 9 /* 16384 */ | |
213 | #define SPLIT_MAX 12 /* 131072 */ | |
cce855bc JA |
214 | |
215 | /* Minimum and maximum bucket indices for block coalescing. */ | |
74091dd4 CR |
216 | #define COMBINE_MIN 1 /* 64 */ |
217 | #define COMBINE_MAX (pagebucket - 1) /* 2048 for 4096-byte pages */ | |
cce855bc | 218 | |
74091dd4 CR |
219 | #define LESSCORE_MIN 8 /* 8192 */ |
220 | #define LESSCORE_FRC 11 /* 65536 */ | |
221 | ||
222 | /* Which bin do we prepopulate with the initial sbrk memory? */ | |
223 | #define PREPOP_BIN 1 | |
224 | #define PREPOP_SIZE 64 | |
7117c2d2 | 225 | |
74091dd4 | 226 | #define STARTBUCK 0 |
726f6388 | 227 | |
d233b485 CR |
228 | /* Should we use mmap for large allocations? */ |
229 | #if defined (HAVE_MMAP) | |
8868edaf CR |
230 | # if defined (MAP_ANON) && !defined (MAP_ANONYMOUS) |
231 | # define MAP_ANONYMOUS MAP_ANON | |
d233b485 CR |
232 | # endif |
233 | #endif | |
234 | ||
8868edaf | 235 | #if defined (HAVE_MMAP) && defined (MAP_ANONYMOUS) |
74091dd4 | 236 | # define USE_MMAP 1 |
d233b485 CR |
237 | #endif |
238 | ||
239 | #if defined (USE_MMAP) | |
74091dd4 | 240 | # define MMAP_THRESHOLD 12 /* must be >= SPLIT_MAX, COMBINE_MAX */ |
d233b485 CR |
241 | #else |
242 | # define MMAP_THRESHOLD (8 * SIZEOF_LONG) | |
243 | #endif | |
244 | ||
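MMAP_THRESHOLD is a bucket index, not a byte count. A tiny standalone calculation (illustrative only) of what the mmap case's threshold of 12 means in bytes: bucket 12 holds 131072-byte chunks, and morecore() only reaches for mmap() when the request needs a larger bucket than that.

```c
/* Illustrative sketch only: translate the bucket-index threshold into bytes.
   With binsize(i) == 1 << (i + 5), bucket 12 is 131072 bytes, so only chunks
   larger than 128 KiB are obtained with mmap() and released with munmap(). */
#include <stdio.h>

int
main (void)
{
  int mmap_threshold = 12;      /* MMAP_THRESHOLD when USE_MMAP is defined */

  printf ("largest sbrk-managed chunk: %lu bytes\n",
	  1UL << (mmap_threshold + 5));
  return 0;
}
```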
74091dd4 CR |
245 | /* We don't try to decipher the differences between the Linux-style and |
246 | BSD-style implementations of mremap here; we use the Linux one. */ | |
247 | #if USE_MMAP == 1 && defined (HAVE_MREMAP) && defined (MREMAP_MAYMOVE) | |
248 | # define USE_MREMAP 1 | |
249 | #endif | |
250 | ||
251 | /* usable bins from STARTBUCK..NBUCKETS-1 */ | |
252 | ||
253 | #define NBUCKETS 28 | |
254 | ||
f73dda09 JA |
255 | /* Flags for the internal functions. */ |
256 | #define MALLOC_WRAPPER 0x01 /* wrapper function */ | |
257 | #define MALLOC_INTERNAL 0x02 /* internal function calling another */ | |
258 | #define MALLOC_NOTRACE 0x04 /* don't trace this allocation or free */ | |
259 | #define MALLOC_NOREG 0x08 /* don't register this allocation or free */ | |
260 | ||
261 | /* Future use. */ | |
262 | #define ERR_DUPFREE 0x01 | |
263 | #define ERR_UNALLOC 0x02 | |
264 | #define ERR_UNDERFLOW 0x04 | |
265 | #define ERR_ASSERT_FAILED 0x08 | |
266 | ||
267 | /* Evaluates to true if NB is appropriate for bucket NU. NB is adjusted | |
7117c2d2 JA |
268 | appropriately by the caller to account for malloc overhead. This only |
269 | checks that the recorded size is not too big for the bucket. We | |
270 | can't check whether or not it's in between NU and NU-1 because we | |
271 | might have encountered a busy bucket when allocating and moved up to | |
272 | the next size. */ | |
273 | #define IN_BUCKET(nb, nu) ((nb) <= binsizes[(nu)]) | |
274 | ||
275 | /* Use this when we want to be sure that NB is in bucket NU. */ | |
276 | #define RIGHT_BUCKET(nb, nu) \ | |
277 | (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)])) | |
f73dda09 | 278 | |
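The subtlety in the comment above can be made concrete with a short standalone sketch (illustrative only; the binsizes[] table is abbreviated to its first five entries): a 128-byte chunk that was handed out from bucket 3 because bucket 2 was busy still passes IN_BUCKET for bucket 3, while RIGHT_BUCKET would reject it.

```c
/* Illustrative sketch only: why free() checks IN_BUCKET and not RIGHT_BUCKET.
   A 128-byte chunk allocated from bucket 3 (because bucket 2 was busy) still
   satisfies IN_BUCKET(128, 3) even though RIGHT_BUCKET(128, 3) is false. */
#include <stdio.h>

static const unsigned long binsizes[] = { 32UL, 64UL, 128UL, 256UL, 512UL };

#define IN_BUCKET(nb, nu)    ((nb) <= binsizes[(nu)])
#define RIGHT_BUCKET(nb, nu) (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))

int
main (void)
{
  unsigned long nb = 128;       /* recorded (rounded-up) allocation size */

  printf ("IN_BUCKET(128, 3) = %d\n", IN_BUCKET (nb, 3));       /* 1 */
  printf ("RIGHT_BUCKET(128, 3) = %d\n", RIGHT_BUCKET (nb, 3)); /* 0 */
  printf ("RIGHT_BUCKET(128, 2) = %d\n", RIGHT_BUCKET (nb, 2)); /* 1 */
  return 0;
}
```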
74091dd4 | 279 | /* nextf[i] is free list of blocks of size 2**(i + 5) */ |
726f6388 | 280 | |
cce855bc | 281 | static union mhead *nextf[NBUCKETS]; |
726f6388 | 282 | |
95732b49 | 283 | /* busy[i] is nonzero while allocation or free of block size i is in progress. */ |
726f6388 | 284 | |
cce855bc | 285 | static char busy[NBUCKETS]; |
726f6388 | 286 | |
cce855bc JA |
287 | static int pagesz; /* system page size. */ |
288 | static int pagebucket; /* bucket for requests a page in size */ | |
bb70624e | 289 | static int maxbuck; /* highest bucket receiving allocation request. */ |
726f6388 | 290 | |
7117c2d2 JA |
291 | static char *memtop; /* top of heap */ |
292 | ||
3185942a | 293 | static const unsigned long binsizes[NBUCKETS] = { |
74091dd4 | 294 | 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL, |
7117c2d2 JA |
295 | 8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL, |
296 | 1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL, | |
297 | 67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL, | |
b80f6443 | 298 | 2147483648UL, 4294967295UL |
7117c2d2 JA |
299 | }; |
300 | ||
74091dd4 | 301 | /* binsizes[x] == (1 << ((x) + 5)), except the last bucket, which is clamped to 4294967295 so it fits in 32 bits */ |
7117c2d2 JA |
302 | #define binsize(x) binsizes[(x)] |
303 | ||
74091dd4 CR |
304 | #define MAXALLOC_SIZE binsizes[NBUCKETS-1] |
305 | ||
d233b485 CR |
306 | #if !defined (errno) |
307 | extern int errno; | |
308 | #endif | |
309 | ||
f73dda09 | 310 | /* Declarations for internal functions */ |
8868edaf CR |
311 | static PTR_T internal_malloc PARAMS((size_t, const char *, int, int)); |
312 | static PTR_T internal_realloc PARAMS((PTR_T, size_t, const char *, int, int)); | |
313 | static void internal_free PARAMS((PTR_T, const char *, int, int)); | |
314 | static PTR_T internal_memalign PARAMS((size_t, size_t, const char *, int, int)); | |
f73dda09 | 315 | #ifndef NO_CALLOC |
8868edaf CR |
316 | static PTR_T internal_calloc PARAMS((size_t, size_t, const char *, int, int)); |
317 | static void internal_cfree PARAMS((PTR_T, const char *, int, int)); | |
f73dda09 JA |
318 | #endif |
319 | #ifndef NO_VALLOC | |
8868edaf | 320 | static PTR_T internal_valloc PARAMS((size_t, const char *, int, int)); |
f73dda09 | 321 | #endif |
74091dd4 | 322 | static PTR_T internal_remap PARAMS((PTR_T, size_t, int, int)); |
f73dda09 JA |
323 | |
324 | #if defined (botch) | |
325 | extern void botch (); | |
326 | #else | |
8868edaf | 327 | static void botch PARAMS((const char *, const char *, int)); |
f73dda09 | 328 | #endif |
8868edaf | 329 | static void xbotch PARAMS((PTR_T, int, const char *, const char *, int)); |
f73dda09 | 330 | |
f73dda09 JA |
331 | #if !HAVE_DECL_SBRK |
332 | extern char *sbrk (); | |
333 | #endif /* !HAVE_DECL_SBRK */ | |
334 | ||
28ef6c31 | 335 | #ifdef SHELL |
8868edaf CR |
336 | extern int running_trap; |
337 | extern int signal_is_trapped PARAMS((int)); | |
28ef6c31 JA |
338 | #endif |
339 | ||
7117c2d2 JA |
340 | #ifdef MALLOC_STATS |
341 | struct _malstats _mstats; | |
342 | #endif /* MALLOC_STATS */ | |
343 | ||
f73dda09 JA |
344 | /* Debugging variables available to applications. */ |
345 | int malloc_flags = 0; /* future use */ | |
346 | int malloc_trace = 0; /* trace allocations and frees to stderr */ | |
347 | int malloc_register = 0; /* future use */ | |
348 | ||
d233b485 CR |
349 | /* Use a variable in case we want to dynamically adapt it in the future */ |
350 | int malloc_mmap_threshold = MMAP_THRESHOLD; | |
351 | ||
7117c2d2 JA |
352 | #ifdef MALLOC_TRACE |
353 | char _malloc_trace_buckets[NBUCKETS]; | |
354 | ||
355 | /* These should really go into a header file. */ | |
8868edaf CR |
356 | extern void mtrace_alloc PARAMS((const char *, PTR_T, size_t, const char *, int)); |
357 | extern void mtrace_free PARAMS((PTR_T, int, const char *, int)); | |
7117c2d2 JA |
358 | #endif |
359 | ||
f73dda09 JA |
360 | #if !defined (botch) |
361 | static void | |
362 | botch (s, file, line) | |
b80f6443 JA |
363 | const char *s; |
364 | const char *file; | |
365 | int line; | |
f73dda09 | 366 | { |
b80f6443 | 367 | fprintf (stderr, _("malloc: failed assertion: %s\n"), s); |
f73dda09 JA |
368 | (void)fflush (stderr); |
369 | abort (); | |
370 | } | |
371 | #endif | |
372 | ||
373 | /* print the file and line number that caused the assertion failure and | |
374 | call botch() to do whatever the application wants with the information */ | |
375 | static void | |
376 | xbotch (mem, e, s, file, line) | |
377 | PTR_T mem; | |
378 | int e; | |
379 | const char *s; | |
380 | const char *file; | |
381 | int line; | |
382 | { | |
b80f6443 | 383 | fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"), |
3185942a | 384 | file ? file : _("unknown"), line); |
f73dda09 JA |
385 | #ifdef MALLOC_REGISTER |
386 | if (mem != NULL && malloc_register) | |
387 | mregister_describe_mem (mem, stderr); | |
388 | #endif | |
389 | (void)fflush (stderr); | |
390 | botch(s, file, line); | |
391 | } | |
392 | ||
cce855bc | 393 | /* Coalesce two adjacent free blocks off the free list for size NU - 1, |
7117c2d2 | 394 | as long as we can find two adjacent free blocks. nextf[NU -1] is |
95732b49 JA |
395 | assumed to not be busy; the caller (morecore()) checks for this. |
396 | BUSY[NU] must be set to 1. */ | |
cce855bc JA |
397 | static void |
398 | bcoalesce (nu) | |
399 | register int nu; | |
400 | { | |
401 | register union mhead *mp, *mp1, *mp2; | |
7117c2d2 | 402 | register int nbuck; |
cce855bc | 403 | unsigned long siz; |
726f6388 | 404 | |
cce855bc | 405 | nbuck = nu - 1; |
95732b49 | 406 | if (nextf[nbuck] == 0 || busy[nbuck]) |
cce855bc | 407 | return; |
726f6388 | 408 | |
95732b49 | 409 | busy[nbuck] = 1; |
7117c2d2 JA |
410 | siz = binsize (nbuck); |
411 | ||
412 | mp2 = mp1 = nextf[nbuck]; | |
cce855bc | 413 | mp = CHAIN (mp1); |
7117c2d2 | 414 | while (mp && mp != (union mhead *)((char *)mp1 + siz)) |
cce855bc JA |
415 | { |
416 | mp2 = mp1; | |
417 | mp1 = mp; | |
418 | mp = CHAIN (mp); | |
cce855bc | 419 | } |
95732b49 | 420 | |
7117c2d2 | 421 | if (mp == 0) |
95732b49 JA |
422 | { |
423 | busy[nbuck] = 0; | |
424 | return; | |
425 | } | |
7117c2d2 | 426 | |
cce855bc JA |
427 | /* OK, now we have mp1 pointing to the block we want to add to nextf[NU]. |
428 | CHAIN(mp2) must equal mp1. Check that mp1 and mp are adjacent. */ | |
7117c2d2 | 429 | if (mp2 != mp1 && CHAIN(mp2) != mp1) |
95732b49 JA |
430 | { |
431 | busy[nbuck] = 0; | |
432 | xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0); | |
433 | } | |
7117c2d2 JA |
434 | |
435 | #ifdef MALLOC_DEBUG | |
cce855bc | 436 | if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz)) |
95732b49 JA |
437 | { |
438 | busy[nbuck] = 0; | |
439 | return; /* not adjacent */ | |
440 | } | |
cce855bc | 441 | #endif |
726f6388 | 442 | |
cce855bc | 443 | /* Since they are adjacent, remove them from the free list */ |
7117c2d2 JA |
444 | if (mp1 == nextf[nbuck]) |
445 | nextf[nbuck] = CHAIN (mp); | |
446 | else | |
447 | CHAIN (mp2) = CHAIN (mp); | |
95732b49 JA |
448 | busy[nbuck] = 0; |
449 | ||
450 | #ifdef MALLOC_STATS | |
451 | _mstats.tbcoalesce++; | |
452 | _mstats.ncoalesce[nbuck]++; | |
453 | #endif | |
726f6388 | 454 | |
cce855bc JA |
455 | /* And add the combined two blocks to nextf[NU]. */ |
456 | mp1->mh_alloc = ISFREE; | |
457 | mp1->mh_index = nu; | |
458 | CHAIN (mp1) = nextf[nu]; | |
459 | nextf[nu] = mp1; | |
460 | } | |
726f6388 | 461 | |
cce855bc JA |
462 | /* Split a block at index > NU (but less than SPLIT_MAX) into a set of |
463 | blocks of the correct size, and attach them to nextf[NU]. nextf[NU] | |
464 | is assumed to be empty. Must be called with signals blocked (e.g., | |
95732b49 | 465 | by morecore()). BUSY[NU] must be set to 1. */ |
cce855bc JA |
466 | static void |
467 | bsplit (nu) | |
468 | register int nu; | |
726f6388 | 469 | { |
cce855bc | 470 | register union mhead *mp; |
bb70624e | 471 | int nbuck, nblks, split_max; |
cce855bc | 472 | unsigned long siz; |
726f6388 | 473 | |
bb70624e JA |
474 | split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX; |
475 | ||
cce855bc JA |
476 | if (nu >= SPLIT_MID) |
477 | { | |
bb70624e | 478 | for (nbuck = split_max; nbuck > nu; nbuck--) |
cce855bc JA |
479 | { |
480 | if (busy[nbuck] || nextf[nbuck] == 0) | |
481 | continue; | |
482 | break; | |
483 | } | |
484 | } | |
485 | else | |
486 | { | |
bb70624e | 487 | for (nbuck = nu + 1; nbuck <= split_max; nbuck++) |
cce855bc JA |
488 | { |
489 | if (busy[nbuck] || nextf[nbuck] == 0) | |
490 | continue; | |
491 | break; | |
492 | } | |
493 | } | |
726f6388 | 494 | |
bb70624e | 495 | if (nbuck > split_max || nbuck <= nu) |
cce855bc JA |
496 | return; |
497 | ||
498 | /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free | |
499 | and nbuck is below some threshold. */ | |
500 | ||
95732b49 JA |
501 | /* Remove the block from the chain of larger blocks. */ |
502 | busy[nbuck] = 1; | |
503 | mp = nextf[nbuck]; | |
504 | nextf[nbuck] = CHAIN (mp); | |
505 | busy[nbuck] = 0; | |
506 | ||
cce855bc | 507 | #ifdef MALLOC_STATS |
bb70624e JA |
508 | _mstats.tbsplit++; |
509 | _mstats.nsplit[nbuck]++; | |
cce855bc JA |
510 | #endif |
511 | ||
512 | /* Figure out how many blocks we'll get. */ | |
7117c2d2 JA |
513 | siz = binsize (nu); |
514 | nblks = binsize (nbuck) / siz; | |
726f6388 | 515 | |
cce855bc JA |
516 | /* Split the block and put it on the requested chain. */ |
517 | nextf[nu] = mp; | |
518 | while (1) | |
519 | { | |
520 | mp->mh_alloc = ISFREE; | |
521 | mp->mh_index = nu; | |
522 | if (--nblks <= 0) break; | |
523 | CHAIN (mp) = (union mhead *)((char *)mp + siz); | |
524 | mp = (union mhead *)((char *)mp + siz); | |
525 | } | |
526 | CHAIN (mp) = 0; | |
726f6388 | 527 | } |
ccc6cda3 | 528 | |
95732b49 JA |
529 | /* Take the memory block MP and add it to a chain < NU. NU is the right bucket, |
530 | but is busy. This avoids memory orphaning. */ | |
531 | static void | |
532 | xsplit (mp, nu) | |
533 | union mhead *mp; | |
534 | int nu; | |
535 | { | |
536 | union mhead *nh; | |
537 | int nbuck, nblks, split_max; | |
538 | unsigned long siz; | |
539 | ||
540 | nbuck = nu - 1; | |
541 | while (nbuck >= SPLIT_MIN && busy[nbuck]) | |
542 | nbuck--; | |
543 | if (nbuck < SPLIT_MIN) | |
544 | return; | |
545 | ||
546 | #ifdef MALLOC_STATS | |
547 | _mstats.tbsplit++; | |
548 | _mstats.nsplit[nu]++; | |
549 | #endif | |
550 | ||
551 | /* Figure out how many blocks we'll get. */ | |
552 | siz = binsize (nu); /* original block size */ | |
553 | nblks = siz / binsize (nbuck); /* should be 2 most of the time */ | |
554 | ||
555 | /* And add it to nextf[nbuck] */ | |
556 | siz = binsize (nbuck); /* XXX - resetting here */ | |
557 | nh = mp; | |
558 | while (1) | |
559 | { | |
560 | mp->mh_alloc = ISFREE; | |
561 | mp->mh_index = nbuck; | |
562 | if (--nblks <= 0) break; | |
563 | CHAIN (mp) = (union mhead *)((char *)mp + siz); | |
564 | mp = (union mhead *)((char *)mp + siz); | |
565 | } | |
566 | busy[nbuck] = 1; | |
567 | CHAIN (mp) = nextf[nbuck]; | |
568 | nextf[nbuck] = nh; | |
569 | busy[nbuck] = 0; | |
570 | } | |
571 | ||
ac50fbac CR |
572 | void |
573 | _malloc_block_signals (setp, osetp) | |
28ef6c31 JA |
574 | sigset_t *setp, *osetp; |
575 | { | |
576 | #ifdef HAVE_POSIX_SIGNALS | |
577 | sigfillset (setp); | |
578 | sigemptyset (osetp); | |
579 | sigprocmask (SIG_BLOCK, setp, osetp); | |
580 | #else | |
581 | # if defined (HAVE_BSD_SIGNALS) | |
582 | *osetp = sigsetmask (-1); | |
583 | # endif | |
584 | #endif | |
585 | } | |
586 | ||
ac50fbac CR |
587 | void |
588 | _malloc_unblock_signals (setp, osetp) | |
28ef6c31 JA |
589 | sigset_t *setp, *osetp; |
590 | { | |
591 | #ifdef HAVE_POSIX_SIGNALS | |
592 | sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL); | |
593 | #else | |
594 | # if defined (HAVE_BSD_SIGNALS) | |
595 | sigsetmask (*osetp); | |
596 | # endif | |
597 | #endif | |
598 | } | |
7117c2d2 | 599 | |
74091dd4 | 600 | #if defined (USE_LESSCORE) |
7117c2d2 JA |
601 | /* Return some memory to the system by reducing the break. This is only |
602 | called with NU > pagebucket, so we're always assured of giving back | |
603 | more than one page of memory. */ | |
604 | static void | |
605 | lesscore (nu) /* give system back some memory */ | |
606 | register int nu; /* size index we're discarding */ | |
607 | { | |
608 | long siz; | |
609 | ||
610 | siz = binsize (nu); | |
611 | /* Should check for errors here, I guess. */ | |
612 | sbrk (-siz); | |
613 | memtop -= siz; | |
614 | ||
615 | #ifdef MALLOC_STATS | |
616 | _mstats.nsbrk++; | |
617 | _mstats.tsbrk -= siz; | |
618 | _mstats.nlesscore[nu]++; | |
619 | #endif | |
620 | } | |
74091dd4 | 621 | #endif /* USE_LESSCORE */ |
95732b49 JA |
622 | |
623 | /* Ask system for more memory; add to NEXTF[NU]. BUSY[NU] must be set to 1. */ | |
726f6388 | 624 | static void |
95732b49 | 625 | morecore (nu) |
726f6388 JA |
626 | register int nu; /* size index to get more of */ |
627 | { | |
cce855bc | 628 | register union mhead *mp; |
726f6388 | 629 | register int nblks; |
cce855bc JA |
630 | register long siz; |
631 | long sbrk_amt; /* amount to get via sbrk() */ | |
28ef6c31 JA |
632 | sigset_t set, oset; |
633 | int blocked_sigs; | |
726f6388 | 634 | |
ccc6cda3 | 635 | /* Block all signals in case we are executed from a signal handler. */ |
28ef6c31 JA |
636 | blocked_sigs = 0; |
637 | #ifdef SHELL | |
ac50fbac | 638 | # if defined (SIGCHLD) |
8868edaf | 639 | if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD)) |
ac50fbac | 640 | # else |
8868edaf | 641 | if (running_trap || signal_is_trapped (SIGINT)) |
ac50fbac | 642 | # endif |
28ef6c31 JA |
643 | #endif |
644 | { | |
ac50fbac | 645 | _malloc_block_signals (&set, &oset); |
28ef6c31 JA |
646 | blocked_sigs = 1; |
647 | } | |
726f6388 | 648 | |
7117c2d2 | 649 | siz = binsize (nu); /* size of desired block for nextf[nu] */ |
cce855bc JA |
650 | |
651 | if (siz < 0) | |
bb70624e | 652 | goto morecore_done; /* oops */ |
cce855bc JA |
653 | |
654 | #ifdef MALLOC_STATS | |
655 | _mstats.nmorecore[nu]++; | |
656 | #endif | |
657 | ||
658 | /* Try to split a larger block here, if we're within the range of sizes | |
659 | to split. */ | |
d233b485 | 660 | if (nu >= SPLIT_MIN && nu <= malloc_mmap_threshold) |
cce855bc JA |
661 | { |
662 | bsplit (nu); | |
663 | if (nextf[nu] != 0) | |
664 | goto morecore_done; | |
665 | } | |
666 | ||
cce855bc | 667 | /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1], |
95732b49 | 668 | if we can, and we're within the range of the block coalescing limits. */ |
d233b485 | 669 | if (nu >= COMBINE_MIN && nu < COMBINE_MAX && nu <= malloc_mmap_threshold && busy[nu - 1] == 0 && nextf[nu - 1]) |
cce855bc JA |
670 | { |
671 | bcoalesce (nu); | |
672 | if (nextf[nu] != 0) | |
28ef6c31 | 673 | goto morecore_done; |
cce855bc | 674 | } |
cce855bc JA |
675 | |
676 | /* Take at least a page, and figure out how many blocks of the requested | |
677 | size we're getting. */ | |
678 | if (siz <= pagesz) | |
726f6388 | 679 | { |
cce855bc JA |
680 | sbrk_amt = pagesz; |
681 | nblks = sbrk_amt / siz; | |
726f6388 | 682 | } |
cce855bc JA |
683 | else |
684 | { | |
685 | /* We always want to request an integral multiple of the page size | |
686 | from the kernel, so let's compute whether or not `siz' is such | |
687 | an amount. If it is, we can just request it. If not, we want | |
688 | the smallest integral multiple of pagesize that is larger than | |
689 | `siz' and will satisfy the request. */ | |
7117c2d2 | 690 | sbrk_amt = siz & (pagesz - 1); |
cce855bc JA |
691 | if (sbrk_amt == 0) |
692 | sbrk_amt = siz; | |
693 | else | |
694 | sbrk_amt = siz + pagesz - sbrk_amt; | |
695 | nblks = 1; | |
696 | } | |
697 | ||
d233b485 CR |
698 | #if defined (USE_MMAP) |
699 | if (nu > malloc_mmap_threshold) | |
700 | { | |
701 | mp = (union mhead *)mmap (0, sbrk_amt, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
702 | if ((void *)mp == MAP_FAILED) | |
703 | goto morecore_done; | |
704 | nextf[nu] = mp; | |
705 | mp->mh_alloc = ISFREE; | |
706 | mp->mh_index = nu; | |
707 | CHAIN (mp) = 0; | |
708 | #ifdef MALLOC_STATS | |
709 | _mstats.nmmap++; | |
710 | _mstats.tmmap += sbrk_amt; | |
711 | #endif | |
712 | goto morecore_done; | |
713 | } | |
714 | #endif | |
715 | ||
716 | ||
cce855bc JA |
717 | #ifdef MALLOC_STATS |
718 | _mstats.nsbrk++; | |
719 | _mstats.tsbrk += sbrk_amt; | |
720 | #endif | |
721 | ||
722 | mp = (union mhead *) sbrk (sbrk_amt); | |
726f6388 | 723 | |
cce855bc JA |
724 | /* Totally out of memory. */ |
725 | if ((long)mp == -1) | |
bb70624e | 726 | goto morecore_done; |
cce855bc | 727 | |
7117c2d2 JA |
728 | memtop += sbrk_amt; |
729 | ||
8868edaf | 730 | /* shouldn't happen, but just in case -- require 16-byte alignment */ |
7117c2d2 | 731 | if ((long)mp & MALIGN_MASK) |
cce855bc | 732 | { |
7117c2d2 | 733 | mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK); |
726f6388 JA |
734 | nblks--; |
735 | } | |
736 | ||
cce855bc JA |
737 | /* save new header and link the nblks blocks together */ |
738 | nextf[nu] = mp; | |
726f6388 JA |
739 | while (1) |
740 | { | |
cce855bc JA |
741 | mp->mh_alloc = ISFREE; |
742 | mp->mh_index = nu; | |
726f6388 | 743 | if (--nblks <= 0) break; |
cce855bc JA |
744 | CHAIN (mp) = (union mhead *)((char *)mp + siz); |
745 | mp = (union mhead *)((char *)mp + siz); | |
726f6388 | 746 | } |
cce855bc | 747 | CHAIN (mp) = 0; |
726f6388 | 748 | |
cce855bc | 749 | morecore_done: |
28ef6c31 | 750 | if (blocked_sigs) |
ac50fbac | 751 | _malloc_unblock_signals (&set, &oset); |
726f6388 JA |
752 | } |
753 | ||
cce855bc JA |
754 | static void |
755 | malloc_debug_dummy () | |
756 | { | |
bb70624e | 757 | write (1, "malloc_debug_dummy\n", 19); |
cce855bc JA |
758 | } |
759 | ||
7117c2d2 JA |
760 | static int |
761 | pagealign () | |
762 | { | |
763 | register int nunits; | |
764 | register union mhead *mp; | |
765 | long sbrk_needed; | |
766 | char *curbrk; | |
767 | ||
768 | pagesz = getpagesize (); | |
74091dd4 CR |
769 | if (pagesz < MALLOC_PAGESIZE_MIN) |
770 | pagesz = MALLOC_PAGESIZE_MIN; | |
7117c2d2 JA |
771 | |
772 | /* OK, how much do we need to allocate to make things page-aligned? | |
773 | Some of this partial page will be wasted space, but we'll use as | |
774 | much as we can. Once we figure out how much to advance the break | |
775 | pointer, go ahead and do it. */ | |
776 | memtop = curbrk = sbrk (0); | |
777 | sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1)); /* sbrk(0) % pagesz */ | |
778 | if (sbrk_needed < 0) | |
779 | sbrk_needed += pagesz; | |
780 | ||
781 | /* Now allocate the wasted space. */ | |
782 | if (sbrk_needed) | |
783 | { | |
784 | #ifdef MALLOC_STATS | |
785 | _mstats.nsbrk++; | |
786 | _mstats.tsbrk += sbrk_needed; | |
787 | #endif | |
788 | curbrk = sbrk (sbrk_needed); | |
789 | if ((long)curbrk == -1) | |
790 | return -1; | |
791 | memtop += sbrk_needed; | |
792 | ||
793 | /* Take the memory which would otherwise be wasted and populate the most | |
8868edaf CR |
794 | popular bin (PREPOP_BIN == 1, 64-byte chunks) with it. Add whatever we need to curbrk |
795 | to make things 64-byte aligned, compute how many 64-byte chunks we're | |
7117c2d2 JA |
796 | going to get, and set up the bin. */ |
797 | curbrk += sbrk_needed & (PREPOP_SIZE - 1); | |
798 | sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1); | |
799 | nunits = sbrk_needed / PREPOP_SIZE; | |
800 | ||
801 | if (nunits > 0) | |
802 | { | |
803 | mp = (union mhead *)curbrk; | |
804 | ||
805 | nextf[PREPOP_BIN] = mp; | |
806 | while (1) | |
807 | { | |
808 | mp->mh_alloc = ISFREE; | |
809 | mp->mh_index = PREPOP_BIN; | |
810 | if (--nunits <= 0) break; | |
811 | CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE); | |
812 | mp = (union mhead *)((char *)mp + PREPOP_SIZE); | |
813 | } | |
814 | CHAIN(mp) = 0; | |
815 | } | |
816 | } | |
817 | ||
818 | /* compute which bin corresponds to the page size. */ | |
819 | for (nunits = 7; nunits < NBUCKETS; nunits++) | |
820 | if (pagesz <= binsize(nunits)) | |
821 | break; | |
822 | pagebucket = nunits; | |
823 | ||
824 | return 0; | |
825 | } | |
826 | ||
f73dda09 JA |
827 | static PTR_T |
828 | internal_malloc (n, file, line, flags) /* get a block */ | |
cce855bc | 829 | size_t n; |
f73dda09 JA |
830 | const char *file; |
831 | int line, flags; | |
726f6388 | 832 | { |
cce855bc | 833 | register union mhead *p; |
cce855bc | 834 | register int nunits; |
7117c2d2 | 835 | register char *m, *z; |
74091dd4 | 836 | MALLOC_SIZE_T nbytes; |
7117c2d2 | 837 | mguard_t mg; |
726f6388 | 838 | |
7117c2d2 | 839 | /* Get the system page size and align break pointer so future sbrks will |
cce855bc JA |
840 | be page-aligned. The page size must be at least MALLOC_PAGESIZE_MIN |
841 | (4096 bytes) -- anything smaller is increased. */ |
842 | if (pagesz == 0) | |
7117c2d2 JA |
843 | if (pagealign () < 0) |
844 | return ((PTR_T)NULL); | |
cce855bc | 845 | |
726f6388 | 846 | /* Figure out how many bytes are required, rounding up to the nearest |
f73dda09 | 847 | multiple of 16, then figure out which nextf[] area to use. Try to |
cce855bc JA |
848 | be smart about where to start searching -- if the number of bytes |
849 | needed is greater than the page size, we can start at pagebucket. */ | |
74091dd4 CR |
850 | #if SIZEOF_SIZE_T == 8 |
851 | if (ALLOCATED_BYTES(n) > MAXALLOC_SIZE) | |
852 | return ((PTR_T) NULL); | |
853 | #endif | |
f73dda09 | 854 | nbytes = ALLOCATED_BYTES(n); |
7117c2d2 JA |
855 | nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket; |
856 | for ( ; nunits < NBUCKETS; nunits++) | |
857 | if (nbytes <= binsize(nunits)) | |
858 | break; | |
726f6388 | 859 | |
8868edaf | 860 | /* Silently reject too-large requests. XXX - can increase this if HAVE_MMAP */ |
f73dda09 JA |
861 | if (nunits >= NBUCKETS) |
862 | return ((PTR_T) NULL); | |
863 | ||
726f6388 JA |
864 | /* In case this is reentrant use of malloc from signal handler, |
865 | pick a block size that no other malloc level is currently | |
866 | trying to allocate. That's the easiest harmless way not to | |
867 | interfere with the other level of execution. */ | |
cce855bc JA |
868 | #ifdef MALLOC_STATS |
869 | if (busy[nunits]) _mstats.nrecurse++; | |
870 | #endif | |
726f6388 JA |
871 | while (busy[nunits]) nunits++; |
872 | busy[nunits] = 1; | |
873 | ||
bb70624e JA |
874 | if (nunits > maxbuck) |
875 | maxbuck = nunits; | |
876 | ||
726f6388 | 877 | /* If there are no blocks of the appropriate size, go get some */ |
726f6388 JA |
878 | if (nextf[nunits] == 0) |
879 | morecore (nunits); | |
880 | ||
881 | /* Get one block off the list, and set the new list head */ | |
cce855bc | 882 | if ((p = nextf[nunits]) == NULL) |
726f6388 JA |
883 | { |
884 | busy[nunits] = 0; | |
cce855bc | 885 | return NULL; |
726f6388 JA |
886 | } |
887 | nextf[nunits] = CHAIN (p); | |
888 | busy[nunits] = 0; | |
889 | ||
890 | /* Check for free block clobbered */ | |
cce855bc JA |
891 | /* If not for this check, we would gobble a clobbered free chain ptr |
892 | and bomb out on the NEXT allocate of this size block */ | |
893 | if (p->mh_alloc != ISFREE || p->mh_index != nunits) | |
b80f6443 | 894 | xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line); |
726f6388 | 895 | |
f73dda09 | 896 | /* Fill in the info, and set up the magic numbers for range checking. */ |
cce855bc | 897 | p->mh_alloc = ISALLOC; |
cce855bc | 898 | p->mh_magic2 = MAGIC2; |
f73dda09 | 899 | p->mh_nbytes = n; |
726f6388 | 900 | |
8868edaf | 901 | /* Begin guard */ |
74091dd4 | 902 | MALLOC_MEMSET ((char *)p->mh_magic8, MAGIC1, MAGIC8_NUMBYTES); |
8868edaf | 903 | |
7117c2d2 JA |
904 | /* End guard */ |
905 | mg.i = n; | |
906 | z = mg.s; | |
907 | m = (char *) (p + 1) + n; | |
908 | *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++; | |
cce855bc | 909 | |
ccc6cda3 | 910 | #ifdef MEMSCRAMBLE |
7117c2d2 JA |
911 | if (n) |
912 | MALLOC_MEMSET ((char *)(p + 1), 0xdf, n); /* scramble previous contents */ | |
ccc6cda3 | 913 | #endif |
cce855bc JA |
914 | #ifdef MALLOC_STATS |
915 | _mstats.nmalloc[nunits]++; | |
916 | _mstats.tmalloc[nunits]++; | |
917 | _mstats.nmal++; | |
7117c2d2 | 918 | _mstats.bytesreq += n; |
cce855bc | 919 | #endif /* MALLOC_STATS */ |
f73dda09 JA |
920 | |
921 | #ifdef MALLOC_TRACE | |
922 | if (malloc_trace && (flags & MALLOC_NOTRACE) == 0) | |
923 | mtrace_alloc ("malloc", p + 1, n, file, line); | |
7117c2d2 JA |
924 | else if (_malloc_trace_buckets[nunits]) |
925 | mtrace_alloc ("malloc", p + 1, n, file, line); | |
f73dda09 JA |
926 | #endif |
927 | ||
928 | #ifdef MALLOC_REGISTER | |
929 | if (malloc_register && (flags & MALLOC_NOREG) == 0) | |
930 | mregister_alloc ("malloc", p + 1, n, file, line); | |
931 | #endif | |
932 | ||
7117c2d2 JA |
933 | #ifdef MALLOC_WATCH |
934 | if (_malloc_nwatch > 0) | |
935 | _malloc_ckwatch (p + 1, file, line, W_ALLOC, n); | |
936 | #endif | |
937 | ||
8868edaf CR |
938 | #if defined (MALLOC_DEBUG) |
939 | z = (char *) (p + 1); | |
940 | /* Check alignment of returned pointer */ | |
941 | if ((unsigned long)z & MALIGN_MASK) | |
942 | fprintf (stderr, "malloc: %s:%d: warning: request for %d bytes not aligned on %d byte boundary\r\n", | |
943 | file ? file : _("unknown"), line, p->mh_nbytes, MALIGN_MASK+1); | |
944 | #endif | |
945 | ||
7117c2d2 | 946 | return (PTR_T) (p + 1); |
726f6388 JA |
947 | } |
948 | ||
f73dda09 JA |
949 | static void |
950 | internal_free (mem, file, line, flags) | |
bb70624e | 951 | PTR_T mem; |
f73dda09 JA |
952 | const char *file; |
953 | int line, flags; | |
726f6388 | 954 | { |
cce855bc | 955 | register union mhead *p; |
7117c2d2 | 956 | register char *ap, *z; |
cce855bc | 957 | register int nunits; |
74091dd4 CR |
958 | register MALLOC_SIZE_T nbytes; |
959 | MALLOC_SIZE_T ubytes; /* caller-requested size */ | |
7117c2d2 | 960 | mguard_t mg; |
cce855bc | 961 | |
bb70624e | 962 | if ((ap = (char *)mem) == 0) |
cce855bc | 963 | return; |
726f6388 | 964 | |
cce855bc | 965 | p = (union mhead *) ap - 1; |
726f6388 | 966 | |
cce855bc JA |
967 | if (p->mh_alloc == ISMEMALIGN) |
968 | { | |
969 | ap -= p->mh_nbytes; | |
970 | p = (union mhead *) ap - 1; | |
971 | } | |
972 | ||
a0c0a00f CR |
973 | #if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER) || defined (MALLOC_WATCH) |
974 | if (malloc_trace || malloc_register || _malloc_nwatch > 0) | |
f73dda09 JA |
975 | ubytes = p->mh_nbytes; |
976 | #endif | |
977 | ||
cce855bc JA |
978 | if (p->mh_alloc != ISALLOC) |
979 | { | |
980 | if (p->mh_alloc == ISFREE) | |
f73dda09 | 981 | xbotch (mem, ERR_DUPFREE, |
b80f6443 | 982 | _("free: called with already freed block argument"), file, line); |
cce855bc | 983 | else |
f73dda09 | 984 | xbotch (mem, ERR_UNALLOC, |
b80f6443 | 985 | _("free: called with unallocated block argument"), file, line); |
cce855bc JA |
986 | } |
987 | ||
988 | ASSERT (p->mh_magic2 == MAGIC2); | |
f73dda09 JA |
989 | |
990 | nunits = p->mh_index; | |
991 | nbytes = ALLOCATED_BYTES(p->mh_nbytes); | |
74091dd4 CR |
992 | /* The MAGIC8_NUMBYTES bytes before the memory handed to the user are now |
993 | used for a simple check to catch things like p[-1] = 'x'. | |
f73dda09 JA |
994 | We sanity-check the value of mh_nbytes against the size of the blocks |
995 | in the appropriate bucket before we use it. This can still cause problems | |
996 | and obscure errors if mh_nbytes is wrong but still within range; the | |
7117c2d2 | 997 | checks against the size recorded at the end of the chunk will probably |
74091dd4 | 998 | fail then. Using MALLOC_REGISTER will help here, since it saves the |
7117c2d2 JA |
999 | original number of bytes requested. */ |
1000 | ||
f73dda09 JA |
1001 | if (IN_BUCKET(nbytes, nunits) == 0) |
1002 | xbotch (mem, ERR_UNDERFLOW, | |
b80f6443 | 1003 | _("free: underflow detected; mh_nbytes out of range"), file, line); |
8868edaf CR |
1004 | { |
1005 | int i; | |
74091dd4 | 1006 | for (i = 0, z = p->mh_magic8; i < MAGIC8_NUMBYTES; i++) |
8868edaf CR |
1007 | if (*z++ != MAGIC1) |
1008 | xbotch (mem, ERR_UNDERFLOW, | |
1009 | _("free: underflow detected; magic8 corrupted"), file, line); | |
1010 | } | |
f73dda09 | 1011 | |
cce855bc | 1012 | ap += p->mh_nbytes; |
7117c2d2 JA |
1013 | z = mg.s; |
1014 | *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++; | |
1015 | if (mg.i != p->mh_nbytes) | |
b80f6443 | 1016 | xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line); |
7117c2d2 | 1017 | |
d233b485 CR |
1018 | #if defined (USE_MMAP) |
1019 | if (nunits > malloc_mmap_threshold) | |
1020 | { | |
1021 | munmap (p, binsize (nunits)); | |
1022 | #if defined (MALLOC_STATS) | |
1023 | _mstats.nlesscore[nunits]++; | |
1024 | #endif | |
1025 | goto free_return; | |
1026 | } | |
1027 | #endif | |
1028 | ||
74091dd4 CR |
1029 | #if defined (USE_LESSCORE) |
1030 | /* We take care of the mmap case and munmap above */ | |
ac50fbac | 1031 | if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop)) |
7117c2d2 JA |
1032 | { |
1033 | /* If above LESSCORE_FRC, give back unconditionally. This should be set | |
1034 | high enough to be infrequently encountered. If between LESSCORE_MIN | |
95732b49 JA |
1035 | and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if |
1036 | there's already a block on the free list. */ | |
7117c2d2 JA |
1037 | if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0) |
1038 | { | |
1039 | lesscore (nunits); | |
1040 | /* keeps the tracing and registering code in one place */ | |
1041 | goto free_return; | |
1042 | } | |
1043 | } | |
74091dd4 | 1044 | #endif /* USE_LESSCORE */ |
726f6388 | 1045 | |
ccc6cda3 | 1046 | #ifdef MEMSCRAMBLE |
7117c2d2 JA |
1047 | if (p->mh_nbytes) |
1048 | MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes); | |
ccc6cda3 | 1049 | #endif |
cce855bc | 1050 | |
cce855bc | 1051 | ASSERT (nunits < NBUCKETS); |
cce855bc | 1052 | |
bb70624e | 1053 | if (busy[nunits] == 1) |
95732b49 JA |
1054 | { |
1055 | xsplit (p, nunits); /* split block and add to different chain */ | |
1056 | goto free_return; | |
1057 | } | |
bb70624e | 1058 | |
95732b49 | 1059 | p->mh_alloc = ISFREE; |
cce855bc JA |
1060 | /* Protect against signal handlers calling malloc. */ |
1061 | busy[nunits] = 1; | |
1062 | /* Put this block on the free list. */ | |
1063 | CHAIN (p) = nextf[nunits]; | |
1064 | nextf[nunits] = p; | |
1065 | busy[nunits] = 0; | |
1066 | ||
7117c2d2 | 1067 | free_return: |
b80f6443 | 1068 | ; /* Empty statement in case this is the end of the function */ |
7117c2d2 | 1069 | |
cce855bc JA |
1070 | #ifdef MALLOC_STATS |
1071 | _mstats.nmalloc[nunits]--; | |
1072 | _mstats.nfre++; | |
1073 | #endif /* MALLOC_STATS */ | |
f73dda09 JA |
1074 | |
1075 | #ifdef MALLOC_TRACE | |
1076 | if (malloc_trace && (flags & MALLOC_NOTRACE) == 0) | |
1077 | mtrace_free (mem, ubytes, file, line); | |
7117c2d2 JA |
1078 | else if (_malloc_trace_buckets[nunits]) |
1079 | mtrace_free (mem, ubytes, file, line); | |
f73dda09 JA |
1080 | #endif |
1081 | ||
1082 | #ifdef MALLOC_REGISTER | |
1083 | if (malloc_register && (flags & MALLOC_NOREG) == 0) | |
1084 | mregister_free (mem, ubytes, file, line); | |
1085 | #endif | |
7117c2d2 JA |
1086 | |
1087 | #ifdef MALLOC_WATCH | |
1088 | if (_malloc_nwatch > 0) | |
1089 | _malloc_ckwatch (mem, file, line, W_FREE, ubytes); | |
1090 | #endif | |
726f6388 JA |
1091 | } |
1092 | ||
74091dd4 CR |
1093 | #if USE_MREMAP == 1 |
1094 | /* Assume the caller (internal_realloc) has already performed the sanity and | |
1095 | overflow tests. Basically we kill the old guard information, determine the | |
1096 | new size, call mremap with the new size, and add the bookkeeping and guard | |
1097 | information back in. */ | |
1098 | static PTR_T | |
1099 | internal_remap (mem, n, nunits, flags) | |
1100 | PTR_T mem; | |
1101 | register size_t n; | |
1102 | int nunits; | |
1103 | int flags; | |
1104 | { | |
1105 | register union mhead *p, *np; | |
1106 | char *m, *z; | |
1107 | mguard_t mg; | |
1108 | MALLOC_SIZE_T nbytes; | |
1109 | ||
1110 | if (nunits >= NBUCKETS) /* Uh oh */ | |
1111 | return ((PTR_T) NULL); | |
1112 | ||
1113 | p = (union mhead *)mem - 1; | |
1114 | ||
1115 | m = (char *)mem + p->mh_nbytes; | |
1116 | z = mg.s; | |
1117 | *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0; /* erase guard */ | |
1118 | ||
1119 | nbytes = ALLOCATED_BYTES(n); | |
1120 | ||
1121 | busy[nunits] = 1; | |
1122 | np = (union mhead *)mremap (p, binsize (p->mh_index), binsize (nunits), MREMAP_MAYMOVE); | |
1123 | busy[nunits] = 0; | |
1124 | if (np == MAP_FAILED) | |
1125 | return (PTR_T)NULL; | |
1126 | ||
1127 | if (np != p) | |
1128 | { | |
1129 | np->mh_alloc = ISALLOC; | |
1130 | np->mh_magic2 = MAGIC2; | |
1131 | MALLOC_MEMSET ((char *)np->mh_magic8, MAGIC1, MAGIC8_NUMBYTES); | |
1132 | } | |
1133 | np->mh_index = nunits; | |
1134 | np->mh_nbytes = n; | |
1135 | ||
1136 | mg.i = n; | |
1137 | z = mg.s; | |
1138 | m = (char *)(np + 1) + n; | |
1139 | *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++; | |
1140 | ||
1141 | return ((PTR_T)(np + 1)); | |
1142 | } | |
1143 | #endif | |
1144 | ||
f73dda09 JA |
1145 | static PTR_T |
1146 | internal_realloc (mem, n, file, line, flags) | |
bb70624e | 1147 | PTR_T mem; |
cce855bc | 1148 | register size_t n; |
f73dda09 JA |
1149 | const char *file; |
1150 | int line, flags; | |
726f6388 | 1151 | { |
cce855bc | 1152 | register union mhead *p; |
74091dd4 CR |
1153 | register MALLOC_SIZE_T tocopy; |
1154 | register MALLOC_SIZE_T nbytes; | |
1155 | register int newunits, nunits; | |
7117c2d2 JA |
1156 | register char *m, *z; |
1157 | mguard_t mg; | |
cce855bc JA |
1158 | |
1159 | #ifdef MALLOC_STATS | |
1160 | _mstats.nrealloc++; | |
1161 | #endif | |
726f6388 | 1162 | |
cce855bc JA |
1163 | if (n == 0) |
1164 | { | |
f73dda09 | 1165 | internal_free (mem, file, line, MALLOC_INTERNAL); |
cce855bc JA |
1166 | return (NULL); |
1167 | } | |
1168 | if ((p = (union mhead *) mem) == 0) | |
f73dda09 JA |
1169 | return internal_malloc (n, file, line, MALLOC_INTERNAL); |
1170 | ||
726f6388 | 1171 | p--; |
cce855bc | 1172 | nunits = p->mh_index; |
f73dda09 JA |
1173 | ASSERT (nunits < NBUCKETS); |
1174 | ||
1175 | if (p->mh_alloc != ISALLOC) | |
1176 | xbotch (mem, ERR_UNALLOC, | |
b80f6443 | 1177 | _("realloc: called with unallocated block argument"), file, line); |
f73dda09 | 1178 | |
cce855bc | 1179 | ASSERT (p->mh_magic2 == MAGIC2); |
f73dda09 JA |
1180 | nbytes = ALLOCATED_BYTES(p->mh_nbytes); |
1181 | /* As in internal_free(), the MAGIC8_NUMBYTES bytes before the memory handed |
1182 | to the user are now used for a simple check to catch things like |
1183 | p[-1] = 'x'. |
1184 | We sanity-check the value of mh_nbytes against the size of the blocks | |
1185 | in the appropriate bucket before we use it. This can still cause problems | |
1186 | and obscure errors if mh_nbytes is wrong but still within range; the | |
7117c2d2 JA |
1187 | checks against the size recorded at the end of the chunk will probably |
1188 | fail then. Using MALLOC_REGISTER will help here, since it saves the | |
1189 | original number of bytes requested. */ | |
f73dda09 JA |
1190 | if (IN_BUCKET(nbytes, nunits) == 0) |
1191 | xbotch (mem, ERR_UNDERFLOW, | |
b80f6443 | 1192 | _("realloc: underflow detected; mh_nbytes out of range"), file, line); |
8868edaf CR |
1193 | { |
1194 | int i; | |
74091dd4 | 1195 | for (i = 0, z = p->mh_magic8; i < MAGIC8_NUMBYTES; i++) |
8868edaf CR |
1196 | if (*z++ != MAGIC1) |
1197 | xbotch (mem, ERR_UNDERFLOW, | |
1198 | _("realloc: underflow detected; magic8 corrupted"), file, line); | |
1199 | ||
1200 | } | |
cce855bc | 1201 | |
bb70624e | 1202 | m = (char *)mem + (tocopy = p->mh_nbytes); |
7117c2d2 JA |
1203 | z = mg.s; |
1204 | *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++; | |
1205 | if (mg.i != p->mh_nbytes) | |
b80f6443 | 1206 | xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line); |
7117c2d2 JA |
1207 | |
1208 | #ifdef MALLOC_WATCH | |
1209 | if (_malloc_nwatch > 0) | |
1210 | _malloc_ckwatch (p + 1, file, line, W_REALLOC, n); | |
1211 | #endif | |
1212 | #ifdef MALLOC_STATS | |
1213 | _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy; | |
1214 | #endif | |
726f6388 | 1215 | |
d233b485 CR |
1216 | /* If we're reallocating to the same size as previously, return now */ |
1217 | if (n == p->mh_nbytes) | |
1218 | return mem; | |
1219 | ||
74091dd4 CR |
1220 | #if SIZEOF_SIZE_T == 8 |
1221 | if (ALLOCATED_BYTES(n) > MAXALLOC_SIZE) | |
1222 | return ((PTR_T) NULL); | |
1223 | #endif | |
726f6388 | 1224 | /* See if desired size rounds to same power of 2 as actual size. */ |
f73dda09 | 1225 | nbytes = ALLOCATED_BYTES(n); |
726f6388 JA |
1226 | |
1227 | /* If ok, use the same block, just marking its size as changed. */ | |
d233b485 | 1228 | if (RIGHT_BUCKET(nbytes, nunits) || RIGHT_BUCKET(nbytes, nunits-1)) |
726f6388 | 1229 | { |
7117c2d2 JA |
1230 | /* Compensate for increment above. */ |
1231 | m -= 4; | |
d233b485 | 1232 | |
726f6388 | 1233 | *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0; |
7117c2d2 JA |
1234 | m = (char *)mem + (p->mh_nbytes = n); |
1235 | ||
1236 | mg.i = n; | |
1237 | z = mg.s; | |
1238 | *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++; | |
1239 | ||
726f6388 JA |
1240 | return mem; |
1241 | } | |
1242 | ||
7117c2d2 JA |
1243 | if (n < tocopy) |
1244 | tocopy = n; | |
1245 | ||
cce855bc JA |
1246 | #ifdef MALLOC_STATS |
1247 | _mstats.nrcopy++; | |
1248 | #endif | |
1249 | ||
74091dd4 CR |
1250 | #if USE_MREMAP == 1 |
1251 | /* If we are using mmap and have mremap, we use it here. Make sure that | |
1252 | the old size and new size are above the threshold where we use mmap */ | |
1253 | if (nbytes > p->mh_nbytes) | |
1254 | newunits = nunits; | |
1255 | else | |
1256 | newunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket; | |
1257 | for ( ; newunits < NBUCKETS; newunits++) | |
1258 | if (nbytes <= binsize(newunits)) | |
1259 | break; | |
d233b485 | 1260 | |
74091dd4 CR |
1261 | if (nunits > malloc_mmap_threshold && newunits > malloc_mmap_threshold) |
1262 | { | |
1263 | m = internal_remap (mem, n, newunits, MALLOC_INTERNAL); | |
1264 | if (m == 0) | |
1265 | return 0; | |
1266 | } | |
1267 | else | |
1268 | #endif /* USE_MREMAP */ | |
1269 | { | |
f73dda09 | 1270 | if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0) |
cce855bc JA |
1271 | return 0; |
1272 | FASTCOPY (mem, m, tocopy); | |
f73dda09 | 1273 | internal_free (mem, file, line, MALLOC_INTERNAL); |
74091dd4 | 1274 | } |
f73dda09 JA |
1275 | |
1276 | #ifdef MALLOC_TRACE | |
1277 | if (malloc_trace && (flags & MALLOC_NOTRACE) == 0) | |
1278 | mtrace_alloc ("realloc", m, n, file, line); | |
7117c2d2 JA |
1279 | else if (_malloc_trace_buckets[nunits]) |
1280 | mtrace_alloc ("realloc", m, n, file, line); | |
f73dda09 JA |
1281 | #endif |
1282 | ||
1283 | #ifdef MALLOC_REGISTER | |
1284 | if (malloc_register && (flags & MALLOC_NOREG) == 0) | |
1285 | mregister_alloc ("realloc", m, n, file, line); | |
1286 | #endif | |
1287 | ||
7117c2d2 JA |
1288 | #ifdef MALLOC_WATCH |
1289 | if (_malloc_nwatch > 0) | |
1290 | _malloc_ckwatch (m, file, line, W_RESIZED, n); | |
1291 | #endif | |
1292 | ||
cce855bc | 1293 | return m; |
726f6388 JA |
1294 | } |
1295 | ||
f73dda09 JA |
1296 | static PTR_T |
1297 | internal_memalign (alignment, size, file, line, flags) | |
95732b49 | 1298 | size_t alignment; |
cce855bc | 1299 | size_t size; |
f73dda09 JA |
1300 | const char *file; |
1301 | int line, flags; | |
726f6388 | 1302 | { |
ccc6cda3 | 1303 | register char *ptr; |
726f6388 | 1304 | register char *aligned; |
cce855bc | 1305 | register union mhead *p; |
726f6388 | 1306 | |
f73dda09 | 1307 | ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL); |
ccc6cda3 | 1308 | |
726f6388 JA |
1309 | if (ptr == 0) |
1310 | return 0; | |
1311 | /* If entire block has the desired alignment, just accept it. */ | |
f73dda09 | 1312 | if (((long) ptr & (alignment - 1)) == 0) |
726f6388 JA |
1313 | return ptr; |
1314 | /* Otherwise, get address of byte in the block that has that alignment. */ | |
f73dda09 | 1315 | aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1)); |
726f6388 JA |
1316 | |
1317 | /* Store a suitable indication of how to free the block, | |
1318 | so that free can find the true beginning of it. */ | |
cce855bc JA |
1319 | p = (union mhead *) aligned - 1; |
1320 | p->mh_nbytes = aligned - ptr; | |
1321 | p->mh_alloc = ISMEMALIGN; | |
f73dda09 | 1322 | |
726f6388 JA |
1323 | return aligned; |
1324 | } | |
1325 | ||
d233b485 CR |
1326 | int |
1327 | posix_memalign (memptr, alignment, size) | |
1328 | void **memptr; | |
1329 | size_t alignment, size; | |
1330 | { | |
1331 | void *mem; | |
1332 | ||
1333 | /* Perform posix-mandated error checking here */ | |
1334 | if ((alignment % sizeof (void *) != 0) || alignment == 0) | |
1335 | return EINVAL; | |
1336 | else if (powerof2 (alignment) == 0) | |
1337 | return EINVAL; | |
1338 | ||
1339 | mem = internal_memalign (alignment, size, (char *)0, 0, 0); | |
1340 | if (mem != 0) | |
1341 | { | |
1342 | *memptr = mem; | |
1343 | return 0; | |
1344 | } | |
1345 | return ENOMEM; | |
1346 | } | |
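A brief usage sketch for the posix_memalign() defined above (illustrative only); the chosen alignments are examples, and the error values follow the checks the function performs.

```c
/* Illustrative usage only: alignment must be a power of two and a multiple
   of sizeof (void *); otherwise posix_memalign() returns EINVAL. */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

int
main (void)
{
  void *p = NULL;
  int r;

  r = posix_memalign (&p, 64, 1000);    /* 64: power of two, multiple of 8 */
  if (r == 0)
    {
      printf ("64-byte aligned block at %p\n", p);
      free (p);
    }

  r = posix_memalign (&p, 24, 1000);    /* 24 is not a power of two */
  printf ("bad alignment returns %d (EINVAL is %d)\n", r, EINVAL);
  return 0;
}
```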
1347 | ||
1348 | size_t | |
1349 | malloc_usable_size (mem) | |
1350 | void *mem; | |
1351 | { | |
1352 | register union mhead *p; | |
1353 | register char *ap; | |
d233b485 CR |
1354 | |
1355 | if ((ap = (char *)mem) == 0) | |
1356 | return 0; | |
1357 | ||
1358 | /* Find the true start of the memory block to discover which bin */ | |
1359 | p = (union mhead *) ap - 1; | |
74091dd4 | 1360 | |
d233b485 CR |
1361 | if (p->mh_alloc == ISMEMALIGN) |
1362 | { | |
1363 | ap -= p->mh_nbytes; | |
1364 | p = (union mhead *) ap - 1; | |
1365 | } | |
1366 | ||
fed42742 CR |
1367 | /* return 0 if ISFREE */ |
1368 | if (p->mh_alloc == ISFREE) | |
1369 | return 0; | |
1370 | ||
1371 | /* Since we use bounds checking, the usable size is the last requested size. */ | |
1372 | return (p->mh_nbytes); | |
d233b485 CR |
1373 | } |
1374 | ||
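Since mh_nbytes records the exact size the caller asked for (and the end guard encodes it), malloc_usable_size() here reports the requested size rather than the chunk's full capacity. A small usage sketch (illustrative only; the extern declaration is written out because the prototype's header location varies by platform):

```c
/* Illustrative usage only: with this allocator, malloc_usable_size() returns
   the size originally requested, not the (larger) capacity of the chunk. */
#include <stdio.h>
#include <stdlib.h>

extern size_t malloc_usable_size (void *);   /* matches the definition above */

int
main (void)
{
  char *p = malloc (100);

  if (p)
    {
      printf ("requested 100, usable %zu\n", malloc_usable_size (p));
      free (p);
    }
  return 0;
}
```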
f73dda09 | 1375 | #if !defined (NO_VALLOC) |
726f6388 JA |
1376 | /* This runs into trouble with getpagesize on HPUX, and Multimax machines. |
1377 | Patching out seems cleaner than the ugly fix needed. */ | |
f73dda09 JA |
1378 | static PTR_T |
1379 | internal_valloc (size, file, line, flags) | |
ccc6cda3 | 1380 | size_t size; |
f73dda09 JA |
1381 | const char *file; |
1382 | int line, flags; | |
726f6388 | 1383 | { |
f73dda09 | 1384 | return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL); |
726f6388 | 1385 | } |
f73dda09 | 1386 | #endif /* !NO_VALLOC */ |
ccc6cda3 JA |
1387 | |
1388 | #ifndef NO_CALLOC | |
f73dda09 JA |
1389 | static PTR_T |
1390 | internal_calloc (n, s, file, line, flags) | |
ccc6cda3 | 1391 | size_t n, s; |
f73dda09 JA |
1392 | const char *file; |
1393 | int line, flags; | |
ccc6cda3 JA |
1394 | { |
1395 | size_t total; | |
f73dda09 | 1396 | PTR_T result; |
ccc6cda3 JA |
1397 | |
1398 | total = n * s; | |
f73dda09 | 1399 | result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL); |
ccc6cda3 | 1400 | if (result) |
7117c2d2 | 1401 | memset (result, 0, total); |
ccc6cda3 JA |
1402 | return result; |
1403 | } | |
1404 | ||
f73dda09 JA |
1405 | static void |
1406 | internal_cfree (p, file, line, flags) | |
bb70624e | 1407 | PTR_T p; |
f73dda09 JA |
1408 | const char *file; |
1409 | int line, flags; | |
ccc6cda3 | 1410 | { |
f73dda09 | 1411 | internal_free (p, file, line, flags|MALLOC_INTERNAL); |
ccc6cda3 JA |
1412 | } |
1413 | #endif /* !NO_CALLOC */ | |
1414 | ||
cce855bc | 1415 | #ifdef MALLOC_STATS |
f73dda09 JA |
1416 | int |
1417 | malloc_free_blocks (size) | |
726f6388 JA |
1418 | int size; |
1419 | { | |
f73dda09 | 1420 | int nfree; |
cce855bc | 1421 | register union mhead *p; |
726f6388 | 1422 | |
f73dda09 JA |
1423 | nfree = 0; |
1424 | for (p = nextf[size]; p; p = CHAIN (p)) | |
1425 | nfree++; | |
726f6388 | 1426 | |
f73dda09 JA |
1427 | return nfree; |
1428 | } | |
1429 | #endif | |
726f6388 | 1430 | |
7117c2d2 | 1431 | #if defined (MALLOC_WRAPFUNCS) |
f73dda09 JA |
1432 | PTR_T |
1433 | sh_malloc (bytes, file, line) | |
1434 | size_t bytes; | |
1435 | const char *file; | |
1436 | int line; | |
1437 | { | |
1438 | return internal_malloc (bytes, file, line, MALLOC_WRAPPER); | |
1439 | } | |
726f6388 | 1440 | |
f73dda09 JA |
1441 | PTR_T |
1442 | sh_realloc (ptr, size, file, line) | |
1443 | PTR_T ptr; | |
1444 | size_t size; | |
1445 | const char *file; | |
1446 | int line; | |
1447 | { | |
1448 | return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER); | |
1449 | } | |
726f6388 | 1450 | |
f73dda09 JA |
1451 | void |
1452 | sh_free (mem, file, line) | |
1453 | PTR_T mem; | |
1454 | const char *file; | |
1455 | int line; | |
1456 | { | |
1457 | internal_free (mem, file, line, MALLOC_WRAPPER); | |
726f6388 | 1458 | } |
ccc6cda3 | 1459 | |
f73dda09 JA |
1460 | PTR_T |
1461 | sh_memalign (alignment, size, file, line) | |
95732b49 | 1462 | size_t alignment; |
f73dda09 JA |
1463 | size_t size; |
1464 | const char *file; | |
1465 | int line; | |
cce855bc | 1466 | { |
f73dda09 JA |
1467 | return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER); |
1468 | } | |
726f6388 | 1469 | |
f73dda09 JA |
1470 | #ifndef NO_CALLOC |
1471 | PTR_T | |
1472 | sh_calloc (n, s, file, line) | |
1473 | size_t n, s; | |
1474 | const char *file; | |
1475 | int line; | |
1476 | { | |
1477 | return internal_calloc (n, s, file, line, MALLOC_WRAPPER); | |
726f6388 JA |
1478 | } |
1479 | ||
f73dda09 JA |
1480 | void |
1481 | sh_cfree (mem, file, line) | |
1482 | PTR_T mem; | |
1483 | const char *file; | |
1484 | int line; | |
726f6388 | 1485 | { |
f73dda09 JA |
1486 | internal_cfree (mem, file, line, MALLOC_WRAPPER); |
1487 | } | |
1488 | #endif | |
726f6388 | 1489 | |
f73dda09 JA |
1490 | #ifndef NO_VALLOC |
1491 | PTR_T | |
1492 | sh_valloc (size, file, line) | |
1493 | size_t size; | |
1494 | const char *file; | |
1495 | int line; | |
1496 | { | |
1497 | return internal_valloc (size, file, line, MALLOC_WRAPPER); | |
1498 | } | |
7117c2d2 | 1499 | #endif /* !NO_VALLOC */ |
f73dda09 | 1500 | |
7117c2d2 | 1501 | #endif /* MALLOC_WRAPFUNCS */ |
f73dda09 JA |
1502 | |
1503 | /* Externally-available functions that call their internal counterparts. */ | |
1504 | ||
1505 | PTR_T | |
1506 | malloc (size) | |
1507 | size_t size; | |
1508 | { | |
1509 | return internal_malloc (size, (char *)NULL, 0, 0); | |
1510 | } | |
1511 | ||
1512 | PTR_T | |
1513 | realloc (mem, nbytes) | |
1514 | PTR_T mem; | |
1515 | size_t nbytes; | |
1516 | { | |
1517 | return internal_realloc (mem, nbytes, (char *)NULL, 0, 0); | |
bb70624e JA |
1518 | } |
1519 | ||
1520 | void | |
f73dda09 JA |
1521 | free (mem) |
1522 | PTR_T mem; | |
bb70624e | 1523 | { |
f73dda09 | 1524 | internal_free (mem, (char *)NULL, 0, 0); |
bb70624e JA |
1525 | } |
1526 | ||
f73dda09 JA |
1527 | PTR_T |
1528 | memalign (alignment, size) | |
95732b49 | 1529 | size_t alignment; |
f73dda09 JA |
1530 | size_t size; |
1531 | { | |
1532 | return internal_memalign (alignment, size, (char *)NULL, 0, 0); | |
1533 | } | |
1534 | ||
1535 | #ifndef NO_VALLOC | |
1536 | PTR_T | |
1537 | valloc (size) | |
1538 | size_t size; | |
1539 | { | |
1540 | return internal_valloc (size, (char *)NULL, 0, 0); | |
1541 | } | |
1542 | #endif | |
1543 | ||
1544 | #ifndef NO_CALLOC | |
1545 | PTR_T | |
1546 | calloc (n, s) | |
1547 | size_t n, s; | |
1548 | { | |
1549 | return internal_calloc (n, s, (char *)NULL, 0, 0); | |
1550 | } | |
bb70624e JA |
1551 | |
1552 | void | |
f73dda09 JA |
1553 | cfree (mem) |
1554 | PTR_T mem; | |
bb70624e | 1555 | { |
f73dda09 | 1556 | internal_cfree (mem, (char *)NULL, 0, 0); |
726f6388 | 1557 | } |
f73dda09 | 1558 | #endif |