]>
Commit | Line | Data |
---|---|---|
cce855bc | 1 | /* malloc.c - dynamic memory allocation for bash. */ |
726f6388 | 2 | |
95732b49 | 3 | /* Copyright (C) 1985-2005 Free Software Foundation, Inc. |
726f6388 | 4 | |
3185942a JA |
5 | This file is part of GNU Bash, the Bourne-Again SHell. |
6 | ||
7 | Bash is free software: you can redistribute it and/or modify | |
8 | it under the terms of the GNU General Public License as published by | |
9 | the Free Software Foundation, either version 3 of the License, or | |
10 | (at your option) any later version. | |
11 | ||
12 | Bash is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with Bash. If not, see <http://www.gnu.org/licenses/>. | |
19 | */ | |
726f6388 JA |
20 | |
21 | /* | |
22 | * @(#)nmalloc.c 1 (Caltech) 2/21/82 | |
23 | * | |
24 | * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs | |
25 | * | |
26 | * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD. | |
27 | * | |
28 | * This is a very fast storage allocator. It allocates blocks of a small | |
29 | * number of different sizes, and keeps free lists of each size. Blocks | |
30 | * that don't exactly fit are passed up to the next larger size. In this | |
31 | * implementation, the available sizes are (2^n)-4 (or -16) bytes long. | |
32 | * This is designed for use in a program that uses vast quantities of | |
33 | * memory, but bombs when it runs out. To make it a little better, it | |
34 | * warns the user when he starts to get near the end. | |
35 | * | |
36 | * June 84, ACT: modified rcheck code to check the range given to malloc, | |
37 | * rather than the range determined by the 2-power used. | |
38 | * | |
39 | * Jan 85, RMS: calls malloc_warning to issue warning on nearly full. | |
40 | * No longer Emacs-specific; can serve as all-purpose malloc for GNU. | |
41 | * You should call malloc_init to reinitialize after loading dumped Emacs. | |
cce855bc | 42 | * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on. |
726f6388 JA |
43 | * realloc knows how to return same block given, just changing its size, |
44 | * if the power of 2 is correct. | |
45 | */ | |
46 | ||
47 | /* | |
48 | * nextf[i] is the pointer to the next free block of size 2^(i+3). The | |
49 | * smallest allocatable block is 8 bytes. The overhead information will | |
50 | * go in the first int of the block, and the returned pointer will point | |
51 | * to the second. | |
726f6388 JA |
52 | */ |
53 | ||
7117c2d2 JA |
54 | /* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to |
55 | uncover callers that refer to freed memory, and to have malloc() write 0xdf | |
56 | into memory as it's allocated to avoid referring to previous contents. */ | |
57 | ||
58 | /* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE; | |
59 | handled by configure. */ | |
ccc6cda3 | 60 | |
cce855bc | 61 | #if defined (HAVE_CONFIG_H) |
ccc6cda3 | 62 | # include <config.h> |
cce855bc JA |
63 | #endif /* HAVE_CONFIG_H */ |
64 | ||
65 | #if defined (SHELL) | |
66 | # include "bashtypes.h" | |
f73dda09 | 67 | # include "stdc.h" |
cce855bc JA |
68 | #else |
69 | # include <sys/types.h> | |
70 | #endif | |
726f6388 | 71 | |
ccc6cda3 JA |
72 | #if defined (HAVE_UNISTD_H) |
73 | # include <unistd.h> | |
74 | #endif | |
726f6388 JA |
75 | |
76 | /* Determine which kind of system this is. */ | |
cce855bc JA |
77 | #include <signal.h> |
78 | ||
79 | #if defined (HAVE_STRING_H) | |
80 | # include <string.h> | |
d166f048 | 81 | #else |
cce855bc | 82 | # include <strings.h> |
d166f048 | 83 | #endif |
cce855bc | 84 | |
f73dda09 | 85 | #include <stdio.h> |
726f6388 | 86 | |
ccc6cda3 JA |
87 | /* Define getpagesize () if the system does not. */ |
88 | #ifndef HAVE_GETPAGESIZE | |
726f6388 JA |
89 | # include "getpagesize.h" |
90 | #endif | |
91 | ||
f73dda09 JA |
92 | #include "imalloc.h" |
93 | #ifdef MALLOC_STATS | |
94 | # include "mstats.h" | |
95 | #endif | |
96 | #ifdef MALLOC_REGISTER | |
97 | # include "table.h" | |
bb70624e | 98 | #endif |
7117c2d2 JA |
99 | #ifdef MALLOC_WATCH |
100 | # include "watch.h" | |
101 | #endif | |
bb70624e | 102 | |
f73dda09 JA |
103 | /* System-specific omissions. */ |
104 | #ifdef HPUX | |
105 | # define NO_VALLOC | |
ccc6cda3 JA |
106 | #endif |
107 | ||
cce855bc | 108 | #define NBUCKETS 30 |
726f6388 JA |
109 | |
110 | #define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */ | |
111 | #define ISFREE ((char) 0x54) /* magic byte that implies free block */ | |
112 | /* this is for error checking only */ | |
113 | #define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by | |
114 | memalign, with the rest of the word | |
115 | being the distance to the true | |
116 | beginning of the block. */ | |
726f6388 | 117 | |
cce855bc JA |
118 | |
119 | /* We have a flag indicating whether memory is allocated, an index in | |
120 | nextf[], a size field, and a sentinel value to determine whether or | |
121 | not a caller wrote before the start of allocated memory; to realloc() | |
122 | memory we either copy mh_nbytes or just change mh_nbytes if there is | |
123 | enough room in the block for the new size. Range checking is always | |
124 | done. */ | |
125 | union mhead { | |
f73dda09 | 126 | bits64_t mh_align; /* 8 */ |
cce855bc | 127 | struct { |
f73dda09 JA |
128 | char mi_alloc; /* ISALLOC or ISFREE */ /* 1 */ |
129 | char mi_index; /* index in nextf[] */ /* 1 */ | |
cce855bc | 130 | /* Remainder are valid only when block is allocated */ |
f73dda09 JA |
131 | u_bits16_t mi_magic2; /* should be == MAGIC2 */ /* 2 */ |
132 | u_bits32_t mi_nbytes; /* # of bytes allocated */ /* 4 */ | |
cce855bc | 133 | } minfo; |
726f6388 | 134 | }; |
cce855bc JA |
135 | #define mh_alloc minfo.mi_alloc |
136 | #define mh_index minfo.mi_index | |
137 | #define mh_nbytes minfo.mi_nbytes | |
138 | #define mh_magic2 minfo.mi_magic2 | |
726f6388 | 139 | |
7117c2d2 JA |
140 | #define MOVERHEAD sizeof(union mhead) |
141 | #define MALIGN_MASK 7 /* one less than desired alignment */ | |
142 | ||
143 | typedef union _malloc_guard { | |
144 | char s[4]; | |
145 | u_bits32_t i; | |
146 | } mguard_t; | |
147 | ||
726f6388 | 148 | /* Access free-list pointer of a block. |
cce855bc | 149 | It is stored at block + sizeof (char *). |
b72432fd JA |
150 | This is not a field in the minfo structure member of union mhead |
151 | because we want sizeof (union mhead) | |
cce855bc JA |
152 | to describe the overhead for when the block is in use, |
153 | and we do not want the free-list pointer to count in that. */ | |
726f6388 JA |
154 | |
155 | #define CHAIN(a) \ | |
cce855bc | 156 | (*(union mhead **) (sizeof (char *) + (char *) (a))) |
726f6388 | 157 | |
cce855bc JA |
158 | /* To implement range checking, we write magic values in at the beginning |
159 | and end of each allocated block, and make sure they are undisturbed | |
160 | whenever a free or a realloc occurs. */ | |
161 | ||
f73dda09 | 162 | /* Written in the 2 bytes before the block's real space (-4 bytes) */ |
cce855bc | 163 | #define MAGIC2 0x5555 |
7117c2d2 | 164 | #define MSLOP 4 /* 4 bytes extra for u_bits32_t size */ |
cce855bc | 165 | |
f73dda09 JA |
166 | /* How many bytes are actually allocated for a request of size N -- |
167 | rounded up to nearest multiple of 8 after accounting for malloc | |
168 | overhead. */ | |
7117c2d2 JA |
169 | #define ALLOCATED_BYTES(n) \ |
170 | (((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK) | |
f73dda09 JA |
171 | |
172 | #define ASSERT(p) \ | |
173 | do \ | |
174 | { \ | |
175 | if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, __STRING(p), file, line); \ | |
176 | } \ | |
177 | while (0) | |
178 | ||
cce855bc JA |
179 | /* Minimum and maximum bucket indices for block splitting (and to bound |
180 | the search for a block to split). */ | |
7117c2d2 JA |
181 | #define SPLIT_MIN 2 /* XXX - was 3 */ |
182 | #define SPLIT_MID 11 | |
183 | #define SPLIT_MAX 14 | |
cce855bc JA |
184 | |
185 | /* Minimum and maximum bucket indices for block coalescing. */ | |
7117c2d2 JA |
186 | #define COMBINE_MIN 2 |
187 | #define COMBINE_MAX (pagebucket - 1) /* XXX */ | |
cce855bc | 188 | |
7117c2d2 JA |
189 | #define LESSCORE_MIN 10 |
190 | #define LESSCORE_FRC 13 | |
191 | ||
192 | #define STARTBUCK 1 | |
726f6388 | 193 | |
f73dda09 JA |
194 | /* Flags for the internal functions. */ |
195 | #define MALLOC_WRAPPER 0x01 /* wrapper function */ | |
196 | #define MALLOC_INTERNAL 0x02 /* internal function calling another */ | |
197 | #define MALLOC_NOTRACE 0x04 /* don't trace this allocation or free */ | |
198 | #define MALLOC_NOREG 0x08 /* don't register this allocation or free */ | |
199 | ||
200 | /* Future use. */ | |
201 | #define ERR_DUPFREE 0x01 | |
202 | #define ERR_UNALLOC 0x02 | |
203 | #define ERR_UNDERFLOW 0x04 | |
204 | #define ERR_ASSERT_FAILED 0x08 | |
205 | ||
206 | /* Evaluates to true if NB is appropriate for bucket NU. NB is adjusted | |
7117c2d2 JA |
207 | appropriately by the caller to account for malloc overhead. This only |
208 | checks that the recorded size is not too big for the bucket. We | |
209 | can't check whether or not it's in between NU and NU-1 because we | |
210 | might have encountered a busy bucket when allocating and moved up to | |
211 | the next size. */ | |
212 | #define IN_BUCKET(nb, nu) ((nb) <= binsizes[(nu)]) | |
213 | ||
214 | /* Use this when we want to be sure that NB is in bucket NU. */ | |
215 | #define RIGHT_BUCKET(nb, nu) \ | |
216 | (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)])) | |
f73dda09 | 217 | |
726f6388 JA |
218 | /* nextf[i] is free list of blocks of size 2**(i + 3) */ |
219 | ||
cce855bc | 220 | static union mhead *nextf[NBUCKETS]; |
726f6388 | 221 | |
95732b49 | 222 | /* busy[i] is nonzero while allocation or free of block size i is in progress. */ |
726f6388 | 223 | |
cce855bc | 224 | static char busy[NBUCKETS]; |
726f6388 | 225 | |
cce855bc JA |
226 | static int pagesz; /* system page size. */ |
227 | static int pagebucket; /* bucket for requests a page in size */ | |
bb70624e | 228 | static int maxbuck; /* highest bucket receiving allocation request. */ |
726f6388 | 229 | |
7117c2d2 JA |
230 | static char *memtop; /* top of heap */ |
231 | ||
3185942a | 232 | static const unsigned long binsizes[NBUCKETS] = { |
7117c2d2 JA |
233 | 8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL, |
234 | 8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL, | |
235 | 1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL, | |
236 | 67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL, | |
b80f6443 | 237 | 2147483648UL, 4294967295UL |
7117c2d2 JA |
238 | }; |
239 | ||
240 | /* binsizes[x] == (1 << ((x) + 3)) */ | |
241 | #define binsize(x) binsizes[(x)] | |
242 | ||
f73dda09 JA |
243 | /* Declarations for internal functions */ |
244 | static PTR_T internal_malloc __P((size_t, const char *, int, int)); | |
245 | static PTR_T internal_realloc __P((PTR_T, size_t, const char *, int, int)); | |
246 | static void internal_free __P((PTR_T, const char *, int, int)); | |
95732b49 | 247 | static PTR_T internal_memalign __P((size_t, size_t, const char *, int, int)); |
f73dda09 JA |
248 | #ifndef NO_CALLOC |
249 | static PTR_T internal_calloc __P((size_t, size_t, const char *, int, int)); | |
250 | static void internal_cfree __P((PTR_T, const char *, int, int)); | |
251 | #endif | |
252 | #ifndef NO_VALLOC | |
253 | static PTR_T internal_valloc __P((size_t, const char *, int, int)); | |
254 | #endif | |
255 | ||
256 | #if defined (botch) | |
257 | extern void botch (); | |
258 | #else | |
259 | static void botch __P((const char *, const char *, int)); | |
260 | #endif | |
261 | static void xbotch __P((PTR_T, int, const char *, const char *, int)); | |
262 | ||
f73dda09 JA |
263 | #if !HAVE_DECL_SBRK |
264 | extern char *sbrk (); | |
265 | #endif /* !HAVE_DECL_SBRK */ | |
266 | ||
28ef6c31 JA |
267 | #ifdef SHELL |
268 | extern int interrupt_immediately; | |
f73dda09 | 269 | extern int signal_is_trapped __P((int)); |
28ef6c31 JA |
270 | #endif |
271 | ||
7117c2d2 JA |
272 | #ifdef MALLOC_STATS |
273 | struct _malstats _mstats; | |
274 | #endif /* MALLOC_STATS */ | |
275 | ||
f73dda09 JA |
276 | /* Debugging variables available to applications. */ |
277 | int malloc_flags = 0; /* future use */ | |
278 | int malloc_trace = 0; /* trace allocations and frees to stderr */ | |
279 | int malloc_register = 0; /* future use */ | |
280 | ||
7117c2d2 JA |
281 | #ifdef MALLOC_TRACE |
282 | char _malloc_trace_buckets[NBUCKETS]; | |
283 | ||
284 | /* These should really go into a header file. */ | |
285 | extern void mtrace_alloc __P((const char *, PTR_T, size_t, const char *, int)); | |
286 | extern void mtrace_free __P((PTR_T, int, const char *, int)); | |
287 | #endif | |
288 | ||
f73dda09 JA |
#if !defined (botch)
/* Default handler for a failed internal consistency check: report the
   assertion text on stderr and terminate.  Compiled out when the
   application supplies its own botch().  FILE and LINE are accepted for
   interface compatibility but not used here. */
static void
botch (msg, file, line)
     const char *msg;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), msg);
  fflush (stderr);
  abort ();
}
#endif
301 | ||
302 | /* print the file and line number that caused the assertion failure and | |
303 | call botch() to do whatever the application wants with the information */ | |
304 | static void | |
305 | xbotch (mem, e, s, file, line) | |
306 | PTR_T mem; | |
307 | int e; | |
308 | const char *s; | |
309 | const char *file; | |
310 | int line; | |
311 | { | |
b80f6443 | 312 | fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"), |
3185942a | 313 | file ? file : _("unknown"), line); |
f73dda09 JA |
314 | #ifdef MALLOC_REGISTER |
315 | if (mem != NULL && malloc_register) | |
316 | mregister_describe_mem (mem, stderr); | |
317 | #endif | |
318 | (void)fflush (stderr); | |
319 | botch(s, file, line); | |
320 | } | |
321 | ||
/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU -1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1.

   On success the combined block is pushed onto nextf[NU]; on any
   failure (empty/busy smaller list, no physically adjacent pair found)
   the function returns without touching nextf[NU]. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  /* Nothing to combine if the smaller bucket is empty or already in use. */
  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  /* Reserve the smaller bucket while we walk and splice its chain. */
  busy[nbuck] = 1;
  siz = binsize (nbuck);

  /* Walk the free chain looking for a block MP that immediately follows
     MP1 in memory (i.e. MP == MP1 + siz).  MP2 trails one link behind MP1
     so we can unlink the pair later. */
  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }

  /* Ran off the end of the chain: no adjacent pair exists. */
  if (mp == 0)
    {
      busy[nbuck] = 0;
      return;
    }

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    {
      busy[nbuck] = 0;
      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
    }

#ifdef MALLOC_DEBUG
  /* Extra paranoia when debugging: re-verify physical adjacency via the
     chain pointer before splicing. */
  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    {
      busy[nbuck] = 0;
      return;	/* not adjacent */
    }
#endif

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);	/* pair was at the head: skip both */
  else
    CHAIN (mp2) = CHAIN (mp);	/* splice the pair out mid-chain */
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}
726f6388 | 390 | |
cce855bc JA |
/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1.

   Donor-bucket search direction: for large requests (>= SPLIT_MID) scan
   downward from the largest candidate so big blocks are preferred; for
   small requests scan upward from NU + 1 to take the smallest usable
   donor.  If no free, non-busy donor exists, return leaving nextf[NU]
   empty and let the caller fall back to sbrk(). */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  /* Allow the search to extend past SPLIT_MAX if we've already served
     requests from a larger bucket (maxbuck). */
  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  /* No suitable donor bucket found. */
  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  busy[nbuck] = 1;
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain.  Each fragment is
     stamped as a free block of bucket NU and chained to the next. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}
ccc6cda3 | 457 | |
95732b49 JA |
458 | /* Take the memory block MP and add it to a chain < NU. NU is the right bucket, |
459 | but is busy. This avoids memory orphaning. */ | |
460 | static void | |
461 | xsplit (mp, nu) | |
462 | union mhead *mp; | |
463 | int nu; | |
464 | { | |
465 | union mhead *nh; | |
466 | int nbuck, nblks, split_max; | |
467 | unsigned long siz; | |
468 | ||
469 | nbuck = nu - 1; | |
470 | while (nbuck >= SPLIT_MIN && busy[nbuck]) | |
471 | nbuck--; | |
472 | if (nbuck < SPLIT_MIN) | |
473 | return; | |
474 | ||
475 | #ifdef MALLOC_STATS | |
476 | _mstats.tbsplit++; | |
477 | _mstats.nsplit[nu]++; | |
478 | #endif | |
479 | ||
480 | /* Figure out how many blocks we'll get. */ | |
481 | siz = binsize (nu); /* original block size */ | |
482 | nblks = siz / binsize (nbuck); /* should be 2 most of the time */ | |
483 | ||
484 | /* And add it to nextf[nbuck] */ | |
485 | siz = binsize (nbuck); /* XXX - resetting here */ | |
486 | nh = mp; | |
487 | while (1) | |
488 | { | |
489 | mp->mh_alloc = ISFREE; | |
490 | mp->mh_index = nbuck; | |
491 | if (--nblks <= 0) break; | |
492 | CHAIN (mp) = (union mhead *)((char *)mp + siz); | |
493 | mp = (union mhead *)((char *)mp + siz); | |
494 | } | |
495 | busy[nbuck] = 1; | |
496 | CHAIN (mp) = nextf[nbuck]; | |
497 | nextf[nbuck] = nh; | |
498 | busy[nbuck] = 0; | |
499 | } | |
500 | ||
28ef6c31 JA |
501 | static void |
502 | block_signals (setp, osetp) | |
503 | sigset_t *setp, *osetp; | |
504 | { | |
505 | #ifdef HAVE_POSIX_SIGNALS | |
506 | sigfillset (setp); | |
507 | sigemptyset (osetp); | |
508 | sigprocmask (SIG_BLOCK, setp, osetp); | |
509 | #else | |
510 | # if defined (HAVE_BSD_SIGNALS) | |
511 | *osetp = sigsetmask (-1); | |
512 | # endif | |
513 | #endif | |
514 | } | |
515 | ||
516 | static void | |
517 | unblock_signals (setp, osetp) | |
518 | sigset_t *setp, *osetp; | |
519 | { | |
520 | #ifdef HAVE_POSIX_SIGNALS | |
521 | sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL); | |
522 | #else | |
523 | # if defined (HAVE_BSD_SIGNALS) | |
524 | sigsetmask (*osetp); | |
525 | # endif | |
526 | #endif | |
527 | } | |
7117c2d2 JA |
528 | |
529 | /* Return some memory to the system by reducing the break. This is only | |
530 | called with NU > pagebucket, so we're always assured of giving back | |
531 | more than one page of memory. */ | |
532 | static void | |
533 | lesscore (nu) /* give system back some memory */ | |
534 | register int nu; /* size index we're discarding */ | |
535 | { | |
536 | long siz; | |
537 | ||
538 | siz = binsize (nu); | |
539 | /* Should check for errors here, I guess. */ | |
540 | sbrk (-siz); | |
541 | memtop -= siz; | |
542 | ||
543 | #ifdef MALLOC_STATS | |
544 | _mstats.nsbrk++; | |
545 | _mstats.tsbrk -= siz; | |
546 | _mstats.nlesscore[nu]++; | |
547 | #endif | |
548 | } | |
95732b49 JA |
549 | |
/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1.

   Strategy, in order: (1) split a larger free block (bsplit), (2) coalesce
   two adjacent free blocks from the next-smaller bucket (bcoalesce),
   (3) extend the heap with sbrk(), always requesting an integral multiple
   of the page size.  Signals are blocked around the whole operation when
   running inside the shell with pending traps, since this can run from a
   signal handler. */
static void
morecore (nu)
     register int nu;		/* size index to get more of  */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
  /* Only pay the signal-mask cost when a handler could actually run. */
  if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#endif
    {
      block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);		/* size of desired block for nextf[nu] */

  /* binsize() for the top buckets can exceed LONG_MAX on 32-bit longs and
     wrap negative -- presumably why this guard exists; bail out if so. */
  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);	/* pagesz is a power of 2 here */
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;	/* the realignment consumed part of the first block */
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    unblock_signals (&set, &oset);
}
657 | ||
cce855bc JA |
/* Hook for debuggers to set a breakpoint on; writes a marker string to
   fd 1 so its invocation is externally visible (and so the call is not
   optimized away). */
static void
malloc_debug_dummy ()
{
  static const char tag[] = "malloc_debug_dummy\n";

  write (1, tag, sizeof (tag) - 1);
}
663 | ||
7117c2d2 JA |
#define PREPOP_BIN	2
#define PREPOP_SIZE	32

/* One-time initialization: advance the break pointer so all future sbrk()
   calls are page-aligned, seed bin PREPOP_BIN with the otherwise-wasted
   partial page, and compute pagebucket (the bucket index whose block size
   first reaches the page size).  Sets pagesz, memtop, pagebucket, and
   possibly nextf[PREPOP_BIN].  Returns 0 on success, -1 if sbrk fails. */
static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  /* Pages smaller than 1K are treated as 1K. */
  pagesz = getpagesize ();
  if (pagesz < 1024)
    pagesz = 1024;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.nsbrk++;
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (2 == 32 bytes) with it.  Add whatever we need to curbrk
	 to make things 32-byte aligned, compute how many 32-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  /* Chain the 32-byte fragments onto nextf[PREPOP_BIN], each
	     stamped as a free block of that bucket. */
	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size.  The scan starts at 7
     (binsize 1024), matching the 1K minimum page size enforced above. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}
733 | ||
f73dda09 JA |
/* Allocate a block of at least N bytes and return a pointer to its user
   area (just past the union mhead header).  FILE and LINE identify the
   caller for error reporting and tracing; FLAGS is a MALLOC_* bitmask
   controlling trace/register behavior.  Returns NULL if the request is
   too large for any bucket or the system is out of memory. */
static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  long nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests. */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution.  */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  /* Remember the highest bucket ever requested; bsplit() uses this to
     extend its donor search range. */
  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

  /* End guard: copy the 4-byte request size into the MSLOP bytes just past
     the user area, one byte at a time since the address may be unaligned.
     Presumably internal_free checks this to detect overruns -- the check
     is outside this block. */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

  return (PTR_T) (p + 1);
}
840 | ||
f73dda09 JA |
/* Free the storage pointed to by MEM, which was handed out by
   internal_malloc (or internal_memalign).  FILE and LINE identify the
   call site when the sh_* wrapper functions are used (NULL/0 otherwise);
   FLAGS carries MALLOC_* bits (e.g. MALLOC_INTERNAL, MALLOC_NOTRACE)
   that control the optional tracing/registering code.  The block header
   is validated before the chunk goes back on its bucket's free list. */
static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */
  mguard_t mg;

  /* free (NULL) is a no-op. */
  if ((ap = (char *)mem) == 0)
    return;

  /* The bookkeeping header sits immediately before the user region. */
  p = (union mhead *) ap - 1;

  /* For aligned blocks, mh_nbytes holds the distance back to the start of
     the real block; back up over the padding to the true header. */
  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
  /* Save the caller-requested size now, before the header is changed. */
  if (malloc_trace || malloc_register)
    ubytes = p->mh_nbytes;
#endif

  /* Diagnose double frees and frees of pointers we never handed out. */
  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);

  /* Read the four guard bytes stored just past the user region and make
     sure they still agree with mh_nbytes; a mismatch means someone wrote
     past the end of the block. */
  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

  /* If this is a large enough block sitting exactly at the top of the heap,
     consider returning memory to the system instead of caching the chunk. */
#if 1
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
#else
  if (((char *)p + binsize(nunits) == memtop) && nunits >= LESSCORE_MIN)
#endif
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);	/* scramble freed contents */
#endif

  ASSERT (nunits < NBUCKETS);

  /* If this bucket's chain is busy (e.g. we interrupted an allocation of
     this size), don't touch the chain; split the block up and hand the
     pieces to other chains instead. */
  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;	/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}
968 | ||
f73dda09 JA |
/* Resize the block MEM to N bytes, preserving min(old size, N) bytes of
   its contents.  Behaves like internal_malloc when MEM is NULL and like
   internal_free when N is 0.  If the new size still rounds into the same
   bucket, the block is resized in place (only the size guards change);
   otherwise a new block is allocated, the data copied, and the old block
   freed.  FILE/LINE/FLAGS feed the tracing, registering, and watchpoint
   code as in internal_malloc. */
static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  /* realloc (p, 0) is equivalent to free (p). */
  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  /* realloc (NULL, n) is equivalent to malloc (n). */
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);

  /* Read the end-of-block size guard (as in internal_free) and verify it
     still matches mh_nbytes; this leaves M pointing 4 bytes past the old
     guard. */
  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed.  */
  if (RIGHT_BUCKET(nbytes, nunits))
    {
      /* Zero the old end guard, record the new size, and write a fresh
	 guard at the new end of the user region. */
#if 0
      m = (char *)mem + p->mh_nbytes;
#else
      /* Compensate for increment above. */
      m -= 4;
#endif
      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

  /* Different bucket: copy only as much as the new size can hold. */
  if (n < tocopy)
    tocopy = n;

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  /* Allocate the replacement block without tracing/registering it; the
     realloc itself is traced/registered below. */
  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}
1085 | ||
f73dda09 JA |
1086 | static PTR_T |
1087 | internal_memalign (alignment, size, file, line, flags) | |
95732b49 | 1088 | size_t alignment; |
cce855bc | 1089 | size_t size; |
f73dda09 JA |
1090 | const char *file; |
1091 | int line, flags; | |
726f6388 | 1092 | { |
ccc6cda3 | 1093 | register char *ptr; |
726f6388 | 1094 | register char *aligned; |
cce855bc | 1095 | register union mhead *p; |
726f6388 | 1096 | |
f73dda09 | 1097 | ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL); |
ccc6cda3 | 1098 | |
726f6388 JA |
1099 | if (ptr == 0) |
1100 | return 0; | |
1101 | /* If entire block has the desired alignment, just accept it. */ | |
f73dda09 | 1102 | if (((long) ptr & (alignment - 1)) == 0) |
726f6388 JA |
1103 | return ptr; |
1104 | /* Otherwise, get address of byte in the block that has that alignment. */ | |
f73dda09 JA |
1105 | #if 0 |
1106 | aligned = (char *) (((long) ptr + alignment - 1) & -alignment); | |
1107 | #else | |
1108 | aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1)); | |
1109 | #endif | |
726f6388 JA |
1110 | |
1111 | /* Store a suitable indication of how to free the block, | |
1112 | so that free can find the true beginning of it. */ | |
cce855bc JA |
1113 | p = (union mhead *) aligned - 1; |
1114 | p->mh_nbytes = aligned - ptr; | |
1115 | p->mh_alloc = ISMEMALIGN; | |
f73dda09 | 1116 | |
726f6388 JA |
1117 | return aligned; |
1118 | } | |
1119 | ||
f73dda09 | 1120 | #if !defined (NO_VALLOC) |
726f6388 JA |
1121 | /* This runs into trouble with getpagesize on HPUX, and Multimax machines. |
1122 | Patching out seems cleaner than the ugly fix needed. */ | |
f73dda09 JA |
/* Allocate SIZE bytes aligned to the system page size, by delegating to
   internal_memalign with getpagesize() as the alignment. */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
f73dda09 | 1131 | #endif /* !NO_VALLOC */ |
ccc6cda3 JA |
1132 | |
1133 | #ifndef NO_CALLOC | |
f73dda09 JA |
1134 | static PTR_T |
1135 | internal_calloc (n, s, file, line, flags) | |
ccc6cda3 | 1136 | size_t n, s; |
f73dda09 JA |
1137 | const char *file; |
1138 | int line, flags; | |
ccc6cda3 JA |
1139 | { |
1140 | size_t total; | |
f73dda09 | 1141 | PTR_T result; |
ccc6cda3 JA |
1142 | |
1143 | total = n * s; | |
f73dda09 | 1144 | result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL); |
ccc6cda3 | 1145 | if (result) |
7117c2d2 | 1146 | memset (result, 0, total); |
ccc6cda3 JA |
1147 | return result; |
1148 | } | |
1149 | ||
f73dda09 JA |
/* Compatibility interface for the old BSD cfree(3) function: simply
   forwards to internal_free with MALLOC_INTERNAL added to FLAGS. */
static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
1158 | #endif /* !NO_CALLOC */ | |
1159 | ||
cce855bc | 1160 | #ifdef MALLOC_STATS |
f73dda09 JA |
1161 | int |
1162 | malloc_free_blocks (size) | |
726f6388 JA |
1163 | int size; |
1164 | { | |
f73dda09 | 1165 | int nfree; |
cce855bc | 1166 | register union mhead *p; |
726f6388 | 1167 | |
f73dda09 JA |
1168 | nfree = 0; |
1169 | for (p = nextf[size]; p; p = CHAIN (p)) | |
1170 | nfree++; | |
726f6388 | 1171 | |
f73dda09 JA |
1172 | return nfree; |
1173 | } | |
1174 | #endif | |
726f6388 | 1175 | |
7117c2d2 | 1176 | #if defined (MALLOC_WRAPFUNCS) |
f73dda09 JA |
/* Shell-visible malloc wrapper: passes the caller's FILE and LINE through
   and tags the request with MALLOC_WRAPPER for the bookkeeping code. */
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}
726f6388 | 1185 | |
f73dda09 JA |
/* Shell-visible realloc wrapper: passes the caller's FILE and LINE through
   and tags the request with MALLOC_WRAPPER for the bookkeeping code. */
PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}
726f6388 | 1195 | |
f73dda09 JA |
/* Shell-visible free wrapper: passes the caller's FILE and LINE through
   and tags the request with MALLOC_WRAPPER for the bookkeeping code. */
void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}
ccc6cda3 | 1204 | |
f73dda09 JA |
/* Shell-visible memalign wrapper: passes the caller's FILE and LINE
   through and tags the request with MALLOC_WRAPPER. */
PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}
726f6388 | 1214 | |
f73dda09 JA |
1215 | #ifndef NO_CALLOC |
/* Shell-visible calloc wrapper: passes the caller's FILE and LINE through
   and tags the request with MALLOC_WRAPPER for the bookkeeping code. */
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}
1224 | ||
f73dda09 JA |
/* Shell-visible cfree wrapper: passes the caller's FILE and LINE through
   and tags the request with MALLOC_WRAPPER for the bookkeeping code. */
void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
1233 | #endif | |
726f6388 | 1234 | |
f73dda09 JA |
1235 | #ifndef NO_VALLOC |
/* Shell-visible valloc wrapper: passes the caller's FILE and LINE through
   and tags the request with MALLOC_WRAPPER for the bookkeeping code. */
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
7117c2d2 | 1244 | #endif /* !NO_VALLOC */ |
f73dda09 | 1245 | |
7117c2d2 | 1246 | #endif /* MALLOC_WRAPFUNCS */ |
f73dda09 JA |
1247 | |
1248 | /* Externally-available functions that call their internal counterparts. */ | |
1249 | ||
/* Standard malloc interface; no call-site information is available, so
   file is NULL and line is 0. */
PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}
1256 | ||
/* Standard realloc interface; no call-site information is available, so
   file is NULL and line is 0. */
PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}
1264 | ||
/* Standard free interface; no call-site information is available, so
   file is NULL and line is 0. */
void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}
1271 | ||
f73dda09 JA |
/* Standard memalign interface; no call-site information is available, so
   file is NULL and line is 0. */
PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}
1279 | ||
1280 | #ifndef NO_VALLOC | |
/* Standard valloc interface; no call-site information is available, so
   file is NULL and line is 0. */
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
1287 | #endif | |
1288 | ||
1289 | #ifndef NO_CALLOC | |
/* Standard calloc interface; no call-site information is available, so
   file is NULL and line is 0. */
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}
bb70624e JA |
1296 | |
/* Traditional BSD cfree interface; no call-site information is available,
   so file is NULL and line is 0. */
void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
f73dda09 | 1303 | #endif |