/* malloc.c - dynamic memory allocation for bash. */

/* Copyright (C) 1985-2021 Free Software Foundation, Inc.

   This file is part of GNU Bash, the Bourne-Again SHell.

   Bash is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Bash is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Bash.  If not, see <http://www.gnu.org/licenses/>.
*/

/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * [VERY] old explanation:
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+5).  The
 * smallest allocatable block is 32 bytes.  The overhead information will
 * go in the first 16 bytes of the block, and the returned pointer will point
 * to the rest.
 */
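
/* Illustration (editorial, not in the original source): with the 16-byte
   header and 4-byte trailing guard described below, the 32-byte bucket
   (index 0) serves requests of 1 through 12 bytes: ALLOCATED_BYTES(12)
   is (12 + 16 + 4 + 15) & ~15 == 32, while a 13-byte request needs 33
   bytes and moves up to the 64-byte bucket. */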

/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */

#if defined (HAVE_CONFIG_H)
# include <config.h>
#endif /* HAVE_CONFIG_H */

#if defined (SHELL)
# include "bashtypes.h"
# include "stdc.h"
#else
# include <sys/types.h>
#endif

#if defined (HAVE_UNISTD_H)
# include <unistd.h>
#endif

/* Determine which kind of system this is. */
#include <signal.h>

#if defined (HAVE_STRING_H)
# include <string.h>
#else
# include <strings.h>
#endif
#include <errno.h>
#include <stdio.h>

#if !defined (botch)
#include <stdlib.h>
#endif

#if defined (HAVE_MMAP)
#include <sys/mman.h>
#endif

/* Define getpagesize () if the system does not. */
#ifndef HAVE_GETPAGESIZE
# include "getpagesize.h"
#endif

#include "imalloc.h"
#ifdef MALLOC_STATS
# include "mstats.h"
#endif
#ifdef MALLOC_REGISTER
# include "table.h"
#endif
#ifdef MALLOC_WATCH
# include "watch.h"
#endif

#ifdef powerof2
# undef powerof2
#endif
/* Could also use (((x) & -(x)) == (x)) */
#define powerof2(x) ((((x) - 1) & (x)) == 0)
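
/* Editorial note: by this definition powerof2 (0) is also true, which is
   why posix_memalign () below rejects a zero alignment separately. */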

/* System-specific omissions. */
#ifdef HPUX
# define NO_VALLOC
#endif

#define MALLOC_PAGESIZE_MIN 4096
#define MALLOC_INCR_PAGES 8192

#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block. */


/* We have a flag indicating whether memory is allocated, an index in
   nextf[], a size field, and a sentinel value to determine whether or
   not a caller wrote before the start of allocated memory; to realloc()
   memory we either copy mh_nbytes or just change mh_nbytes if there is
   enough room in the block for the new size.  Range checking is always
   done. */
union mhead {
  bits64_t mh_align[2];						/* 16 */
  struct {
    char mi_alloc;		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
    char mi_magic8[8];		/* MAGIC1 guard bytes */	/* 8 */
  } minfo;
};
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2
#define mh_magic8	minfo.mi_magic8

#define MAGIC8_NUMBYTES	8
#define MALLOC_SIZE_T	u_bits32_t

#define MOVERHEAD	sizeof(union mhead)

#define MALIGN_MASK	15	/* one less than desired alignment */

/* Guard bytes we write at the end of the allocation, encoding the size. */
typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;

/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that. */
/* If we have mmap, this is not used for chunks larger than mmap_threshold,
   since we munmap immediately on free(). */

/* If SIZEOF_CHAR_P == 8, this goes into the mh_magic8 buffer at the end of
   the rest of the struct.  This may need adjusting. */
#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))

/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the bytes before the block's real space (-SIZEOF_CHAR_P bytes) */
#define MAGIC1 0x55
#define MAGIC2 0x5555

#define MSLOP  4		/* 4 bytes extra for u_bits32_t end guard size */

/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 16 (alignment) after accounting for
   malloc overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
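
/* Worked example (editorial): for a request of n == 100 bytes,
   ALLOCATED_BYTES(100) == (100 + 16 + 4 + 15) & ~15 == 128, which fits
   exactly in the 128-byte bucket (index 2). */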

#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \
    } \
  while (0)

/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	1	/* 64 */
#define SPLIT_MID	9	/* 16384 */
#define SPLIT_MAX	12	/* 131072 */

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	1	/* 64 */
#define COMBINE_MAX	(pagebucket - 1)	/* 2048 for 4096-byte pages */

#define LESSCORE_MIN	8	/* 8192 */
#define LESSCORE_FRC	11	/* 65536 */

/* Which bin do we prepopulate with the initial sbrk memory? */
#define PREPOP_BIN	1
#define PREPOP_SIZE	64

#define STARTBUCK	0

/* Should we use mmap for large allocations? */
#if defined (HAVE_MMAP)
# if defined (MAP_ANON) && !defined (MAP_ANONYMOUS)
#  define MAP_ANONYMOUS MAP_ANON
# endif
#endif

#if defined (HAVE_MMAP) && defined (MAP_ANONYMOUS)
# define USE_MMAP 1
#endif

#if defined (USE_MMAP)
# define MMAP_THRESHOLD 12	/* must be >= SPLIT_MAX, COMBINE_MAX */
#else
# define MMAP_THRESHOLD (8 * SIZEOF_LONG)
#endif
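
/* Editorial note: with USE_MMAP, any request that maps to a bucket index
   greater than MMAP_THRESHOLD (12; binsize(12) == 131072) is obtained
   directly from mmap() in morecore() and given back with munmap() in
   internal_free(). */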

/* We don't try to decipher the differences between the Linux-style and
   BSD-style implementations of mremap here; we use the Linux one. */
#if USE_MMAP == 1 && defined (HAVE_MREMAP) && defined (MREMAP_MAYMOVE)
# define USE_MREMAP 1
#endif

/* usable bins from STARTBUCK..NBUCKETS-1 */

#define NBUCKETS	28

/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */

/* Error codes passed as the second argument to xbotch(). */
#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08

/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
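
/* Example (editorial): an adjusted size of 128 satisfies RIGHT_BUCKET
   only for index 2 (64 < 128 <= 128), but IN_BUCKET for any index >= 2,
   since the allocation may have been moved up to a larger bucket when
   the exact one was busy. */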

/* nextf[i] is free list of blocks of size 2**(i + 5) */

static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while allocation or free of block size i is in progress. */

static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static const unsigned long binsizes[NBUCKETS] = {
	32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};

/* binsizes[x] == (1 << ((x) + 5)) */
#define binsize(x)	binsizes[(x)]
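
/* Editorial note: the last entry is capped at 4294967295 (2^32 - 1)
   rather than 2^32 so that it still fits in a 32-bit unsigned long;
   binsizes[x] == (1 << ((x) + 5)) holds for all earlier entries. */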

#define MAXALLOC_SIZE	binsizes[NBUCKETS-1]

#if !defined (errno)
extern int errno;
#endif

/* Declarations for internal functions */
static PTR_T internal_malloc PARAMS((size_t, const char *, int, int));
static PTR_T internal_realloc PARAMS((PTR_T, size_t, const char *, int, int));
static void internal_free PARAMS((PTR_T, const char *, int, int));
static PTR_T internal_memalign PARAMS((size_t, size_t, const char *, int, int));
#ifndef NO_CALLOC
static PTR_T internal_calloc PARAMS((size_t, size_t, const char *, int, int));
static void internal_cfree PARAMS((PTR_T, const char *, int, int));
#endif
#ifndef NO_VALLOC
static PTR_T internal_valloc PARAMS((size_t, const char *, int, int));
#endif
static PTR_T internal_remap PARAMS((PTR_T, size_t, int, int));

#if defined (botch)
extern void botch ();
#else
static void botch PARAMS((const char *, const char *, int));
#endif
static void xbotch PARAMS((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */

#ifdef SHELL
extern int running_trap;
extern int signal_is_trapped PARAMS((int));
#endif

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */

/* Debugging variables available to applications. */
int malloc_flags = 0;		/* future use */
int malloc_trace = 0;		/* trace allocations and frees to stderr */
int malloc_register = 0;	/* when non-zero, register allocations and
				   frees (requires MALLOC_REGISTER) */

/* Use a variable in case we want to dynamically adapt it in the future */
int malloc_mmap_threshold = MMAP_THRESHOLD;

#ifdef MALLOC_TRACE
char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc PARAMS((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free PARAMS((PTR_T, int, const char *, int));
#endif

#if !defined (botch)
static void
botch (s, file, line)
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  (void)fflush (stderr);
  abort ();
}
#endif

/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */
static void
xbotch (mem, e, s, file, line)
     PTR_T mem;
     int e;
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
	   file ? file : _("unknown"), line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif
  (void)fflush (stderr);
  botch(s, file, line);
}

/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU - 1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  busy[nbuck] = 1;
  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }
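  /* Here mp2 is mp1's predecessor on the free list (or mp2 == mp1 if mp1
     is still the list head), and mp, when non-null, is both CHAIN (mp1)
     and the block physically adjacent to mp1.  (Editorial comment.) */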

  if (mp == 0)
    {
      busy[nbuck] = 0;
      return;
    }

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    {
      busy[nbuck] = 0;
      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
    }

#ifdef MALLOC_DEBUG
  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    {
      busy[nbuck] = 0;
      return;	/* not adjacent */
    }
#endif

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}

/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1. */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  busy[nbuck] = 1;
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;
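  /* e.g., feeding bucket 2 (128 bytes) from bucket 4 (512 bytes) yields
     nblks == 4.  (Editorial comment.) */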

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}

/* Take the memory block MP and add it to a chain < NU.  NU is the right
   bucket, but is busy.  This avoids memory orphaning. */
static void
xsplit (mp, nu)
     union mhead *mp;
     int nu;
{
  union mhead *nh;
  int nbuck, nblks, split_max;
  unsigned long siz;

  nbuck = nu - 1;
  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)
    return;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nu]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */
  nh = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  busy[nbuck] = 1;
  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = nh;
  busy[nbuck] = 0;
}

void
_malloc_block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
# if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
# endif
#endif
}

void
_malloc_unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
# if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
# endif
#endif
}

#if defined (USE_LESSCORE)
/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif
}
#endif /* USE_LESSCORE */

/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */
static void
morecore (nu)
     register int nu;		/* size index to get more of */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
# if defined (SIGCHLD)
  if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
# else
  if (running_trap || signal_is_trapped (SIGINT))
# endif
#endif
    {
      _malloc_block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);		/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN && nu <= malloc_mmap_threshold)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && nu <= malloc_mmap_threshold && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }

#if defined (USE_MMAP)
  if (nu > malloc_mmap_threshold)
    {
      mp = (union mhead *)mmap (0, sbrk_amt, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if ((void *)mp == MAP_FAILED)
	goto morecore_done;
      nextf[nu] = mp;
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      CHAIN (mp) = 0;
#ifdef MALLOC_STATS
      _mstats.nmmap++;
      _mstats.tmmap += sbrk_amt;
#endif
      goto morecore_done;
    }
#endif

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 16-byte (MALIGN_MASK + 1)
     alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    _malloc_unblock_signals (&set, &oset);
}

static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}

static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < MALLOC_PAGESIZE_MIN)
    pagesz = MALLOC_PAGESIZE_MIN;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.nsbrk++;
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the
	 most popular bin (PREPOP_BIN == 1, 64 bytes) with it.  Add whatever
	 we need to curbrk to make things 64-byte aligned, compute how many
	 64-byte chunks we're going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}

static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  MALLOC_SIZE_T nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 4K
     (MALLOC_PAGESIZE_MIN) -- anything smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 16 (the alignment enforced by ALLOCATED_BYTES), then
     figure out which nextf[] area to use.  Try to be smart about where
     to start searching -- if the number of bytes needed is greater than
     the page size, we can start at pagebucket. */
#if SIZEOF_SIZE_T == 8
  if (ALLOCATED_BYTES(n) > MAXALLOC_SIZE)
    return ((PTR_T) NULL);
#endif
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests.  XXX - can increase this if HAVE_MMAP */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution. */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

  /* Begin guard */
  MALLOC_MEMSET ((char *)p->mh_magic8, MAGIC1, MAGIC8_NUMBYTES);

  /* End guard */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
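  /* The block now looks like this (editorial comment):
       [16-byte header][N user bytes][4-byte guard encoding N]
     with mh_magic8 guarding the bytes just before the user area. */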

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

#if defined (MALLOC_DEBUG)
  z = (char *) (p + 1);
  /* Check alignment of returned pointer */
  if ((unsigned long)z & MALIGN_MASK)
    fprintf (stderr, "malloc: %s:%d: warning: request for %d bytes not aligned on %d byte boundary\r\n",
	     file ? file : _("unknown"), line, p->mh_nbytes, MALIGN_MASK+1);
#endif

  return (PTR_T) (p + 1);
}

static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register MALLOC_SIZE_T nbytes;
  MALLOC_SIZE_T ubytes;		/* caller-requested size */
  mguard_t mg;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER) || defined (MALLOC_WATCH)
  if (malloc_trace || malloc_register || _malloc_nwatch > 0)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* The MAGIC8_NUMBYTES bytes before the memory handed to the user are now
     used for a simple check to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < MAGIC8_NUMBYTES; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("free: underflow detected; magic8 corrupted"), file, line);
  }

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

#if defined (USE_MMAP)
  if (nunits > malloc_mmap_threshold)
    {
      munmap (p, binsize (nunits));
#if defined (MALLOC_STATS)
      _mstats.nlesscore[nunits]++;
#endif
      goto free_return;
    }
#endif

#if defined (USE_LESSCORE)
  /* We take care of the mmap case and munmap above */
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }
#endif /* USE_LESSCORE */

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;	/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}

#if USE_MREMAP == 1
/* Assume the caller (internal_realloc) has already performed the sanity and
   overflow tests.  Basically we kill the old guard information, determine the
   new size, call mremap with the new size, and add the bookkeeping and guard
   information back in. */
static PTR_T
internal_remap (mem, n, nunits, flags)
     PTR_T mem;
     register size_t n;
     int nunits;
     int flags;
{
  register union mhead *p, *np;
  char *m, *z;
  mguard_t mg;
  MALLOC_SIZE_T nbytes;

  if (nunits >= NBUCKETS)	/* Uh oh */
    return ((PTR_T) NULL);

  p = (union mhead *)mem - 1;

  m = (char *)mem + p->mh_nbytes;
  z = mg.s;
  *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;	/* erase guard */

  nbytes = ALLOCATED_BYTES(n);

  busy[nunits] = 1;
  np = (union mhead *)mremap (p, binsize (p->mh_index), binsize (nunits), MREMAP_MAYMOVE);
  busy[nunits] = 0;
  if (np == MAP_FAILED)
    return (PTR_T)NULL;

  if (np != p)
    {
      np->mh_alloc = ISALLOC;
      np->mh_magic2 = MAGIC2;
      MALLOC_MEMSET ((char *)np->mh_magic8, MAGIC1, MAGIC8_NUMBYTES);
    }
  np->mh_index = nunits;
  np->mh_nbytes = n;

  mg.i = n;
  z = mg.s;
  m = (char *)(np + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

  return ((PTR_T)(np + 1));
}
#endif

static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register MALLOC_SIZE_T tocopy;
  register MALLOC_SIZE_T nbytes;
  register int newunits, nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < MAGIC8_NUMBYTES; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("realloc: underflow detected; magic8 corrupted"), file, line);
  }

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* If we're reallocating to the same size as previously, return now */
  if (n == p->mh_nbytes)
    return mem;

#if SIZEOF_SIZE_T == 8
  if (ALLOCATED_BYTES(n) > MAXALLOC_SIZE)
    return ((PTR_T) NULL);
#endif
  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed.  We accept
     bucket NU or NU-1 because the original allocation may have been moved
     up a bucket when the right one was busy. */
  if (RIGHT_BUCKET(nbytes, nunits) || RIGHT_BUCKET(nbytes, nunits-1))
    {
      /* Compensate for increment above. */
      m -= 4;

      *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

  if (n < tocopy)
    tocopy = n;

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

#if USE_MREMAP == 1
  /* If we are using mmap and have mremap, we use it here.  Make sure that
     the old size and new size are above the threshold where we use mmap */
  if (nbytes > p->mh_nbytes)
    newunits = nunits;
  else
    newunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; newunits < NBUCKETS; newunits++)
    if (nbytes <= binsize(newunits))
      break;

  if (nunits > malloc_mmap_threshold && newunits > malloc_mmap_threshold)
    {
      m = internal_remap (mem, n, newunits, MALLOC_INTERNAL);
      if (m == 0)
	return 0;
    }
  else
#endif /* USE_MREMAP */
    {
      if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
	return 0;
      FASTCOPY (mem, m, tocopy);
      internal_free (mem, file, line, MALLOC_INTERNAL);
    }

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}

static PTR_T
internal_memalign (alignment, size, file, line, flags)
     size_t alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it. */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
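
/* Editorial note: the aligned pointer is preceded by a header whose
   mh_nbytes records the distance back to the start of the underlying
   allocation; internal_free () sees ISMEMALIGN and rewinds by that
   distance to recover the true block before freeing it. */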

int
posix_memalign (memptr, alignment, size)
     void **memptr;
     size_t alignment, size;
{
  void *mem;

  /* Perform posix-mandated error checking here */
  if ((alignment % sizeof (void *) != 0) || alignment == 0)
    return EINVAL;
  else if (powerof2 (alignment) == 0)
    return EINVAL;

  mem = internal_memalign (alignment, size, (char *)0, 0, 0);
  if (mem != 0)
    {
      *memptr = mem;
      return 0;
    }
  return ENOMEM;
}

size_t
malloc_usable_size (mem)
     void *mem;
{
  register union mhead *p;
  register char *ap;

  if ((ap = (char *)mem) == 0)
    return 0;

  /* Find the true start of the memory block to discover which bin */
  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

  /* return 0 if ISFREE */
  if (p->mh_alloc == ISFREE)
    return 0;

  /* Since we use bounds checking, the usable size is the last requested size. */
  return (p->mh_nbytes);
}

#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */

#ifdef MALLOC_STATS
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#endif

#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */

/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif