/* The malloc headers and source files from the C library follow here. */

/* Declarations for `malloc' and friends.
   Copyright 1990, 91, 92, 93, 95, 96 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc.,
   59 Temple Place, Suite 330, Boston, MA 02111 USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation. */

/* XXX NOTES:
   1. Augment the mstats struct so we can see how many blocks for fragments
      and how many blocks for large requests were allocated.
*/

/* CHANGES:
   1. Reorganized the source for my benefit.
   2. Integrated the range-checking code by default.
   3. free(0) no longer dumps core.
   4. Extended the statistics.
   5. Fixed a couple of places where the stats were not kept correctly.
*/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#if defined (HAVE_STRING_H)
# include <string.h>
#else
# include <strings.h>
#endif

#if defined (HAVE_LIMITS_H)
# include <limits.h>
#endif

#if defined (HAVE_UNISTD_H)
# ifdef _MINIX
#  include <sys/types.h>
# endif
# include <unistd.h>
#endif

#if defined (HAVE_STDDEF_H)
# include <stddef.h>
#endif
#include <errno.h>

#if defined (RCHECK) && !defined (botch)
# include <stdio.h>
# define STDIO_H_INCLUDED
#endif

#include "stdc.h"

#ifndef errno
extern int errno;
#endif

/* Need an autoconf test for this. */
#if __STDC__
# undef genptr_t
# define genptr_t void *
#else
# undef genptr_t
# define genptr_t char *
#endif /* !__STDC__ */

#if !defined (HAVE_MEMSET)
# define memset(s, zero, n) bzero ((s), (n))
#endif
#if !defined (HAVE_MEMCPY)
# define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif

/* Cope with systems lacking `memmove'. */
#if !defined (HAVE_MEMMOVE) && !defined (memmove)
static void malloc_safe_bcopy __P ((genptr_t, genptr_t, size_t));
# define memmove(to, from, size) malloc_safe_bcopy ((from), (to), (size))
#endif

#ifndef NULL
#define NULL 0
#endif

#ifndef min
#define min(A, B) ((A) < (B) ? (A) : (B))
#endif

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of. */
enum mcheck_status
{
  MCHECK_DISABLED = -1,  /* Consistency checking is not turned on. */
  MCHECK_OK,             /* Block is fine. */
  MCHECK_FREE,           /* Block freed twice. */
  MCHECK_HEAD,           /* Memory before the block was clobbered. */
  MCHECK_TAIL            /* Memory after the block was clobbered. */
};

/* Statistics available to the user. */
struct mstats
{
  size_t bytes_total;    /* Total size of the heap. */
  size_t chunks_used;    /* Chunks allocated by the user. */
  size_t bytes_used;     /* Byte total of user-allocated chunks. */
  size_t chunks_free;    /* Chunks in the free list. */
  size_t bytes_free;     /* Byte total of chunks in the free list. */
  int nmalloc;           /* Total number of calls to malloc. */
  int nfree;             /* Total number of calls to free. */
  int nrealloc;          /* Total number of calls to realloc. */
  int nsbrk;             /* Total number of calls to sbrk. */
  size_t tsbrk;          /* Total number of bytes allocated via sbrk. */
  int negsbrk;           /* Total number of calls to sbrk with a negative arg. */
  size_t tnegsbrk;       /* Total number of bytes returned to the kernel. */
};
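
/* Usage sketch (added illustration; never compiled into the library):
   a caller snapshots the allocator's counters with `mstats' and reads
   the fields directly. */
#if 0
  {
    struct mstats ms;

    ms = mstats ();
    fprintf (stderr, "heap: %lu bytes total, %lu in use, %lu free\n",
             (unsigned long) ms.bytes_total, (unsigned long) ms.bytes_used,
             (unsigned long) ms.bytes_free);
  }
#endif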

#ifdef RCHECK
/* Arbitrary magical numbers. */
#define MAGICWORD 0xfedabeeb
#define MAGICFREE 0xd8675309
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

struct hdr
{
  size_t size;           /* Exact size requested by user. */
  u_bits32_t magic;      /* Magic number to check header integrity. */
};
#endif /* RCHECK */
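
/* Added illustration of the RCHECK block layout, inferred from the
   malloc/free wrappers below:

     [struct hdr][SIZE user bytes, flooded with MALLOCFLOOD][MAGICBYTE]

   malloc returns a pointer just past the header; free and realloc step
   back one `struct hdr' to find it, and checkhdr verifies both the magic
   word in the header and the MAGICBYTE trailer. */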

/* Functions exported by this library. */
/* Allocate SIZE bytes of memory. */
extern genptr_t malloc __P ((size_t __size));

/* Re-allocate the previously allocated block
   in genptr_t, making the new block SIZE bytes long. */
extern genptr_t realloc __P ((genptr_t __ptr, size_t __size));

/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
extern genptr_t calloc __P ((size_t __nmemb, size_t __size));

/* Free a block allocated by `malloc', `realloc' or `calloc'. */
extern void free __P ((genptr_t __ptr));

/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT. */
extern genptr_t memalign __P ((size_t __alignment, size_t __size));

/* Pick up the current statistics. */
extern struct mstats mstats __P ((void));

#ifdef RCHECK
extern enum mcheck_status mprobe __P((genptr_t ptr));
#endif

/* End of exported functions. */

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed. */
#define BLOCKLOG 12
#define BLOCKSIZE 4096          /* 1 << BLOCKLOG */
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
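
/* Worked example (added illustration): with BLOCKSIZE == 4096,
   BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, and BLOCKIFY (4097) == 2,
   i.e. a request is rounded up to a whole number of blocks.  Requests
   of BLOCKSIZE / 2 bytes or less are served from fragments instead
   (see imalloc below). */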

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit). */
#define HEAP 4194304            /* 1 << 22 */

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system. */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information. */
typedef union
{
  /* Heap information for a busy block. */
  struct
  {
    /* Zero for a large (multiblock) object, or positive giving the
       logarithm to the base two of the fragment size. */
    int type;
    union
    {
      struct
      {
        size_t nfree;   /* Free frags in a fragmented block. */
        size_t first;   /* First free fragment of the block. */
      } frag;
      /* For a large object, in its first block, this has the number
         of blocks in the object.  In the other blocks, this has a
         negative number which says how far back the first block is. */
      ptrdiff_t size;
    } info;
  } busy;
  /* Heap information for a free block (that may be the first of a
     free cluster). */
  struct
  {
    size_t size;        /* Size (in blocks) of a free cluster. */
    size_t next;        /* Index of next free cluster. */
    size_t prev;        /* Index of previous free cluster. */
  } free;
} malloc_info;

/* Pointer to first block of the heap. */
static char *_heapbase;

/* Table indexed by block number giving per-block information. */
static malloc_info *_heapinfo;

/* Address to block number and vice versa. */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((genptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
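
/* Round trip (added illustration): for any address A inside the heap,
   ADDRESS (BLOCK (A)) yields the start of A's block.  Block numbers
   start at 1, so _heapinfo[0] never describes a real block; the code
   below uses entry 0 as the anchor of the circular free list. */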

/* Number of info entries. */
static size_t heapsize;

/* Current search index for the heap table. */
static size_t _heapindex;

/* Limit of valid info table indices. */
static size_t _heaplimit;

/* Doubly linked lists of free fragments. */
struct list
{
  struct list *next;
  struct list *prev;
};

/* Free list headers for each fragment size. */
static struct list _fraghead[BLOCKLOG];
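
/* Added note: _fraghead[log] heads the list of free fragments of size
   1 << log bytes; e.g. _fraghead[5] chains the free 32-byte fragments.
   Since imalloc rounds requests up to sizeof (struct list), only
   indices from about log2 (sizeof (struct list)) through BLOCKLOG - 1
   are ever used. */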

/* List of blocks allocated with `memalign'. */
struct alignlist
{
  struct alignlist *next;
  genptr_t aligned;     /* The address that memalign returned. */
  genptr_t exact;       /* The address that malloc returned. */
};

/* List of blocks allocated by memalign. */
static struct alignlist *_aligned_blocks = NULL;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other. */
static genptr_t imalloc __P ((size_t __size));
static genptr_t irealloc __P ((genptr_t __ptr, size_t __size));
static void ifree __P ((genptr_t __ptr));

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object. */
static genptr_t malloc_find_object_address __P ((genptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory. */
static genptr_t default_morecore __P ((ptrdiff_t __size));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `default_morecore'. */
static size_t malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization. */
static int malloc_initialized;
/* Function called to initialize malloc data structures. */
static int malloc_initialize __P ((void));

#ifdef RCHECK
static void zmemset __P((genptr_t, int, size_t));
static enum mcheck_status checkhdr __P((const struct hdr *));
static void mabort __P((enum mcheck_status));
#endif

/* Instrumentation. */
static size_t chunks_used;
static size_t bytes_used;
static size_t chunks_free;
static size_t bytes_free;
static int nmalloc, nfree, nrealloc;
static int nsbrk;
static size_t tsbrk;
static int negsbrk;
static size_t tnegsbrk;

/* Aligned allocation. */
static genptr_t
align (size)
     size_t size;
{
  genptr_t result;
  unsigned long int adj;

  result = default_morecore (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      genptr_t new;
      adj = BLOCKSIZE - adj;
      new = default_morecore (adj);
      result = (char *) result + adj;
    }

  return result;
}

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return -1. */
static genptr_t
get_contiguous_space (size, position)
     ptrdiff_t size;
     genptr_t position;
{
  genptr_t before;
  genptr_t after;

  before = default_morecore (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now. */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them. */
  after = default_morecore (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it. */
  if (after != position)
    {
      default_morecore (- size);
      return 0;
    }

  return after;
}

/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics. */
inline static void
register_heapinfo ()
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics. */
  bytes_used += blocks * BLOCKSIZE;
  ++chunks_used;

  /* Describe the heapinfo block itself in the heapinfo. */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address. */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

/* Set everything up and remember that we have. */
static int
malloc_initialize ()
{
  if (malloc_initialized)
    return 0;

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  malloc_initialized = 1;
  return 1;
}

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space. */
static genptr_t
default_morecore (increment)
     ptrdiff_t increment;
{
  genptr_t result;

  nsbrk++;
  tsbrk += increment;
  if (increment < 0)
    {
      negsbrk++;
      tnegsbrk += -increment;
    }
  result = (genptr_t) sbrk (increment);
  if ((long)result == -1L)
    return NULL;
  return result;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */
static genptr_t
morecore (size)
     size_t size;
{
  genptr_t result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return. */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table. */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table. */
      newsize = heapsize;
      do
        newsize <<= 1;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation. */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null. */
          int save = errno;     /* Don't want to clobber errno with ENOMEM. */
          morecore_recursing = 1;
          newinfo = (malloc_info *) irealloc (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location. */
              memset (&newinfo[heapsize], 0, (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table. */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail? */
          if (newinfo == NULL)
            {
              default_morecore (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win. */
          if ((size_t) BLOCK ((char *) newinfo + newsize * sizeof (malloc_info)) < newsize)
            break;

          /* Must try again.  First give back most of what we just got. */
          default_morecore (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table. */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0, (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so ifree never decides
         it can relocate or resize the info table. */
      _heaplimit = 0;
      ifree (oldinfo);

      /* The new heap limit includes the new table just allocated. */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap. */
static genptr_t
imalloc (size)
     size_t size;
{
  genptr_t result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible. */

#if 0
  if (size == 0)
    return NULL;
#endif

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size. */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size. */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
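
      /* Added illustration: a request of 100 bytes gives size = 99 after
         the decrement, and the loop yields log = 7, i.e. a 128-byte
         fragment (1 << 7), the smallest power of two that fits 100. */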

      /* Look in the fragment lists for a
         free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters. */
          result = (genptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics. */
          ++chunks_used;
          bytes_used += 1 << log;
          --chunks_free;
          bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first. */
          result = imalloc (BLOCKSIZE);
          if (result == NULL)
            return NULL;

          /* Link all fragments but the first into the free list. */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block. */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          chunks_free += (BLOCKSIZE >> log) - 1;
          bytes_free += BLOCKSIZE - (1 << log);
          bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system. */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra. */
              size_t wantblocks = blocks + malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much. */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space. */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table. */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore (wantblocks * BLOCKSIZE);
              if (result == NULL)
                return NULL;
              block = BLOCK (result);
              /* Put the new block at the end of the free list. */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++chunks_free;
              bytes_free += wantblocks * BLOCKSIZE;
              /* Now loop to use some of that block for this allocation. */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list. */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list. */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++chunks_used;
      bytes_used += blocks * BLOCKSIZE;
      bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment. */
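      /* Added illustration: a three-block object starting at block B gets
         info sizes {3, -1, -2} for blocks B, B+1, B+2, so any block B+i
         can recover B as (B+i) + info.size. */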
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  return result;
}

genptr_t
malloc (size)
     size_t size;
{
#ifdef RCHECK
  struct hdr *hdr;
#endif

  nmalloc++;

  if (malloc_initialized == 0 && malloc_initialize () == 0)
    return NULL;

#ifdef RCHECK
  hdr = (struct hdr *) imalloc (sizeof (struct hdr) + size + 1);
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  zmemset ((genptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (genptr_t) (hdr + 1);
#else
  return (imalloc (size));
#endif
}

/* Free a block of memory allocated by `malloc'. */

/* Return memory to the heap. */
static void
ifree (ptr)
     genptr_t ptr;
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  genptr_t curbrk;
  size_t lesscore_threshold;
  register struct alignlist *l;

  if (ptr == NULL)
    return;

  /* Threshold of free space at which we will return some to the system. */
  lesscore_threshold = FINAL_FREE_BLOCKS + 2 * malloc_extra_blocks;

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free. */
        ptr = l->exact;
        break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can. */
      --chunks_used;
      bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation. */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list. */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor. */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list. */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size). */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --chunks_free;
        }

      /* How many trailing free blocks are there now? */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core? */
      curbrk = default_morecore (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system. */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system. */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system. */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location. */
              _heaplimit = 0;
              ifree (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed. */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data. */
              newinfo = (malloc_info *) imalloc (info_blocks * BLOCKSIZE);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system. */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system. */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              default_morecore (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --chunks_free;
              bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block. */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics. */
      --chunks_used;
      bytes_used -= 1 << type;
      ++chunks_free;
      bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block. */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block. */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate. */
          ++chunks_used;
          bytes_used += BLOCKSIZE;
          chunks_free -= BLOCKSIZE >> type;
          bytes_free -= BLOCKSIZE;

          ifree (ADDRESS (block));
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block. */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block. */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }
}

/* Return memory to the heap. */
void
free (ptr)
     genptr_t ptr;
{
#ifdef RCHECK
  struct hdr *hdr;
#endif

  nfree++;

  if (ptr == 0)
    return;

#ifdef RCHECK
  hdr = ((struct hdr *) ptr) - 1;
  checkhdr (hdr);
  hdr->magic = MAGICFREE;
  zmemset (ptr, FREEFLOOD, hdr->size);
  ifree (hdr);
#else
  ifree (ptr);
#endif
}

/* Change the size of a block allocated by `malloc'. */

#ifndef HAVE_MEMMOVE
/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap. */

/* Like bcopy except never gets confused by overlap. */

static void
malloc_safe_bcopy (afrom, ato, size)
     genptr_t afrom;
     genptr_t ato;
     size_t size;
{
  char *from, *to;

  from = afrom;
  to = ato;
  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that. */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end. */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor. */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from. */
          bcopy (from, to, endt - from);
        }
    }
}
#endif /* !HAVE_MEMMOVE */

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
static genptr_t
irealloc (ptr, size)
     genptr_t ptr;
     size_t size;
{
  genptr_t result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      ifree (ptr);
      return imalloc (0);
    }
  else if (ptr == NULL)
    return imalloc (size);

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment. */
      if (size <= BLOCKSIZE / 2)
        {
          result = imalloc (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              ifree (ptr);
              return result;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list. */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when ifree decrements it. */
          ++chunks_used;
          ifree (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary. */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving. */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system. */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          ifree (ptr);
          result = imalloc (size);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors. */
              if (_heapindex == block)
                (void) imalloc (blocks * BLOCKSIZE);
              else
                {
                  genptr_t previous;
                  previous = imalloc ((block - _heapindex) * BLOCKSIZE);
                  (void) imalloc (blocks * BLOCKSIZE);
                  ifree (previous);
                }
              return NULL;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size. */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment. */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
          result = imalloc (size);
          if (result == NULL)
            return NULL;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          ifree (ptr);
        }
      break;
    }

  return result;
}

genptr_t
realloc (ptr, size)
     genptr_t ptr;
     size_t size;
{
#ifdef RCHECK
  struct hdr *hdr;
  size_t osize;
#endif

  if (malloc_initialized == 0 && malloc_initialize () == 0)
    return NULL;

  nrealloc++;

#ifdef RCHECK
  /* realloc (NULL, size) must behave like malloc; there is no header
     to step back over in that case. */
  if (ptr == NULL)
    return malloc (size);

  hdr = ((struct hdr *) ptr) - 1;
  osize = hdr->size;

  checkhdr (hdr);
  if (size < osize)
    zmemset ((char *) ptr + size, FREEFLOOD, osize - size);
  hdr = (struct hdr *) irealloc ((genptr_t) hdr, sizeof (struct hdr) + size + 1);
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    zmemset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (genptr_t) (hdr + 1);
#else
  return (irealloc (ptr, size));
#endif
}

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros. */
genptr_t
calloc (nmemb, size)
     register size_t nmemb;
     register size_t size;
{
  register genptr_t result;

  /* Guard against overflow in the multiplication below. */
  if (size != 0 && nmemb > (size_t) -1 / size)
    return NULL;

  result = malloc (nmemb * size);
  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}

/* Define the `cfree' alias for `free'. */
void
cfree (ptr)
     genptr_t ptr;
{
  free (ptr);
}

genptr_t
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  genptr_t result;
  unsigned long int adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary. */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment. */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs. */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)       /* Impossible unless interrupted. */
        return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger. */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block. */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it. */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) imalloc (sizeof (struct alignlist));
          if (l == NULL)
            {
              free (result);
              return NULL;
            }
          l->next = _aligned_blocks;
          _aligned_blocks = l;
        }
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
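
/* Usage sketch (added illustration; never compiled into the library):
   memalign hands back a pointer somewhere inside a larger malloc'd
   block, recording the (aligned, exact) pair so a later free of the
   aligned pointer can be redirected to the real block start. */
#if 0
  {
    char *p;

    p = memalign (64, 1000);    /* (unsigned long) p % 64 == 0 */
    free (p);                   /* ifree finds p in _aligned_blocks
                                   and frees the exact block instead */
  }
#endif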

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions. */

genptr_t
_malloc (size)
     size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     genptr_t ptr;
{
  free (ptr);
}

genptr_t
_realloc (ptr, size)
     genptr_t ptr;
     size_t size;
{
  return realloc (ptr, size);
}

struct mstats
mstats ()
{
  struct mstats result;

  result.bytes_total = (char *) default_morecore (0) - _heapbase;
  result.chunks_used = chunks_used;
  result.bytes_used = bytes_used;
  result.chunks_free = chunks_free;
  result.bytes_free = bytes_free;
  result.nmalloc = nmalloc;
  result.nrealloc = nrealloc;
  result.nfree = nfree;
  result.nsbrk = nsbrk;
  result.tsbrk = tsbrk;
  result.negsbrk = negsbrk;
  result.tnegsbrk = tnegsbrk;

  return result;
}

#ifdef RCHECK
/* Standard debugging hooks for `malloc'. */

static void
zmemset (ptr, val, size)
     genptr_t ptr;
     int val;
     size_t size;
{
  char *cp = ptr;

  while (size--)
    *cp++ = val;
}

static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;

  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    mabort (status);
  return status;
}

#ifndef botch
botch (msg)
     char *msg;
{
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
}
#endif

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;

  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }

  botch (msg);
}

enum mcheck_status
mprobe (ptr)
     genptr_t ptr;
{
  /* The header sits immediately before the address handed to the user,
     so step back one `struct hdr' before checking. */
  return checkhdr (((struct hdr *) ptr) - 1);
}
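
/* Usage sketch (added illustration; never compiled into the library):
   probe a live allocation for header/trailer corruption. */
#if 0
  {
    char *p;

    p = malloc (10);
    p[10] = 0;          /* clobbers the MAGICBYTE trailer */
    mprobe (p);         /* detects MCHECK_TAIL and aborts via botch */
  }
#endif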

#ifndef STDIO_H_INCLUDED
# include <stdio.h>
#endif

void
print_malloc_stats (s)
     char *s;
{
  struct mstats ms;

  ms = mstats ();
  fprintf (stderr, "Memory allocation statistics: %s\n", s ? s : "");
  fprintf (stderr, "\nTotal chunks in use: %lu, total chunks free: %lu\n",
           (unsigned long) ms.chunks_used, (unsigned long) ms.chunks_free);
  fprintf (stderr, "Total bytes in use: %lu, total bytes free: %lu\n",
           (unsigned long) ms.bytes_used, (unsigned long) ms.bytes_free);
  fprintf (stderr, "Total bytes (from heapbase): %lu\n",
           (unsigned long) ms.bytes_total);
  fprintf (stderr, "Total mallocs: %d, total frees: %d, total reallocs: %d\n",
           ms.nmalloc, ms.nfree, ms.nrealloc);
  fprintf (stderr, "Total sbrks: %d, total bytes via sbrk: %lu\n",
           ms.nsbrk, (unsigned long) ms.tsbrk);
  fprintf (stderr, "Total negative sbrks: %d, total bytes returned to kernel: %lu\n",
           ms.negsbrk, (unsigned long) ms.tnegsbrk);
}
#endif /* RCHECK */