#include <common.h>

#if defined(CONFIG_UNIT_TEST)
#define DEBUG
#endif

#include <malloc.h>
#include <asm/io.h>

#ifdef DEBUG
#if __STD_C
static void malloc_update_mallinfo (void);
void malloc_stats (void);
#else
static void malloc_update_mallinfo ();
void malloc_stats();
#endif
#endif	/* DEBUG */

DECLARE_GLOBAL_DATA_PTR;

/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/

#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)
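/*
 * Worked example of the alignment macros above (values assumed purely for
 * illustration, not taken from the original source): with a 4096-byte
 * malloc_getpagesize, AlignPage(5000) rounds up to 8192, and
 * AlignPage64K(0x12345) rounds up to the next 64K boundary, 0x20000.
 */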

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
				    gNextAddress - gAddressBase,
				    MEM_DECOMMIT);
		assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}

void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
							    MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
							      gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (!new_address)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
								    MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
					    (size + gNextAddress -
					     AlignPage (gNextAddress)),
					    MEM_COMMIT, PAGE_READWRITE);
			if (!res)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
				     MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
				     MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif

/*
  Type declarations
*/

struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
} __attribute__((__may_alias__)) ;

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
        trailing size field since there is no
        next contiguous chunk that would have to index off it. (After
        initialization, `top' is forced to always exist.  If it would
        become less than MINSIZE bytes long, it is replenished via
        malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields.  Because they are
        never merged or traversed from any other chunk, they have no
        foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially.  It is never
       included in any bin, is used only if no other chunk is
       available, and is released back to the system if it is very
       large (see M_TRIM_THRESHOLD).

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk.  This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

    *  Implicitly, through the host system's memory mapping tables.
       If supported, requests greater than a threshold are usually
       serviced via calls to mmap, and then later released via munmap.

*/

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
  (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)

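/*
 * Worked example of the conversion macros above, assuming a 32-bit
 * INTERNAL_SIZE_T (SIZE_SZ == 4, MALLOC_ALIGNMENT == 8, MINSIZE == 16);
 * the numbers are illustrative, not taken from the original source:
 *
 *   request2size(10) -> 16   (10 + 4 + 7 = 21 < MINSIZE + 7, so MINSIZE)
 *   request2size(20) -> 24   ((20 + 4 + 7) & ~7: data plus size header,
 *                             rounded up to a multiple of 8)
 *   chunk2mem(p) == (char *)p + 8, the address handed to the caller, and
 *   mem2chunk(chunk2mem(p)) == p.
 */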

/*
  Physical chunk operations
*/

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)

/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

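/*
 * Illustrative sketch (values assumed, not from the original source): if a
 * chunk at address p has p->size == 0x31 (0x30 bytes plus the PREV_INUSE
 * bit), then next_chunk(p) == (mchunkptr)((char *)p + 0x30).  prev_chunk(p)
 * is only meaningful when PREV_INUSE is clear, because only then does
 * p->prev_size hold the size of the free neighbour in front of p.
 */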

/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))

/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))

/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (av_[2])          /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */

/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

static mbinptr av_[NAV * 2 + 2] = {
 NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

#ifdef CONFIG_NEEDS_MANUAL_RELOC
static void malloc_bin_reloc(void)
{
	mbinptr *p = &av_[2];
	size_t i;

	for (i = 2; i < ARRAY_SIZE(av_); ++i, ++p)
		*p = (mbinptr)((ulong)*p + gd->reloc_off);
}
#else
static inline void malloc_bin_reloc(void) {}
#endif

ulong mem_malloc_start = 0;
ulong mem_malloc_end = 0;
ulong mem_malloc_brk = 0;

void *sbrk(ptrdiff_t increment)
{
	ulong old = mem_malloc_brk;
	ulong new = old + increment;

	/*
	 * if we are giving memory back make sure we clear it out since
	 * we set MORECORE_CLEARS to 1
	 */
	if (increment < 0)
		memset((void *)new, 0, -increment);

	if ((new < mem_malloc_start) || (new > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new;

	return (void *)old;
}

void mem_malloc_init(ulong start, ulong size)
{
	mem_malloc_start = start;
	mem_malloc_end = start + size;
	mem_malloc_brk = start;

	debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
	      mem_malloc_end);
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
	memset((void *)mem_malloc_start, 0x0, size);
#endif
	malloc_bin_reloc();
}

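/*
 * Typical bring-up sequence (hypothetical addresses, shown only for
 * illustration): board code reserves a region and hands it to dlmalloc
 * before the first malloc() call, e.g.
 *
 *	mem_malloc_init(0x8f000000, 0x100000);	(1 MiB arena)
 *	buf = malloc(256);			(now served from that arena)
 *
 * sbrk() then moves mem_malloc_brk within [mem_malloc_start, mem_malloc_end]
 * and returns MORECORE_FAILURE once the region would be exhausted.
 */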
/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                         \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3):\
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6):\
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9):\
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12):\
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15):\
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18):\
					  126)
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)

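/*
 * Worked examples for the indexing macros above (sizes are illustrative,
 * not from the original source):
 *
 *   bin_index(24)      ->  3   (24 >> 9 == 0, so 24 >> 3)
 *   bin_index(1000)    -> 71   (1000 >> 9 == 1 <= 4, so 56 + (1000 >> 6))
 *   smallbin_index(24) ->  3   (small bins are simply size / 8)
 *
 * is_small_request(nb) holds for nb < 504, so that both nb's own bin and
 * the next one are still small bins.
 */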
/*
   To help compensate for the large number of bins, a one-level index
   structure is used for bin-by-bin searching.  `binblocks' is a
   one-word bitvector recording whether groups of BINBLOCKWIDTH bins
   have any (possibly) non-empty bins, so they can be skipped over
   all at once during traversals. The bits are NOT always
   cleared as soon as all bins in a block are empty, but instead only
   when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks_r     ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
#define binblocks_w     (av_[1])

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
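
/*
 * Example of the block bookkeeping (numbers assumed for illustration): a
 * chunk linked into bin 71 sets bit idx2binblock(71) == 1 << (71 / 4) ==
 * 1 << 17 in binblocks, so malloc can skip bins 68..71 with a single test
 * whenever that bit is clear.
 */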

/*  Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/* Tracking mmaps */

#ifdef DEBUG
static unsigned int n_mmaps = 0;
#endif	/* DEBUG */
static unsigned long mmapped_mem = 0;
#if HAVE_MMAP
static unsigned int max_n_mmaps = 0;
static unsigned long max_mmapped_mem = 0;
#endif

/*
  Debugging support
*/

#ifdef DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif

/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)					\
{									\
  if (S < MAX_SMALLBIN_SIZE)						\
  {									\
    IDX = smallbin_index(S);						\
    mark_binblock(IDX);							\
    BK = bin_at(IDX);							\
    FD = BK->fd;							\
    P->bk = BK;								\
    P->fd = FD;								\
    FD->bk = BK->fd = P;						\
  }									\
  else									\
  {									\
    IDX = bin_index(S);							\
    BK = bin_at(IDX);							\
    FD = BK->fd;							\
    if (FD == BK) mark_binblock(IDX);					\
    else								\
    {									\
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;		\
      BK = FD->bk;							\
    }									\
    P->bk = BK;								\
    P->fd = FD;								\
    FD->bk = BK->fd = P;						\
  }									\
}


/* take a chunk off a list */

#define unlink(P, BK, FD)						\
{									\
  BK = P->bk;								\
  FD = P->fd;								\
  FD->bk = BK;								\
  BK->fd = FD;								\
}

/* Place p as the last remainder */

#define link_last_remainder(P)						\
{									\
  last_remainder->fd = last_remainder->bk = P;				\
  P->fd = P->bk = last_remainder;					\
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)


/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */


/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
#endif
{
  char*     brk;                  /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  char*     new_brk;              /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;        /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long pagesz = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if an intervening foreign sbrk call.) */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE (sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
    return;

  sbrked_mem += sbrk_size;

  if (brk == old_end) /* can just add bytes to current top */
  {
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1))  /* First time through. Record base */
      sbrk_base = brk;
    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */

    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));

    /* Allocate correction */
    new_brk = (char*)(MORECORE (correction));
    if (new_brk == (char*)(MORECORE_FAILURE)) return;

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {

      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
	set_head(top, PREV_INUSE); /* will force null return from malloc */
	return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head_size(old_top, old_top_size);
      chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
      /* If possible, release the rest. */
      if (old_top_size >= MINSIZE)
	fREe(chunk2mem(old_top));
    }
  }

  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
    max_sbrked_mem = sbrked_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
}


/* Main public routines */


/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps that succeeds is taken:

    1. The bin corresponding to the request size is scanned, and if
       a chunk of exactly the right size is found, it is taken.

    2. The most recently remaindered chunk is used if it is big
       enough.  This is a form of (roving) first fit, used only in
       the absence of exact fits. Runs of consecutive requests use
       the remainder of the chunk used for the previous such request
       whenever possible. This limited use of a first-fit style
       allocation strategy tends to give contiguous chunks
       coextensive lifetimes, which improves locality and can reduce
       fragmentation in the long run.

    3. Other bins are scanned in increasing size order, using a
       chunk big enough to fulfill the request, and splitting off
       any remainder.  This search is strictly by best-fit; i.e.,
       the smallest (with ties going to approximately the least
       recently used) chunk that fits is selected.

    4. If large enough, the chunk bordering the end of memory
       (`top') is split off. (This use of `top' is in accord with
       the best-fit search rule.  In effect, `top' is treated as
       larger (and thus less well fitting) than any other available
       chunk since it can be extended to be as large as necessary
       (up to system limitations).

    5. If the request size meets the mmap threshold and the
       system supports mmap, and there are few enough currently
       allocated mmapped regions, and a call to mmap succeeds,
       the request is allocated via direct memory mapping.

    6. Otherwise, the top of memory is extended by
       obtaining more space from the system (normally using sbrk,
       but definable to anything else via the MORECORE macro).
       Memory is gathered from the system (in system page-sized
       units) in a way that allows chunks obtained across different
       sbrk calls to be consolidated, but does not require
       contiguous memory. Thus, it should be safe to intersperse
       mallocs with other sbrk calls.


      All allocations are made from the `lowest' part of any found
      chunk. (The implementation invariant is that prev_inuse is
      always true of any allocated chunk; i.e., that each allocated
      chunk borders either a previously allocated and still in-use chunk,
      or the base of its memory arena.)

*/

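/*
 * A short trace of the algorithm above (sizes assumed, purely for
 * illustration): malloc(20) becomes nb = 24; step 1 looks in small bin 3
 * (and the next bin, whose remainder would be below MINSIZE); if both are
 * empty, step 2 tries last_remainder, step 3 scans the marked bin blocks
 * for a best fit, and otherwise the request is carved from `top',
 * extending it via malloc_extend_top() when chunksize(top) - nb < MINSIZE.
 */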
#if __STD_C
Void_t* mALLOc(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
#endif
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
		return malloc_simple(bytes);
#endif

	/* check if mem_malloc_init() was run */
	if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
		/* not initialized yet */
		return NULL;
	}

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size; */

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
	--idx; /* adjust to rescan below after checking last remainder */
	break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
	unlink(victim, bck, fwd);
	set_inuse_bit_at_offset(victim, victim_size);
	check_malloced_chunk(victim, nb);
	return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks_r)
  {

    /* Get to the first marked block */

    if ( (block & binblocks_r) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks_r) == 0)
      {
	idx += BINBLOCKWIDTH;
	block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
	/* Find and use first big enough chunk ... */

	for (victim = last(bin); victim != bin; victim = victim->bk)
	{
	  victim_size = chunksize(victim);
	  remainder_size = victim_size - nb;

	  if (remainder_size >= (long)MINSIZE) /* split */
	  {
	    remainder = chunk_at_offset(victim, nb);
	    set_head(victim, nb | PREV_INUSE);
	    unlink(victim, bck, fwd);
	    link_last_remainder(remainder);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    set_foot(remainder, remainder_size);
	    check_malloced_chunk(victim, nb);
	    return chunk2mem(victim);
	  }

	  else if (remainder_size >= 0)  /* take */
	  {
	    set_inuse_bit_at_offset(victim, victim_size);
	    unlink(victim, bck, fwd);
	    check_malloced_chunk(victim, nb);
	    return chunk2mem(victim);
	  }

	}

	bin = next_bin(bin);

      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
	{
	  av_[1] = (mbinptr)(binblocks_r & ~block);
	  break;
	}
	--startidx;
	q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks_r && (block != 0) )
      {
	while ((block & binblocks_r) == 0)
	{
	  idx += BINBLOCKWIDTH;
	  block <<= 1;
	}
      }
      else
	break;
    }
  }

  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists  */
  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
	(victim = mmap_chunk(nb)))
      return chunk2mem(victim);
#endif

    /* Try to extend */
    malloc_extend_top(nb);
    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  return chunk2mem(victim);

}


/*

  free() algorithm :

  cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via munmap().

       3. If a returned chunk borders the current high end of memory,
	  it is consolidated into the top, and if the total unused
	  topmost memory exceeds the trim threshold, malloc_trim is
	  called.

       4. Other chunks are consolidated as they arrive, and
	  placed in corresponding bins. (This includes the case of
	  consolidating with the current `last_remainder').

*/

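/*
 * Consolidation example (sizes assumed for illustration): freeing a 24-byte
 * chunk whose 40-byte physical successor is already free, while the
 * predecessor is in use (PREV_INUSE set), unlinks the successor from its
 * bin, merges the two into one 64-byte free chunk, writes the new size into
 * both the head and the foot (the prev_size of the following chunk), and
 * frontlinks the result into bin smallbin_index(64) == 8.
 */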

#if __STD_C
void fREe(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
#endif
{
  mchunkptr p;         /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T sz;  /* its size */
  int       idx;       /* its bin index */
  mchunkptr next;      /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz;  /* its size */
  INTERNAL_SIZE_T prevsz;  /* size of previous contiguous chunk */
  mchunkptr bck;       /* misc temp for linking */
  mchunkptr fwd;       /* misc temp for linking */
  int       islr;      /* track whether merging with last_remainder */

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	/* free() is a no-op - all the memory will be freed on relocation */
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
		return;
#endif

  if (mem == NULL)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);
  hd = p->size;

#if HAVE_MMAP
  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top)                            /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                    /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
      malloc_trim(top_pad);
    return;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }

  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);
}


/*

  Realloc algorithm:

    Chunks that were obtained via mmap cannot be extended or shrunk
    unless HAVE_MREMAP is defined, in which case mremap is used.
    Otherwise, if their reallocation is for additional space, they are
    copied.  If for less, they are just left alone.

    Otherwise, if the reallocation is for additional space, and the
    chunk can be extended, it is, else a malloc-copy-free sequence is
    taken.  There are several different ways that a chunk could be
    extended. All are tried:

       * Extending forward into following adjacent free chunk.
       * Shifting backwards, joining preceding adjacent space
       * Both shifting backwards and extending forward.
       * Extending into newly sbrked space

    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
    size argument of zero (re)allocates a minimum-sized chunk.

    If the reallocation is for less space, and the new request is for
    a `small' (<512 bytes) size, then the newly unused space is lopped
    off and freed.

    The old unix realloc convention of allowing the last-free'd chunk
    to be used as an argument to realloc is no longer supported.
    I don't know of any programs still relying on this feature,
    and allowing it would also allow too many other incorrect
    usages of realloc to be sensible.

*/

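/*
 * Illustrative case (sizes assumed, not from the original source): growing
 * an in-use 24-byte chunk to request2size(40) == 48 first checks whether
 * the physically following chunk is free and large enough; if it supplies
 * the missing 24 bytes the two are joined in place and any surplus of
 * MINSIZE or more is split off and handed back to free(), so no copy is
 * needed.  Only when neither the next nor the previous neighbour helps
 * does realloc fall back to malloc-copy-free.
 */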
1646#if __STD_C
1647Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
1648#else
1649Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
1650#endif
1651{
1652 INTERNAL_SIZE_T nb; /* padded request size */
1653
1654 mchunkptr oldp; /* chunk corresponding to oldmem */
1655 INTERNAL_SIZE_T oldsize; /* its size */
1656
1657 mchunkptr newp; /* chunk to return */
1658 INTERNAL_SIZE_T newsize; /* its size */
1659 Void_t* newmem; /* corresponding user mem */
1660
1661 mchunkptr next; /* next contiguous chunk after oldp */
1662 INTERNAL_SIZE_T nextsize; /* its size */
1663
1664 mchunkptr prev; /* previous contiguous chunk before oldp */
1665 INTERNAL_SIZE_T prevsize; /* its size */
1666
1667 mchunkptr remainder; /* holds split off extra space from newp */
1668 INTERNAL_SIZE_T remainder_size; /* its size */
1669
1670 mchunkptr bck; /* misc temp for linking */
1671 mchunkptr fwd; /* misc temp for linking */
1672
1673#ifdef REALLOC_ZERO_BYTES_FREES
a874cac3
HS
1674 if (!bytes) {
1675 fREe(oldmem);
1676 return NULL;
1677 }
217c9dad
WD
1678#endif
1679
199adb60 1680 if ((long)bytes < 0) return NULL;
217c9dad
WD
1681
1682 /* realloc of null is supposed to be same as malloc */
199adb60 1683 if (oldmem == NULL) return mALLOc(bytes);
217c9dad 1684
f1896c45 1685#if CONFIG_VAL(SYS_MALLOC_F_LEN)
c9356be3 1686 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
d59476b6
SG
1687 /* This is harder to support and should not be needed */
1688 panic("pre-reloc realloc() is not supported");
1689 }
1690#endif
1691
217c9dad
WD
1692 newp = oldp = mem2chunk(oldmem);
1693 newsize = oldsize = chunksize(oldp);
1694
1695
1696 nb = request2size(bytes);
1697
1698#if HAVE_MMAP
1699 if (chunk_is_mmapped(oldp))
1700 {
1701#if HAVE_MREMAP
1702 newp = mremap_chunk(oldp, nb);
1703 if(newp) return chunk2mem(newp);
1704#endif
1705 /* Note the extra SIZE_SZ overhead. */
1706 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
1707 /* Must alloc, copy, free. */
1708 newmem = mALLOc(bytes);
a874cac3
HS
1709 if (!newmem)
1710 return NULL; /* propagate failure */
217c9dad
WD
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  check_inuse_chunk(oldp);

  if ((long)(oldsize) < (long)(nb))
  {

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top)
      {
	if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
	{
	  newsize += nextsize;
	  top = chunk_at_offset(oldp, nb);
	  set_head(top, (newsize - nb) | PREV_INUSE);
	  set_head_size(oldp, nb);
	  return chunk2mem(oldp);
	}
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
	unlink(next, bck, fwd);
	newsize += nextsize;
	goto split;
      }
    }
    else
    {
      next = NULL;
      nextsize = 0;
    }

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != NULL)
      {
	/* into top */
	if (next == top)
	{
	  if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
	  {
	    unlink(prev, bck, fwd);
	    newp = prev;
	    newsize += prevsize + nextsize;
	    newmem = chunk2mem(newp);
	    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
	    top = chunk_at_offset(newp, nb);
	    set_head(top, (newsize - nb) | PREV_INUSE);
	    set_head_size(newp, nb);
	    return newmem;
	  }
	}

	/* into next chunk */
	else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
	{
	  unlink(next, bck, fwd);
	  unlink(prev, bck, fwd);
	  newp = prev;
	  newsize += nextsize + prevsize;
	  newmem = chunk2mem(newp);
	  MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
	  goto split;
	}
      }

      /* backward only */
      if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
      {
	unlink(prev, bck, fwd);
	newp = prev;
	newsize += prevsize;
	newmem = chunk2mem(newp);
	MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
	goto split;
      }
    }

    /* Must allocate */

    newmem = mALLOc (bytes);

    if (newmem == NULL)  /* propagate failure */
      return NULL;

    /* Avoid copy if newp is next chunk after oldp. */
    /* (This can only happen when new chunk is sbrk'ed.) */

    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
    {
      newsize += chunksize(newp);
      newp = oldp;
      goto split;
    }

    /* Otherwise copy, free, and exit */
    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
    fREe(oldmem);
    return newmem;
  }


 split:  /* split off extra room in old or expanded chunk */

  if (newsize - nb >= MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    fREe(chunk2mem(remainder)); /* let free() deal with it */
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  return chunk2mem(newp);
}
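
/*
  Illustrative usage sketch (editorial addition, not part of dlmalloc):
  rEALLOc above first tries to grow a chunk in place -- forward into
  top or a free successor, or backward into a free predecessor --
  before falling back to allocate-copy-free.  The DLMALLOC_EXAMPLES
  guard is hypothetical and never defined, so this code is not built.
*/
#ifdef DLMALLOC_EXAMPLES
static void example_realloc(void)
{
  char *p = (char *)mALLOc(100);
  char *q;

  if (p == NULL)
    return;
  q = (char *)rEALLOc(p, 200);	/* may return p itself if a neighbouring
				   free chunk absorbed the growth */
  if (q == NULL) {
    fREe(p);			/* on failure the old block stays valid */
    return;
  }
  fREe(q);
}
#endif /* DLMALLOC_EXAMPLES */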



/*

  memalign algorithm:

  memalign requests more than enough space from malloc, finds a spot
  within that chunk that meets the alignment request, and then
  possibly frees the leading and trailing space.

  The alignment argument must be a power of two. This property is not
  checked by memalign, so misuse may result in random runtime errors.

  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.

*/


#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  INTERNAL_SIZE_T nb;             /* padded request size */
  char*           m;              /* memory returned by malloc call */
  mchunkptr       p;              /* corresponding chunk */
  char*           brk;            /* alignment point within p */
  mchunkptr       newp;           /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  mchunkptr       remainder;      /* spare room at end to split off */
  long            remainder_size; /* its size */

  if ((long)bytes < 0) return NULL;

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment < MINSIZE) alignment = MINSIZE;

  /* Call malloc with worst case padding to hit alignment. */

  nb = request2size(bytes);
  m = (char*)(mALLOc(nb + alignment + MINSIZE));

  /*
   * The attempt to over-allocate (with a size large enough to guarantee the
   * ability to find an aligned region within allocated memory) failed.
   *
   * Try again, this time only allocating exactly the size the user wants. If
   * the allocation now succeeds and just happens to be aligned, we can still
   * fulfill the user's request.
   */
  if (m == NULL) {
    size_t extra, extra2;
    /*
     * Use bytes not nb, since mALLOc internally calls request2size too, and
     * each call increases the size to allocate, to account for the header.
     */
    m = (char*)(mALLOc(bytes));
    /* Aligned -> return it */
    if ((((unsigned long)(m)) % alignment) == 0)
      return m;
    /*
     * Otherwise, try again, requesting enough extra space to be able to
     * acquire alignment.
     */
    fREe(m);
    /* Add in extra bytes to match misalignment of unexpanded allocation */
    extra = alignment - (((unsigned long)(m)) % alignment);
    m = (char*)(mALLOc(bytes + extra));
    /*
     * m might not be the same as before. Validate that the previous value
     * of extra still works for the current value of m.
     * If m is NULL, the fall-through NULL check below handles it.
     */
    if (m) {
      extra2 = alignment - (((unsigned long)(m)) % alignment);
      if (extra2 > extra) {
	fREe(m);
	m = NULL;
      }
    }
    /* Fall through to original NULL check and chunk splitting logic */
  }

  if (m == NULL) return NULL; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
  {
#if HAVE_MMAP
    if(chunk_is_mmapped(p))
      return chunk2mem(p); /* nothing more to do */
#endif
  }
  else /* misaligned */
  {
    /*
      Find an aligned spot inside chunk.
      Since we need to give back leading space in a chunk of at
      least MINSIZE, if the first calculation places us at
      a spot with less than MINSIZE leader, we can move to the
      next aligned spot -- we've allocated enough total room so that
      this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

#if HAVE_MMAP
    if(chunk_is_mmapped(p))
    {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }
#endif

    /* give back leader, use the rest */

    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    fREe(chunk2mem(p));
    p = newp;

    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */

  remainder_size = chunksize(p) - nb;

  if (remainder_size >= (long)MINSIZE)
  {
    remainder = chunk_at_offset(p, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head_size(p, nb);
    fREe(chunk2mem(remainder));
  }

  check_inuse_chunk(p);
  return chunk2mem(p);

}
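
/*
  Illustrative usage sketch (editorial addition, not part of dlmalloc):
  the alignment passed to mEMALIGn must be a power of two, and the
  result is freed with fREe() like any other allocation.  The
  DLMALLOC_EXAMPLES guard is hypothetical and never defined.
*/
#ifdef DLMALLOC_EXAMPLES
static void example_memalign(void)
{
  /* Request 1024 bytes aligned to a 64-byte boundary. */
  Void_t* buf = mEMALIGn(64, 1024);

  if (buf) {
    assert(((unsigned long)buf % 64) == 0);
    fREe(buf);
  }
}
#endif /* DLMALLOC_EXAMPLES */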



/*
  valloc just invokes memalign with alignment argument equal
  to the page size of the system (or as near to this as can
  be figured out from all the includes/defines above.)
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  return mEMALIGn (malloc_getpagesize, bytes);
}

/*
  pvalloc just invokes memalign for the nearest pagesize multiple
  that will accommodate the request
*/


#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
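
/*
  Worked example (editorial addition): the rounding expression used by
  pvALLOc above.  With a 4096-byte page, requests of 1..4096 bytes map
  to 4096 and 4097 maps to 8192; the mask trick requires the page size
  to be a power of two.  DLMALLOC_EXAMPLES is a hypothetical guard
  that is never defined.
*/
#ifdef DLMALLOC_EXAMPLES
static size_t example_round_to_page(size_t bytes)
{
  size_t pagesize = malloc_getpagesize;

  return (bytes + pagesize - 1) & ~(pagesize - 1);
}
#endif /* DLMALLOC_EXAMPLES */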

/*

  calloc calls malloc, then zeroes out the allocated chunk.

*/

#if __STD_C
Void_t* cALLOc(size_t n, size_t elem_size)
#else
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T csz;

  INTERNAL_SIZE_T sz = n * elem_size;

  Void_t* mem;

  /* check if expand_top called, in which case don't need to clear */
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
  mchunkptr oldtop = top;
  INTERNAL_SIZE_T oldtopsize = chunksize(top);
#endif
#endif

  /* Reject negative counts before allocating, so nothing can leak. */
  if ((long)n < 0) return NULL;

  mem = mALLOc (sz);

  if (mem == NULL)
    return NULL;
  else
  {
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
		MALLOC_ZERO(mem, sz);
		return mem;
	}
#endif

    p = mem2chunk(mem);

    /* Two optional cases in which clearing not necessary */

#if HAVE_MMAP
    if (chunk_is_mmapped(p)) return mem;
#endif

    csz = chunksize(p);

#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
    if (p == oldtop && csz > oldtopsize)
    {
      /* clear only the bytes from non-freshly-sbrked memory */
      csz = oldtopsize;
    }
#endif
#endif

    MALLOC_ZERO(mem, csz - SIZE_SZ);
    return mem;
  }
}
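
/*
  Defensive wrapper sketch (editorial addition): cALLOc above computes
  n * elem_size without an overflow check, so a caller that cannot
  trust its inputs may want to reject overflowing products first.
  DLMALLOC_EXAMPLES is a hypothetical guard that is never defined.
*/
#ifdef DLMALLOC_EXAMPLES
static Void_t* example_checked_calloc(size_t n, size_t elem_size)
{
  if (elem_size != 0 && n > (size_t)-1 / elem_size)
    return NULL;	/* n * elem_size would wrap around */
  return cALLOc(n, elem_size);
}
#endif /* DLMALLOC_EXAMPLES */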

/*

  cfree just calls free. It is needed/defined on some systems
  that pair it with calloc, presumably for odd historical reasons.

*/

#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
#if __STD_C
void cfree(Void_t *mem)
#else
void cfree(mem) Void_t *mem;
#endif
{
  fREe(mem);
}
#endif


/*

  Malloc_trim gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.

*/

#if __STD_C
int malloc_trim(size_t pad)
#else
int malloc_trim(pad) size_t pad;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by negative sbrk call */

  unsigned long pagesz = malloc_getpagesize;

  top_size = chunksize(top);
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra < (long)pagesz)  /* Not enough memory to release */
    return 0;

  else
  {
    /* Test to make sure no one else called sbrk */
    current_brk = (char*)(MORECORE (0));
    if (current_brk != (char*)(top) + top_size)
      return 0;     /* Apparently we don't own memory; must fail */

    else
    {
      new_brk = (char*)(MORECORE (-extra));

      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
      {
	/* Try to figure out what we have */
	current_brk = (char*)(MORECORE (0));
	top_size = current_brk - (char*)top;
	if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
	{
	  sbrked_mem = current_brk - sbrk_base;
	  set_head(top, top_size | PREV_INUSE);
	}
	check_chunk(top);
	return 0;
      }

      else
      {
	/* Success. Adjust top accordingly. */
	set_head(top, (top_size - extra) | PREV_INUSE);
	sbrked_mem -= extra;
	check_chunk(top);
	return 1;
      }
    }
  }
}
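
/*
  Usage sketch (editorial addition): a plausible point to call
  malloc_trim is right after freeing a large block, keeping some slack
  (the `pad' argument) for expected future allocations.
  DLMALLOC_EXAMPLES is a hypothetical guard that is never defined.
*/
#ifdef DLMALLOC_EXAMPLES
static void example_trim(void)
{
  Void_t* big = mALLOc(1024 * 1024);

  fREe(big);		/* fREe(NULL) is a no-op, so no check needed */
  /* Keep 64KB untrimmed; returns 1 only if memory was released. */
  if (malloc_trim(64 * 1024))
    printf("malloc_trim: released memory to the system\n");
}
#endif /* DLMALLOC_EXAMPLES */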


/*
  malloc_usable_size:

  This routine tells you how many bytes you can actually use in an
  allocated chunk, which may be more than you requested (although
  often not). You can use this many bytes without worrying about
  overwriting other allocated objects. Not a particularly great
  programming practice, but still sometimes useful.

*/

#if __STD_C
size_t malloc_usable_size(Void_t* mem)
#else
size_t malloc_usable_size(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem == NULL)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}
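
/*
  Usage sketch (editorial addition): every byte up to the reported
  usable size may be used safely, even though only 100 were requested.
  DLMALLOC_EXAMPLES is a hypothetical guard that is never defined.
*/
#ifdef DLMALLOC_EXAMPLES
static void example_usable_size(void)
{
  Void_t* p = mALLOc(100);

  if (p) {
    size_t usable = malloc_usable_size(p);	/* >= 100 after rounding */

    MALLOC_ZERO(p, usable);
    fREe(p);
  }
}
#endif /* DLMALLOC_EXAMPLES */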


/* Utility to update current_mallinfo for malloc_stats and mallinfo() */

#ifdef DEBUG
static void malloc_update_mallinfo()
{
  int i;
  mbinptr b;
  mchunkptr p;
#ifdef DEBUG
  mchunkptr q;
#endif

  INTERNAL_SIZE_T avail = chunksize(top);
  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(i);
    for (p = last(b); p != b; p = p->bk)
    {
#ifdef DEBUG
      check_free_chunk(p);
      for (q = next_chunk(p);
	   q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
	   q = next_chunk(q))
	check_inuse_chunk(q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  current_mallinfo.ordblks = navail;
  current_mallinfo.uordblks = sbrked_mem - avail;
  current_mallinfo.fordblks = avail;
  current_mallinfo.hblks = n_mmaps;
  current_mallinfo.hblkhd = mmapped_mem;
  current_mallinfo.keepcost = chunksize(top);

}
#endif /* DEBUG */


/*

  malloc_stats:

  Prints the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), the maximum
  number of simultaneous mmap regions used, and the current number
  of bytes allocated via malloc (or realloc, etc) but not yet
  freed. (Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead.)

*/

#ifdef DEBUG
void malloc_stats()
{
  malloc_update_mallinfo();
  printf("max system bytes = %10u\n",
	 (unsigned int)(max_total_mem));
  printf("system bytes     = %10u\n",
	 (unsigned int)(sbrked_mem + mmapped_mem));
  printf("in use bytes     = %10u\n",
	 (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
  printf("max mmap regions = %10u\n",
	 (unsigned int)max_n_mmaps);
#endif
}
#endif /* DEBUG */

/*
  mallinfo returns a copy of updated current mallinfo.
*/

#ifdef DEBUG
struct mallinfo mALLINFo()
{
  malloc_update_mallinfo();
  return current_mallinfo;
}
#endif /* DEBUG */
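
/*
  Usage sketch (editorial addition): reading a few classic mallinfo
  fields.  Only meaningful when DEBUG is defined, since mALLINFo is
  compiled out otherwise.  DLMALLOC_EXAMPLES is a hypothetical guard
  that is never defined.
*/
#if defined(DLMALLOC_EXAMPLES) && defined(DEBUG)
static void example_mallinfo(void)
{
  struct mallinfo mi = mALLINFo();

  printf("free chunks %d, free bytes %d, in-use bytes %d\n",
	 mi.ordblks, mi.fordblks, mi.uordblks);
}
#endif /* DLMALLOC_EXAMPLES && DEBUG */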



/*
  mallopt:

  mallopt is the general SVID/XPG interface to tunable parameters.
  The format is to provide a (parameter-number, parameter-value) pair.
  mallopt then sets the corresponding parameter to the argument
  value if it can (i.e., so long as the value is meaningful),
  and returns 1 if successful else 0.

  See descriptions of tunable parameters above.

*/

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else n_mmaps_max = value; return 1;
#endif

    default:
      return 0;
  }
}
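
/*
  Usage sketch (editorial addition): raising the trim threshold so
  malloc keeps more free memory before giving it back to the system.
  DLMALLOC_EXAMPLES is a hypothetical guard that is never defined.
*/
#ifdef DLMALLOC_EXAMPLES
static void example_mallopt(void)
{
  /* Returns 1 on success, 0 for an unknown or unsupported parameter. */
  if (!mALLOPt(M_TRIM_THRESHOLD, 256 * 1024))
    printf("mallopt: M_TRIM_THRESHOLD not accepted\n");
}
#endif /* DLMALLOC_EXAMPLES */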

int initf_malloc(void)
{
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	assert(gd->malloc_base);	/* Set up by crt0.S */
	gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
	gd->malloc_ptr = 0;
#endif

	return 0;
}

/*

History:

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
	 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
	   (e.g. WIN32 platforms)
	 * Clean up header file inclusion for WIN32 platforms
	 * Clean up code to avoid Microsoft Visual C++ compiler complaints
	 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
	   memory allocation routines
	 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
	 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
	   usage of 'assert' in non-WIN32 code
	 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
	   avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
	foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
	Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
	avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
	(raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
	courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
	H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
	the work required to undo bad preallocations exceeds
	the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
	no scheme using them consistently outperforms those that don't
	given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
	Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
	(wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
	(eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
	from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
	with gcc & native cc (hp, dec only) allowing
	Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
	structure of old version, but most details differ.)

*/