unsigned long ul;
heap_info *h;
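+ /* Grow the request by top_pad, clamping the result to
+    [HEAP_MIN_SIZE, HEAP_MAX_SIZE]; fail only if the unpadded
+    request alone exceeds HEAP_MAX_SIZE. */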
- if(size < HEAP_MIN_SIZE)
+ if(size+top_pad < HEAP_MIN_SIZE)
size = HEAP_MIN_SIZE;
- size = (size + page_mask) & ~page_mask;
- if(size > HEAP_MAX_SIZE)
+ else if(size+top_pad <= HEAP_MAX_SIZE)
+ size += top_pad;
+ else if(size > HEAP_MAX_SIZE)
return 0;
+ else
+ size = HEAP_MAX_SIZE;
+ size = (size + page_mask) & ~page_mask;
+
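+ /* Reserve twice HEAP_MAX_SIZE of inaccessible address space so a
+    HEAP_MAX_SIZE-aligned region can be carved out of it below. */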
p1 = (char *)MMAP(HEAP_MAX_SIZE<<1, PROT_NONE);
if(p1 == (char *)-1)
return 0;
(brk < old_end && old_top != initial_top(&main_arena)))
return;
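+ /* __after_morecore_hook is only declared when hooks are compiled in. */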
+#if defined(_LIBC) || defined(MALLOC_HOOKS)
/* Call the `morecore' hook if necessary. */
if (__after_morecore_hook)
(*__after_morecore_hook) ();
+#endif
sbrked_mem += sbrk_size;
new_brk = (char*)(MORECORE (correction));
if (new_brk == (char*)(MORECORE_FAILURE)) return;
+#if defined(_LIBC) || defined(MALLOC_HOOKS)
/* Call the `morecore' hook if necessary. */
if (__after_morecore_hook)
- (*__after_morecore_hook) ();
+ (*__after_morecore_hook) ();
+#endif
sbrked_mem += correction;
}
/* A new heap must be created. */
- heap = new_heap(nb + top_pad + (MINSIZE + sizeof(*heap)));
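+ /* new_heap() now adds top_pad itself, so don't add it again here. */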
+ heap = new_heap(nb + (MINSIZE + sizeof(*heap)));
if(!heap)
return;
heap->ar_ptr = ar_ptr;
#endif
nb = request2size(bytes);
- arena_get(ar_ptr, nb + top_pad);
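+ /* top_pad is now applied inside new_heap() when one must be created. */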
+ arena_get(ar_ptr, nb);
if(!ar_ptr)
return 0;
victim = chunk_alloc(ar_ptr, nb);
(void)mutex_unlock(&ar_ptr->mutex);
- return victim ? chunk2mem(victim) : 0;
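+ /* A non-main arena grows only by mmap(); the main arena can still
+    extend with sbrk(), so give it a chance before failing. */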
+ if(!victim) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ if(ar_ptr != &main_arena) {
+ (void)mutex_lock(&main_arena.mutex);
+ victim = chunk_alloc(&main_arena, nb);
+ (void)mutex_unlock(&main_arena.mutex);
+ }
+ if(!victim) return 0;
+ }
+ return chunk2mem(victim);
}
static mchunkptr
return;
}
- set_head(next, nextsz); /* clear inuse bit */
-
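+ /* Clearing next's inuse bit moves below: it only applies when
+    next is not consolidated into p. */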
islr = 0;
if (!(hd & PREV_INUSE)) /* consolidate backward */
}
else
unlink(next, bck, fwd);
+
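+ /* p has absorbed next; recompute the chunk that now follows p so
+    its prev_size can record the merged size below. */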
+ next = chunk_at_offset(p, sz);
}
+ else
+ set_head(next, nextsz); /* clear inuse bit */
set_head(p, sz | PREV_INUSE);
- set_foot(p, sz);
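+ /* next is the chunk just past p in either branch, so this stores
+    p's foot without recomputing chunk_at_offset(p, sz). */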
+ next->prev_size = sz;
if (!islr)
frontlink(ar_ptr, p, sz, idx, bck, fwd);
+
+#ifndef NO_THREADS
+ /* Check whether the heap containing top can go away now. */
+ if(next->size < MINSIZE && /* i.e. next is the heap's fencepost */
+ (unsigned long)sz > trim_threshold &&
+ ar_ptr != &main_arena) {
+ heap_info* heap = heap_for_ptr(top(ar_ptr));
+
+ if(top(ar_ptr) == chunk_at_offset(heap, sizeof(*heap)) &&
+ heap->prev == heap_for_ptr(p))
+ heap_trim(heap, top_pad);
+ }
+#endif
}
newp = chunk_alloc (ar_ptr, nb);
- if (newp == 0) /* propagate failure */
- return 0;
+ if (newp == 0) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ if (ar_ptr != &main_arena) {
+ (void)mutex_lock(&main_arena.mutex);
+ newp = chunk_alloc(&main_arena, nb);
+ (void)mutex_unlock(&main_arena.mutex);
+ }
+ if (newp == 0) /* propagate failure */
+ return 0;
+ }
/* Avoid copy if newp is next chunk after oldp. */
/* (This can only happen when new chunk is sbrk'ed.) */
return 0;
p = chunk_align(ar_ptr, nb, alignment);
(void)mutex_unlock(&ar_ptr->mutex);
- return p ? chunk2mem(p) : NULL;
+ if(!p) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ if(ar_ptr != &main_arena) {
+ (void)mutex_lock(&main_arena.mutex);
+ p = chunk_align(&main_arena, nb, alignment);
+ (void)mutex_unlock(&main_arena.mutex);
+ }
+ if(!p) return 0;
+ }
+ return chunk2mem(p);
}
static mchunkptr
/* Only clearing follows, so we can unlock early. */
(void)mutex_unlock(&ar_ptr->mutex);
- if (p == 0)
- return 0;
- else
- {
- mem = chunk2mem(p);
+ if (p == 0) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ if(ar_ptr != &main_arena) {
+ (void)mutex_lock(&main_arena.mutex);
+ p = chunk_alloc(&main_arena, sz);
+ (void)mutex_unlock(&main_arena.mutex);
+ }
+ if (p == 0) return 0;
+ }
+ mem = chunk2mem(p);
- /* Two optional cases in which clearing not necessary */
+ /* Two optional cases in which clearing not necessary */
#if HAVE_MMAP
- if (chunk_is_mmapped(p)) return mem;
+ if (chunk_is_mmapped(p)) return mem;
#endif
- csz = chunksize(p);
+ csz = chunksize(p);
#if MORECORE_CLEARS
- if (p == oldtop && csz > oldtopsize)
- {
- /* clear only the bytes from non-freshly-sbrked memory */
- csz = oldtopsize;
- }
+ if (p == oldtop && csz > oldtopsize) {
+ /* clear only the bytes from non-freshly-sbrked memory */
+ csz = oldtopsize;
+ }
#endif
- MALLOC_ZERO(mem, csz - SIZE_SZ);
- return mem;
- }
+ MALLOC_ZERO(mem, csz - SIZE_SZ);
+ return mem;
}
/*
new_brk = (char*)(MORECORE (-extra));
+#if defined(_LIBC) || defined(MALLOC_HOOKS)
/* Call the `morecore' hook if necessary. */
if (__after_morecore_hook)
(*__after_morecore_hook) ();
+#endif
if (new_brk == (char*)(MORECORE_FAILURE)) { /* sbrk failed? */
/* Try to figure out what we have */
#endif
#if !defined(NO_THREADS) && MALLOC_DEBUG > 1
if(ar_ptr != &main_arena) {
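+ /* Declare up front; mid-block declarations are not C89. */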
+ heap_info* heap;
(void)mutex_lock(&ar_ptr->mutex);
- heap_info *heap = heap_for_ptr(top(ar_ptr));
+ heap = heap_for_ptr(top(ar_ptr));
while(heap) { dump_heap(heap); heap = heap->prev; }
(void)mutex_unlock(&ar_ptr->mutex);
}
#endif
#if HAVE_MMAP
fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)max_n_mmaps);
+ fprintf(stderr, "max mmap bytes = %10lu\n", max_mmapped_mem);
#endif
#if THREAD_STATS
fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);