#include <malloc-machine.h>
#include <malloc-sysdep.h>
-
#include <atomic.h>
#include <_itoa.h>
#include <bits/wordsize.h>
/*
---------- Size and alignment checks and conversions ----------
*/
-
+#ifndef __CHKP__
/* conversion from malloc headers to user pointers, and back */
-#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
-#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
+# define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
+# define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
+#endif
/* The smallest possible chunk */
#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
/* Check if m has acceptable alignment */
#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
+#define aligned_chunk_OK(p) (((unsigned long)((char *)(p) + 2 * SIZE_SZ) & MALLOC_ALIGN_MASK) == 0)
#define misaligned_chunk(p) \
- ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
+ ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : (void *)((char *)(p) + 2 * SIZE_SZ)) \
& MALLOC_ALIGN_MASK)
-
-
/*
Check if a request is so large that it would wrap around zero when
padded and aligned. To simplify some other code, the bound is made
/* Get size, ignoring use bits */
#define chunksize(p) ((p)->size & ~(SIZE_BITS))
+#ifdef __CHKP__
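+/* Pointer Checker builds: the conversion and traversal helpers become
+   functions so that the __bnd_* intrinsics can attach explicit bounds
+   to every chunk pointer they hand out.  */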
+static void* chunk2mem (void *p) {
+ void *t = (char *) p + 2 * SIZE_SZ;
+ if (chunk_is_mmapped((mchunkptr)p))
+ return (void *) __bnd_narrow_ptr_bounds (t, t, chunksize((mchunkptr)p) - 2 * SIZE_SZ);
+ /* prev_size field of the next chunk can be used */
+ return (void *) __bnd_set_ptr_bounds(t, chunksize((mchunkptr)p) - SIZE_SZ);
+}
+
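+/* Bounds are set in two steps: first just the chunk header, so that the
+   chunksize() read below is in bounds, then the full chunk size (but
+   never less than the header).  */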
+static mchunkptr mem2chunk(void *mem) {
+ mchunkptr temp = (mchunkptr)((char *)(mem) - 2 * SIZE_SZ);
+ temp = __bnd_set_ptr_bounds (temp, sizeof (struct malloc_chunk));
+ return __bnd_set_ptr_bounds (temp, chunksize(temp) > sizeof(struct malloc_chunk) ?
+ chunksize(temp) : sizeof(struct malloc_chunk));
+}
+
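+/* The traversal helpers below return pointers bounded to a single chunk
+   header; callers that need more must widen the bounds themselves.  */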
+static mchunkptr next_chunk (mchunkptr p) {
+ mchunkptr temp = (mchunkptr)((char*) p + chunksize(p));
+ return (mchunkptr) __bnd_set_ptr_bounds ((void *) temp, sizeof(struct malloc_chunk));
+}
-/* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
+static mchunkptr prev_chunk (mchunkptr p) {
+ mchunkptr temp = (mchunkptr)((char*) p - p->prev_size);
+ return (mchunkptr) __bnd_set_ptr_bounds ((void *) temp, sizeof(struct malloc_chunk));
+}
+
+static mchunkptr chunk_at_offset (mchunkptr p, INTERNAL_SIZE_T s) {
+ mchunkptr temp = (mchunkptr)((char*) p + s);
+ return (mchunkptr) __bnd_set_ptr_bounds ((void *) temp, sizeof(struct malloc_chunk));
+}
+
+static int inuse (mchunkptr p) {
+ return next_chunk(p)->size & PREV_INUSE;
+}
+
+static int inuse_bit_at_offset (mchunkptr p, INTERNAL_SIZE_T s) {
+ return chunk_at_offset(p, s)->size & PREV_INUSE;
+}
+
+static void set_inuse_bit_at_offset (mchunkptr p, INTERNAL_SIZE_T s) {
+ chunk_at_offset(p, s)->size |= PREV_INUSE;
+}
+
+static void clear_inuse_bit_at_offset (mchunkptr p, INTERNAL_SIZE_T s) {
+ chunk_at_offset(p, s)->size &= ~(PREV_INUSE);
+}
+
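+/* The head-setting macros below also re-assign p's bounds to the new
+   size (never smaller than a chunk header) before storing it.  */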
+/* Set size at head, without disturbing its use bit */
+# define set_head_size(p, s) \
+({ \
+ (p) = (__typeof (p)) __bnd_set_ptr_bounds ((void *) (p), \
+     (s) > sizeof (struct malloc_chunk) ? (s) : sizeof (struct malloc_chunk)); \
+ (p)->size = ((p)->size & SIZE_BITS) | (s); \
+})
+
+/* Set size/use field */
+# define set_head(p, s) \
+({ \
+ (p) = (__typeof (p)) __bnd_set_ptr_bounds ((void *) (p), \
+     ((s) & ~(SIZE_BITS)) > sizeof (struct malloc_chunk) ? \
+     ((s) & ~(SIZE_BITS)) : sizeof (struct malloc_chunk)); \
+ (p)->size = (s); \
+})
+
+/* Set size at footer (only when chunk is not in use) */
+static void set_foot (mchunkptr p, INTERNAL_SIZE_T s) {
+ chunk_at_offset(p, s)->prev_size = s;
+}
+#else
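+/* Plain builds keep the original macro definitions.  */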
+/* Ptr to next physical malloc_chunk. */
+# define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
/* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
+# define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
/* Treat space at ptr + offset as a chunk */
-#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
+# define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
/* extract p's inuse bit */
-#define inuse(p)\
+# define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
/* set/clear chunk as being inuse without otherwise disturbing */
-#define set_inuse(p)\
+# define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
-#define clear_inuse(p)\
+# define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
/* check/set/clear inuse bits in known places */
-#define inuse_bit_at_offset(p, s)\
+# define inuse_bit_at_offset(p, s)\
(((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
-#define set_inuse_bit_at_offset(p, s)\
+# define set_inuse_bit_at_offset(p, s)\
(((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
-#define clear_inuse_bit_at_offset(p, s)\
+# define clear_inuse_bit_at_offset(p, s)\
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
/* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
+# define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
/* Set size/use field */
-#define set_head(p, s) ((p)->size = (s))
+# define set_head(p, s) ((p)->size = (s))
/* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
-
-
+# define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
+#endif /* __CHKP__ */
/*
-------------------- Internal data structures --------------------
/* chunk is page-aligned */
assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0);
/* mem is aligned */
- assert(aligned_OK(chunk2mem(p)));
+ assert(aligned_chunk_OK(p));
}
}
if ((unsigned long)(sz) >= MINSIZE)
{
assert((sz & MALLOC_ALIGN_MASK) == 0);
- assert(aligned_OK(chunk2mem(p)));
+ assert(aligned_chunk_OK(p));
/* ... matching footer field */
assert(next->prev_size == sz);
/* ... and is fully consolidated */
assert((sz & MALLOC_ALIGN_MASK) == 0);
assert((unsigned long)(sz) >= MINSIZE);
/* ... and alignment */
- assert(aligned_OK(chunk2mem(p)));
+ assert(aligned_chunk_OK(p));
/* chunk is less than MINSIZE more than request */
assert((long)(sz) - (long)(s) >= 0);
assert((long)(sz) - (long)(s + MINSIZE) < 0);
/* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
- assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
+ assert (((INTERNAL_SIZE_T)((char *) mm + 2 * SIZE_SZ) & MALLOC_ALIGN_MASK) == 0);
front_misalign = 0;
}
else
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
+ front_misalign = (INTERNAL_SIZE_T)((char *) mm + 2 * SIZE_SZ) & MALLOC_ALIGN_MASK;
if (front_misalign > 0) {
correction = MALLOC_ALIGNMENT - front_misalign;
p = (mchunkptr)(mm + correction);
- p->prev_size = correction;
set_head(p, (size - correction) |IS_MMAPPED);
+ p->prev_size = correction;
}
else
{
/* Record incoming configuration of top */
old_top = av->top;
- old_size = chunksize(old_top);
+ if (old_top == initial_top(av)) {
+ old_size = 0;
+ } else {
+ old_size = chunksize(old_top);
+ }
old_end = (char*)(chunk_at_offset(old_top, old_size));
brk = snd_brk = (char*)(MORECORE_FAILURE);
become the top chunk again later. Note that a footer is set
up, too, although the chunk is marked in use. */
old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
- set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
+ chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size = 0|PREV_INUSE;
if (old_size >= MINSIZE) {
- set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
+ chunk_at_offset(old_top, old_size)->size = (2*SIZE_SZ)|PREV_INUSE;
set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
_int_free(av, old_top, 1);
/* Guarantee alignment of first new chunk made from this space */
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ front_misalign = (INTERNAL_SIZE_T)((char *) brk + 2*SIZE_SZ) & MALLOC_ALIGN_MASK;
if (front_misalign > 0) {
/*
else {
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
/* MORECORE/mmap must correctly align */
- assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
+ assert(((unsigned long)((char *) brk + 2*SIZE_SZ) & MALLOC_ALIGN_MASK) == 0);
else {
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ front_misalign = (INTERNAL_SIZE_T)((char *) brk + 2*SIZE_SZ) & MALLOC_ALIGN_MASK;
if (front_misalign > 0) {
/*
/* finally, do the allocation */
p = av->top;
- size = chunksize(p);
-
+ if (p != initial_top(av)) {
+ size = chunksize(p);
+ } else {
+ size = 0;
+ }
/* check that one of the above allocation paths succeeded */
if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
remainder_size = size - nb;
p = (mchunkptr)(cp + offset);
- assert(aligned_OK(chunk2mem(p)));
-
- assert((p->prev_size == offset));
+ assert(aligned_chunk_OK(p));
set_head(p, (new_size - offset)|IS_MMAPPED);
+ assert((p->prev_size == offset));
mp_.mmapped_mem -= size + offset;
mp_.mmapped_mem += new_size;
if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
(void)mutex_unlock(&ar_ptr->mutex);
assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
ar_ptr == arena_for_chunk(mem2chunk(victim)));
+#ifndef __CHKP__
return victim;
+#else
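+ /* Hand the application a pointer whose bounds cover only the
+    requested bytes, not the whole chunk.  */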
+ return __bnd_narrow_ptr_bounds (victim, victim, bytes);
+#endif
}
libc_hidden_def(__libc_malloc)
if(newp) return chunk2mem(newp);
#endif
/* Note the extra SIZE_SZ overhead. */
- if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
+ if(oldsize - SIZE_SZ >= nb)
+#ifndef __CHKP__
+ return oldmem; /* do nothing */
+#else
+ return __bnd_narrow_ptr_bounds(oldmem, oldmem, bytes); /* do nothing */
+#endif
/* Must alloc, copy, free. */
newmem = __libc_malloc(bytes);
if (newmem == 0) return 0; /* propagate failure */
_int_free(ar_ptr, oldp, 0);
}
}
-
+#ifndef __CHKP__
return newp;
+#else
+ return __bnd_narrow_ptr_bounds(newp, newp, bytes);
+#endif
}
libc_hidden_def (__libc_realloc)
(void)mutex_unlock(&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ar_ptr == arena_for_chunk(mem2chunk(p)));
+#ifndef __CHKP__
return p;
+#else
+ return __bnd_narrow_ptr_bounds(p, p, bytes);
+#endif
}
/* For ISO C11. */
weak_alias (__libc_memalign, aligned_alloc)
(void)mutex_unlock (&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ar_ptr == arena_for_chunk(mem2chunk(p)));
-
+#ifndef __CHKP__
return p;
+#else
+ return __bnd_narrow_ptr_bounds(p, p, bytes);
+#endif
}
void*
assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ar_ptr == arena_for_chunk(mem2chunk(p)));
+#ifndef __CHKP__
return p;
+#else
+ return __bnd_narrow_ptr_bounds(p, p, bytes);
+#endif
}
void*
mem = (*hook)(sz, RETURN_ADDRESS (0));
if(mem == 0)
return 0;
+#ifdef __CHKP__
+ mem = __bnd_narrow_ptr_bounds(mem, mem, sz);
+#endif
return memset(mem, 0, sz);
}
need to clear. */
#if MORECORE_CLEARS
oldtop = top(av);
- oldtopsize = chunksize(top(av));
+ if (oldtop == initial_top(av)) {
+ oldtopsize = 0;
+ } else {
+ oldtopsize = chunksize(top(av));
+ }
#if MORECORE_CLEARS < 2
/* Only newly allocated memory is guaranteed to be cleared. */
if (av == &main_arena &&
/* Two optional cases in which clearing not necessary */
if (chunk_is_mmapped (p))
{
+#ifdef __CHKP__
+ mem = __bnd_narrow_ptr_bounds(mem, mem, sz);
+#endif
if (__builtin_expect (perturb_byte, 0))
MALLOC_ZERO (mem, sz);
return mem;
}
}
}
-
+#ifndef __CHKP__
return mem;
+#else
+ return __bnd_narrow_ptr_bounds(mem, mem, sz);
+#endif
}
/*
*/
victim = av->top;
- size = chunksize(victim);
+ if (victim == initial_top(av)) {
+ size = 0;
+ } else {
+ size = chunksize(victim);
+ }
if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
remainder_size = size - nb;
p = atomic_exchange_acq (fb, 0);
if (p != 0) {
do {
+#ifdef __CHKP__
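+ /* Make sure p carries at least chunk-header bounds before the
+    chunk is examined below.  */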
+ p = __bnd_set_ptr_bounds(p, sizeof (struct malloc_chunk));
+#endif
check_inuse_chunk(av, p);
nextp = p->fd;
/* For mmapped chunks, just adjust offset */
if (chunk_is_mmapped(p)) {
- newp->prev_size = p->prev_size + leadsize;
set_head(newp, newsize|IS_MMAPPED);
+ newp->prev_size = p->prev_size + leadsize;
return chunk2mem(newp);
}
p = newp;
assert (newsize >= nb &&
- (((unsigned long)(chunk2mem(p))) % alignment) == 0);
+ (((unsigned long)((char *)p + 2 * SIZE_SZ) % alignment) == 0));
}
/* Also give back spare room at the end */
+ sizeof (struct malloc_chunk)
+ psm1) & ~psm1);
- assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
+ assert ((char *) (p) + 6 * SIZE_SZ <= paligned_mem);
assert ((char *) p + size > paligned_mem);
/* This is the size we could potentially free. */
}
weak_alias (__posix_memalign, posix_memalign)
-
int
malloc_info (int options, FILE *fp)
{
return 0;
}
-
strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)