if (ar_ptr == &main_arena)
break;
}
+ cap_fork_lock ();
}
void
if (!__malloc_initialized)
return;
+ cap_fork_unlock_parent ();
+
for (mstate ar_ptr = &main_arena;; )
{
__libc_lock_unlock (ar_ptr->mutex);
if (!__malloc_initialized)
return;
+ cap_fork_unlock_child ();
+
/* Push all arenas to the free list, except thread_arena, which is
attached to the current thread. */
__libc_lock_init (free_list_lock);
tcache_key_initialize ();
#endif
+ cap_init ();
+
#ifdef USE_MTAG
if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
{
else
aligned_heap_area = p2 + max_size;
__munmap (p2 + max_size, max_size - ul);
+#ifdef __CHERI_PURE_CAPABILITY__
+ p2 = __builtin_cheri_bounds_set_exact (p2, max_size);
+#endif
}
else
{
return 0;
}
+ if (!cap_map_add (p2))
+ {
+ __munmap (p2, max_size);
+ return 0;
+ }
+
madvise_thp (p2, size);
h = (heap_info *) p2;
LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
if ((char *) heap + max_size == aligned_heap_area)
aligned_heap_area = NULL;
+ cap_map_del (heap);
__munmap (heap, max_size);
heap = prev_heap;
if (!prev_inuse (p)) /* consolidate backward */
# define cap_narrowing_enabled 0
#endif
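+
+/* Thin wrappers around the __libc_cap_* hooks so call sites need no #ifdef:
+   when cap_narrowing_enabled is 0 the conditions below are constant false,
+   the calls are optimized away and the __libc_cap_fail fallbacks (which
+   would otherwise cause a link error) are never emitted. */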
+static __always_inline void
+cap_init (void)
+{
+ if (cap_narrowing_enabled)
+ assert (__libc_cap_init ());
+}
+
+static __always_inline void
+cap_fork_lock (void)
+{
+ if (cap_narrowing_enabled)
+ __libc_cap_fork_lock ();
+}
+
+static __always_inline void
+cap_fork_unlock_parent (void)
+{
+ if (cap_narrowing_enabled)
+ __libc_cap_fork_unlock_parent ();
+}
+
+static __always_inline void
+cap_fork_unlock_child (void)
+{
+ if (cap_narrowing_enabled)
+ __libc_cap_fork_unlock_child ();
+}
+
+static __always_inline bool
+cap_map_add (void *p)
+{
+ if (cap_narrowing_enabled)
+ return __libc_cap_map_add (p);
+ return true;
+}
+
+static __always_inline void
+cap_map_del (void *p)
+{
+ if (cap_narrowing_enabled)
+ __libc_cap_map_del (p);
+}
+
/* Round up size so capability bounds can be represented. */
static __always_inline size_t
cap_roundup (size_t n)
return 1;
}
-/* Narrow the bounds of p to [p, p+n) exactly unless p is NULL. */
+/* Narrow the bounds of p to [p, p+n) exactly unless p is NULL; a NULL p
+   only releases the reservation.  Must match a previous cap_reserve call. */
static __always_inline void *
cap_narrow (void *p, size_t n)
{
- if (cap_narrowing_enabled && p != NULL)
- return __libc_cap_narrow (p, n);
+ if (cap_narrowing_enabled)
+ {
+ if (p == NULL)
+ __libc_cap_unreserve ();
+ else
+ p = __libc_cap_narrow (p, n);
+ }
+ return p;
+}
+
+/* Used in realloc when p is either already narrowed (e.g. the result of
+   __libc_malloc) or NULL.  On failure oldp, which cap_drop removed from the
+   capability table, is registered again, consuming the reservation; on
+   success the reservation is simply released.  Must match a previous
+   cap_reserve call. */
+static __always_inline bool
+cap_narrow_check (void *p, void *oldp)
+{
+ if (cap_narrowing_enabled)
+ {
+ if (p == NULL)
+ (void) __libc_cap_narrow (oldp, 0);
+ else
+ __libc_cap_unreserve ();
+ }
+ return p != NULL;
+}
+
+/* Used in realloc when p is a new, not yet narrowed allocation or NULL.
+   On failure oldp is registered again in the capability table, on success
+   p is narrowed to n bytes; either way the reservation is consumed.  Must
+   match a previous cap_reserve call. */
+static __always_inline void *
+cap_narrow_try (void *p, size_t n, void *oldp)
+{
+ if (cap_narrowing_enabled)
+ {
+ if (p == NULL)
+ (void) __libc_cap_narrow (oldp, 0);
+ else
+ p = __libc_cap_narrow (p, n);
+ }
return p;
}
return p;
}
+/* Reserve memory for the following cap_narrow; this may fail with ENOMEM. */
+static __always_inline bool
+cap_reserve (void)
+{
+ if (cap_narrowing_enabled)
+ return __libc_cap_reserve ();
+ return true;
+}
+
+/* Release the memory reserved by cap_reserve. */
+static __always_inline void
+cap_unreserve (void)
+{
+ if (cap_narrowing_enabled)
+ __libc_cap_unreserve ();
+}
+
+/* Remove p so cap_widen no longer works on it. */
+static __always_inline void
+cap_drop (void *p)
+{
+ if (cap_narrowing_enabled)
+ __libc_cap_drop (p);
+}
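+
+/* Typical allocation sequence (illustrative sketch, not code from this
+   patch; the internal allocation stands for whichever _int_* path the
+   caller uses):
+
+     if (!cap_reserve ())            (may fail with ENOMEM)
+       return NULL;
+     mem = <allocate internally>;
+     mem = cap_narrow (mem, bytes);  (consumes the reservation,
+                                      even when mem is NULL)
+
+   Every path taken after a successful cap_reserve must release the
+   reservation through exactly one of cap_narrow, cap_narrow_check,
+   cap_narrow_try or cap_unreserve. */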
+
#include <string.h>
/*
if (mm == MAP_FAILED)
return mm;
+ if (!cap_map_add (mm))
+ {
+ __munmap (mm, size);
+ return MAP_FAILED;
+ }
+
#ifdef MAP_HUGETLB
if (!(extra_flags & MAP_HUGETLB))
madvise_thp (mm, size);
if (mbrk == MAP_FAILED)
return MAP_FAILED;
+ if (!cap_map_add (mbrk))
+ {
+ __munmap (mbrk, size);
+ return MAP_FAILED;
+ }
+
#ifdef MAP_HUGETLB
if (!(extra_flags & MAP_HUGETLB))
madvise_thp (mbrk, size);
atomic_decrement (&mp_.n_mmaps);
atomic_add (&mp_.mmapped_mem, -total_size);
+ cap_map_del ((void *) block);
+
/* If munmap failed the process virtual memory address space is in a
bad shape. Just leave the block hanging around, the process will
terminate shortly anyway since not much can be done. */
if (cp == MAP_FAILED)
return 0;
+ cap_map_del ((void *) block);
+ cap_map_add (cp);
+
madvise_thp (cp, new_size);
p = (mchunkptr) (cp + offset);
&& tcache
&& tcache->counts[tc_idx] > 0)
{
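+  /* Reserve the hash table slot first: if it fails, return NULL without
+     having taken anything off the tcache. */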
+ if (!cap_reserve ())
+ return NULL;
victim = tcache_get (tc_idx);
victim = tag_new_usable (victim);
victim = cap_narrow (victim, bytes);
if (align > MALLOC_ALIGNMENT)
return _mid_memalign (align, bytes, 0);
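+  /* Reserve before allocating; the cap_narrow on the return paths below
+     releases the reservation even when the allocation fails. */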
+ if (!cap_reserve ())
+ return NULL;
+
if (SINGLE_THREAD_P)
{
victim = tag_new_usable (_int_malloc (&main_arena, bytes));
return;
mem = cap_widen (mem);
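+  /* The pointer is being freed: remove it from the capability table so a
+     later double free cannot widen it again. */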
+ cap_drop (mem);
/* Quickly check that the freed pointer matches the tag for the memory.
This gives a useful double-free detection. */
return NULL;
}
+  /* Every return path below must release the reservation via one of the
+     cap_narrow* helpers.  cap_drop removes oldmem from the capability table;
+     the failure paths register it again. */
+ if (!cap_reserve ())
+ return NULL;
+ cap_drop (oldmem);
+
if (chunk_is_mmapped (oldp))
{
void *newmem;
caller for doing this, so we might want to
reconsider. */
newmem = tag_new_usable (newmem);
- newmem = cap_narrow (newmem, bytes);
+ newmem = cap_narrow_try (newmem, bytes, oldmem);
return newmem;
}
#endif
else
#endif
newmem = __libc_malloc (bytes);
- if (newmem == 0)
+ if (!cap_narrow_check (newmem, oldmem))
return 0; /* propagate failure */
#ifdef __CHERI_PURE_CAPABILITY__
{
/* Use memalign, copy, free. */
void *newmem = _mid_memalign (align, bytes, 0);
- if (newmem == NULL)
+ if (!cap_narrow_check (newmem, oldmem))
return newmem;
size_t sz = oldsize - CHUNK_HDR_SZ;
memcpy (newmem, oldmem, sz < bytes ? sz : bytes);
newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
ar_ptr == arena_for_chunk (mem2chunk (newp)));
-
- return cap_narrow (newp, bytes);
+ return cap_narrow_try (newp, bytes, oldmem);
}
__libc_lock_lock (ar_ptr->mutex);
/* Try harder to allocate memory in other arenas. */
LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
newp = __libc_malloc (bytes);
- if (newp != NULL)
- {
- size_t sz = memsize (oldp);
- memcpy (newp, oldmem, sz);
- (void) tag_region (chunk2mem (oldp), sz);
- _int_free (ar_ptr, oldp, 0);
- }
+ if (!cap_narrow_check (newp, oldmem))
+ return NULL;
+ size_t sz = memsize (oldp);
+ memcpy (newp, oldmem, sz);
+ (void) tag_region (chunk2mem (oldp), sz);
+ _int_free (ar_ptr, oldp, 0);
}
else
newp = cap_narrow (newp, bytes);
return 0;
}
+ if (!cap_reserve ())
+ return NULL;
/* Make sure alignment is power of 2. */
if (!powerof2 (alignment))
MAYBE_INIT_TCACHE ();
+ if (!cap_reserve ())
+ return NULL;
+
if (SINGLE_THREAD_P)
av = &main_arena;
else
}
/* Allocation failed even after a retry. */
+ if (mem == 0)
+ cap_unreserve ();
if (mem == 0)
return 0;
#ifndef _AARCH64_MORELLO_LIBC_CAP_H
#define _AARCH64_MORELLO_LIBC_CAP_H 1
+#include <stdint.h>
+#include <sys/mman.h>
+#include <libc-lock.h>
+
+/* Hash table for __libc_cap_widen. */
+
+#define HT_MIN_LEN (65536 / sizeof (struct htentry))
+#define HT_MAX_LEN (1UL << 58)
+
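+/* One slot of the open-addressing table.  key is the 64-bit address of an
+   allocation and value the original capability with wide bounds; key == 0
+   marks an empty slot and key == (uint64_t) -1 a deleted one (tombstone),
+   neither of which is a valid allocation address.  The unused field makes
+   the padding before the 16-byte capability value explicit, giving a
+   32-byte entry. */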
+struct htentry
+{
+ uint64_t key;
+ uint64_t unused;
+ void *value;
+};
+
+struct ht
+{
+  __libc_lock_define (, mutex);
+  size_t mask; /* Table length - 1; the length is a power of two. */
+ size_t fill; /* Used + deleted entries. */
+ size_t used;
+ size_t reserve; /* Planned adds. */
+ struct htentry *tab;
+};
+
+static inline bool
+htentry_isempty (struct htentry *e)
+{
+ return e->key == 0;
+}
+
+static inline bool
+htentry_isdeleted (struct htentry *e)
+{
+ return e->key == -1;
+}
+
+static inline bool
+htentry_isused (struct htentry *e)
+{
+ return e->key != 0 && e->key != -1;
+}
+
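+/* Map an allocation address to a probe start.  Allocations here are at
+   least 16-byte aligned, so the low four bits carry no information; folding
+   in higher bits spreads nearby addresses over the table. */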
+static inline uint64_t
+ht_key_hash (uint64_t key)
+{
+ return (key >> 4) ^ (key >> 18);
+}
+
+static struct htentry *
+ht_tab_alloc (size_t n)
+{
+ size_t size = n * sizeof (struct htentry);
+ assert (size && (size & 65535) == 0);
+ void *p = __mmap (0, size, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ if (p == MAP_FAILED)
+ return NULL;
+ return p;
+}
+
+static void
+ht_tab_free (struct htentry *tab, size_t n)
+{
+ int r = __munmap (tab, n * sizeof (struct htentry));
+ assert (r == 0);
+}
+
+static bool
+ht_init (struct ht *ht)
+{
+ __libc_lock_init (ht->mutex);
+ ht->mask = HT_MIN_LEN - 1;
+ ht->fill = 0;
+ ht->used = 0;
+ ht->reserve = 0;
+ ht->tab = ht_tab_alloc (ht->mask + 1);
+ return ht->tab != NULL;
+}
+
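+/* Return the slot for KEY: the used entry whose key matches, or otherwise a
+   slot where KEY may be stored (the first tombstone seen on the probe
+   sequence, if any, else the empty slot that ended the search).  The resize
+   policy keeps the table from filling up, so the search terminates. */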
+static struct htentry *
+ht_lookup (struct ht *ht, uint64_t key, uint64_t hash)
+{
+ size_t mask = ht->mask;
+ size_t i = hash;
+ size_t j;
+ struct htentry *e = ht->tab + (i & mask);
+ struct htentry *del;
+
+ if (e->key == key || htentry_isempty (e))
+ return e;
+ if (htentry_isdeleted (e))
+ del = e;
+ else
+ del = NULL;
+
+  /* Quadratic probing: the probe offsets 1, 3, 6, 10, ... are the
+     triangular numbers, which visit every slot when the table length is a
+     power of two. */
+  for (j = 1, i += j++; ; i += j++)
+ {
+ e = ht->tab + (i & mask);
+ if (e->key == key)
+ return e;
+ if (htentry_isempty (e))
+ return del != NULL ? del : e;
+ if (del == NULL && htentry_isdeleted (e))
+ del = e;
+ }
+}
+
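+/* Grow the table so that the used and reserved entries fit comfortably,
+   rehashing only live entries (tombstones are dropped).  Called with the
+   mutex held; on allocation failure the table is left unchanged. */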
+static bool
+ht_resize (struct ht *ht)
+{
+ size_t len;
+ size_t used = ht->used;
+ size_t n = ht->used + ht->reserve;
+ size_t oldlen = ht->mask + 1;
+
+ if (2 * n >= HT_MAX_LEN)
+ len = HT_MAX_LEN;
+ else
+ for (len = HT_MIN_LEN; len < 2 * n; len *= 2);
+ struct htentry *newtab = ht_tab_alloc (len);
+ struct htentry *oldtab = ht->tab;
+ struct htentry *e;
+ if (newtab == NULL)
+ return false;
+
+ ht->tab = newtab;
+ ht->mask = len - 1;
+ ht->fill = ht->used;
+ for (e = oldtab; used > 0; e++)
+ {
+ if (htentry_isused (e))
+ {
+ uint64_t hash = ht_key_hash (e->key);
+ used--;
+ *ht_lookup (ht, e->key, hash) = *e;
+ }
+ }
+ ht_tab_free (oldtab, oldlen);
+ return true;
+}
+
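+/* Account for one planned ht_add, resizing now if the planned inserts would
+   push the table past the fill threshold, so that the add itself cannot
+   fail.  Returns false on out-of-memory without recording a reservation. */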
+static bool
+ht_reserve (struct ht *ht)
+{
+ bool r = true;
+ __libc_lock_lock (ht->mutex);
+ ht->reserve++;
+ size_t future_fill = ht->fill + ht->reserve;
+ size_t future_used = ht->used + ht->reserve;
+ /* Resize at 3/4 fill or if there are many deleted entries. */
+ if (future_fill > ht->mask - ht->mask / 4
+ || future_fill > future_used * 4)
+ r = ht_resize (ht);
+ if (!r)
+ ht->reserve--;
+ __libc_lock_unlock (ht->mutex);
+ return r;
+}
+
+static void
+ht_unreserve (struct ht *ht)
+{
+ __libc_lock_lock (ht->mutex);
+ assert (ht->reserve > 0);
+ ht->reserve--;
+ __libc_lock_unlock (ht->mutex);
+}
+
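+/* Insert KEY with VALUE, consuming one reservation.  Returns true if KEY
+   was newly added, false if it was already present (the value is updated
+   either way). */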
+static bool
+ht_add (struct ht *ht, uint64_t key, void *value)
+{
+ __libc_lock_lock (ht->mutex);
+ assert (ht->reserve > 0);
+ ht->reserve--;
+ uint64_t hash = ht_key_hash (key);
+ struct htentry *e = ht_lookup (ht, key, hash);
+ bool r = false;
+ if (!htentry_isused (e))
+ {
+ if (htentry_isempty (e))
+ ht->fill++;
+ ht->used++;
+ e->key = key;
+ r = true;
+ }
+ e->value = value;
+ __libc_lock_unlock (ht->mutex);
+ return r;
+}
+
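+/* Delete KEY, leaving a tombstone.  Returns false if KEY was not present. */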
+static bool
+ht_del (struct ht *ht, uint64_t key)
+{
+ __libc_lock_lock (ht->mutex);
+ struct htentry *e = ht_lookup (ht, key, ht_key_hash (key));
+ bool r = htentry_isused (e);
+ if (r)
+ {
+ ht->used--;
+ e->key = -1;
+ }
+ __libc_lock_unlock (ht->mutex);
+ return r;
+}
+
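+/* Return the value stored for KEY, or NULL if KEY is not present. */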
+static void *
+ht_get (struct ht *ht, uint64_t key)
+{
+ __libc_lock_lock (ht->mutex);
+ struct htentry *e = ht_lookup (ht, key, ht_key_hash (key));
+ void *v = htentry_isused (e) ? e->value : NULL;
+ __libc_lock_unlock (ht->mutex);
+ return v;
+}
+
+/* Capability narrowing APIs. */
+
+static struct ht __libc_cap_ht;
+
+static __always_inline bool
+__libc_cap_init (void)
+{
+ return ht_init (&__libc_cap_ht);
+}
+
+static __always_inline void
+__libc_cap_fork_lock (void)
+{
+ __libc_lock_lock (__libc_cap_ht.mutex);
+}
+
+static __always_inline void
+__libc_cap_fork_unlock_parent (void)
+{
+ __libc_lock_unlock (__libc_cap_ht.mutex);
+}
+
+static __always_inline void
+__libc_cap_fork_unlock_child (void)
+{
+ __libc_lock_init (__libc_cap_ht.mutex);
+}
+
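+/* Called for every mapping malloc creates or destroys.  For now these only
+   sanity-check the capability; the base check is disabled pending the
+   kernel purecap ABI (see the TODO below). */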
+static __always_inline bool
+__libc_cap_map_add (void *p)
+{
+ assert (p != NULL);
+  /* TODO: depends on pcuabi.
+     assert (__builtin_cheri_base_get (p) == (uint64_t) p);  */
+ return true;
+}
+
+static __always_inline void
+__libc_cap_map_del (void *p)
+{
+ assert (p != NULL);
+  /* assert (__builtin_cheri_base_get (p) == (uint64_t) p);  */
+}
+
/* No special alignment is needed for n <= __CAP_ALIGN_THRESHOLD
allocations, i.e. __libc_cap_align (n) <= MALLOC_ALIGNMENT. */
#define __CAP_ALIGN_THRESHOLD 32759
static __always_inline void *
__libc_cap_narrow (void *p, size_t n)
{
- return __builtin_cheri_bounds_set_exact (p, n);
+ assert (p != NULL);
+ uint64_t key = (uint64_t)(uintptr_t) p;
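+  /* The caller holds a reservation from __libc_cap_reserve, so this insert
+     cannot fail; it consumes that reservation. */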
+ assert (ht_add (&__libc_cap_ht, key, p));
+ void *narrow = __builtin_cheri_bounds_set_exact (p, n);
+ return narrow;
}
/* Given a p with narrowed bound (output of __libc_cap_narrow) return
static __always_inline void *
__libc_cap_widen (void *p)
{
- void *cap = __builtin_cheri_global_data_get ();
- return __builtin_cheri_address_set (cap, p);
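+  /* p must be an unmodified pointer previously returned by malloc: valid
+     tag and offset 0, i.e. it still points to the start of its allocation. */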
+ assert (__builtin_cheri_tag_get (p) && __builtin_cheri_offset_get (p) == 0);
+ uint64_t key = (uint64_t)(uintptr_t) p;
+ void *cap = ht_get (&__libc_cap_ht, key);
+ assert (cap == p);
+ return cap;
+}
+
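+/* Reserve room for one future __libc_cap_narrow; may fail with ENOMEM. */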
+static __always_inline bool
+__libc_cap_reserve (void)
+{
+ return ht_reserve (&__libc_cap_ht);
+}
+
+static __always_inline void
+__libc_cap_unreserve (void)
+{
+ ht_unreserve (&__libc_cap_ht);
+}
+
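+/* Forget the capability recorded for p; it must have been registered by a
+   previous __libc_cap_narrow. */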
+static __always_inline void
+__libc_cap_drop (void *p)
+{
+ assert (p != NULL);
+ uint64_t key = (uint64_t)(uintptr_t) p;
+ assert (ht_del (&__libc_cap_ht, key));
}
#endif
void __libc_cap_link_error (void);
#define __libc_cap_fail(rtype) (__libc_cap_link_error (), (rtype) 0)
+#define __libc_cap_init() __libc_cap_fail (bool)
+#define __libc_cap_fork_lock() __libc_cap_fail (void)
+#define __libc_cap_fork_unlock_parent() __libc_cap_fail (void)
+#define __libc_cap_fork_unlock_child() __libc_cap_fail (void)
+#define __libc_cap_map_add(p) __libc_cap_fail (bool)
+#define __libc_cap_map_del(p) __libc_cap_fail (void)
#define __libc_cap_roundup(n) __libc_cap_fail (size_t)
#define __libc_cap_align(n) __libc_cap_fail (size_t)
#define __libc_cap_narrow(p, n) __libc_cap_fail (void *)
#define __libc_cap_widen(p) __libc_cap_fail (void *)
+#define __libc_cap_reserve() __libc_cap_fail (bool)
+#define __libc_cap_unreserve() __libc_cap_fail (void)
+#define __libc_cap_drop(p) __libc_cap_fail (void)
#endif