#ifndef _COMMON_MEMORY_H
#define _COMMON_MEMORY_H
-#include <sys/mman.h>
-
-#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
#include <haproxy/api.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/list.h>
+#include <haproxy/pool-os.h>
#include <haproxy/pool-t.h>
#include <haproxy/thread.h>
_HA_ATOMIC_SUB(&pool->used, 1);
if (unlikely(pool_is_crowded(pool))) {
- free(ptr);
+ pool_free_area(ptr, pool->size + POOL_EXTRA);
_HA_ATOMIC_SUB(&pool->allocated, 1);
} else {
do {
return p;
}
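Note on the size now carried to the release call (editor's sketch, not part of the patch): the layout below assumes the usual pool-t.h definitions, i.e. POOL_EXTRA is sizeof(void *) under DEBUG_MEMORY_POOLS and 0 otherwise, with POOL_LINK() then pointing just past the object; those definitions are not shown in this diff.

/* Assumed per-object layout:
 *
 *   item ................. item + pool->size ....... + POOL_EXTRA
 *   [ user-visible object ][ link storage (POOL_EXTRA bytes)     ]
 *
 * pool_alloc_area() received size + POOL_EXTRA, so pool_free_area() must
 * be handed the same total: under DEBUG_UAF it recomputes the padding and
 * the munmap() length from it, something a plain free() could never do on
 * an mmap()ed area in the first place.
 */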
-#ifndef DEBUG_UAF /* normal allocator */
-
-/* allocates an area of size <size> and returns it. The semantics are similar
- * to those of malloc().
- */
-static inline void *pool_alloc_area(size_t size)
-{
- return malloc(size);
-}
-
-/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
- * semantics are identical to free() except that the size is specified and
- * may be ignored.
- */
-static inline void pool_free_area(void *area, size_t __maybe_unused size)
-{
- free(area);
-}
-
-#else /* use-after-free detector */
-
-/* allocates an area of size <size> and returns it. The semantics are similar
- * to those of malloc(). However the allocation is rounded up to 4kB so that a
- * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always
- * 16-bytes aligned to avoid issues with unaligned structure objects. In case
- * some padding is added, the area's start address is copied at the end of the
- * padding to help detect underflows.
- */
-#include <errno.h>
-static inline void *pool_alloc_area(size_t size)
-{
- size_t pad = (4096 - size) & 0xFF0;
- int isolated;
- void *ret;
-
- isolated = thread_isolated();
- if (!isolated)
- thread_harmless_now();
- ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
- if (ret != MAP_FAILED) {
- /* let's dereference the page before returning so that the real
- * allocation in the system is performed without holding the lock.
- */
- *(int *)ret = 0;
- if (pad >= sizeof(void *))
- *(void **)(ret + pad - sizeof(void *)) = ret + pad;
- ret += pad;
- } else {
- ret = NULL;
- }
- if (!isolated)
- thread_harmless_end();
- return ret;
-}
-
-/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
- * semantics are identical to free() except that the size must absolutely match
- * the one passed to pool_alloc_area(). In case some padding is added, the
- * area's start address is compared to the one at the end of the padding, and
- * a segfault is triggered if they don't match, indicating an underflow.
- */
-static inline void pool_free_area(void *area, size_t size)
-{
- size_t pad = (4096 - size) & 0xFF0;
-
- if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
- *DISGUISE((volatile int *)0) = 0;
-
- thread_harmless_now();
- munmap(area - pad, (size + 4095) & -4096);
- thread_harmless_end();
-}
-
-#endif /* DEBUG_UAF */
-
/*
* Returns a pointer to type <type> taken from the pool <pool_type> or
* dynamically allocated. In the first case, <pool_type> is updated to point to
HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
pool->used--;
if (pool_is_crowded(pool)) {
- free(ptr);
+ pool_free_area(ptr, pool->size + POOL_EXTRA);
pool->allocated--;
} else {
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
--- /dev/null
+++ b/include/haproxy/pool-os.h
+/*
+ * include/haproxy/pool-os.h
+ * OS-level interface for memory management
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_POOL_OS_H
+#define _HAPROXY_POOL_OS_H
+
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <haproxy/api.h>
+#include <haproxy/pool-t.h>
+#include <haproxy/thread.h>
+
+
+#ifndef DEBUG_UAF
+
+/************* normal allocator *************/
+
+/* allocates an area of size <size> and returns it. The semantics are similar
+ * to those of malloc().
+ */
+static inline void *pool_alloc_area(size_t size)
+{
+ return malloc(size);
+}
+
+/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
+ * semantics are identical to free() except that the size is specified and
+ * may be ignored.
+ */
+static inline void pool_free_area(void *area, size_t __maybe_unused size)
+{
+ free(area);
+}
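A minimal usage sketch (editor's illustration, not part of the patch): both variants share one contract, namely that the size passed on release matches the one passed on allocation. The default build above ignores it; the DEBUG_UAF build below depends on it.

/* Editor's illustration of the shared alloc/free contract. */
static void area_example(void)
{
	char *area = pool_alloc_area(256);

	if (area) {
		area[0] = 0;               /* ordinary writable memory */
		area[255] = 0;
		pool_free_area(area, 256); /* size must match the allocation */
	}
}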
+
+#else
+
+/************* use-after-free allocator *************/
+
+/* allocates an area of size <size> and returns it. The semantics are similar
+ * to those of malloc(). However, the allocation is rounded up to 4kB so that a
+ * full page is allocated. This ensures the object can be freed alone so that
+ * future dereferences are easily detected. The returned object is always
+ * 16-byte aligned to avoid issues with unaligned structure objects. In case
+ * some padding is added, the area's start address is copied at the end of the
+ * padding to help detect underflows.
+ */
+static inline void *pool_alloc_area(size_t size)
+{
+ size_t pad = (4096 - size) & 0xFF0;
+ int isolated;
+ void *ret;
+
+ isolated = thread_isolated();
+ if (!isolated)
+ thread_harmless_now();
+ ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (ret != MAP_FAILED) {
+ /* let's dereference the page before returning so that the real
+ * allocation in the system is performed without holding the lock.
+ */
+ *(int *)ret = 0;
+ if (pad >= sizeof(void *))
+ *(void **)(ret + pad - sizeof(void *)) = ret + pad;
+ ret += pad;
+ } else {
+ ret = NULL;
+ }
+ if (!isolated)
+ thread_harmless_end();
+ return ret;
+}
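A worked example of the padding arithmetic (editor's sketch, compilable on its own): for any size up to a page, the object lands 16-byte aligned with fewer than 16 bytes of slack before the end of the mapping, so even a small overflow runs into the unmapped next page.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	static const size_t sizes[] = { 8, 100, 1024, 4080, 4096 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t size = sizes[i];
		size_t pad  = (4096 - size) & 0xFF0;          /* start-of-page padding */
		size_t len  = (size + 4095) & ~(size_t)4095;  /* mmap() length */

		printf("size=%4zu pad=%4zu len=%4zu tail slack=%2zu\n",
		       size, pad, len, len - pad - size);
	}
	return 0;
}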
+
+/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
+ * semantics are identical to free() except that the size must absolutely match
+ * the one passed to pool_alloc_area(). In case some padding is added, the
+ * area's start address is compared to the one at the end of the padding, and
+ * a segfault is triggered if they don't match, indicating an underflow.
+ */
+static inline void pool_free_area(void *area, size_t size)
+{
+ size_t pad = (4096 - size) & 0xFF0;
+
+ if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
+ ABORT_NOW();
+
+ thread_harmless_now();
+ munmap(area - pad, (size + 4095) & -4096);
+ thread_harmless_end();
+}
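Two deliberately crashing sketches (editor's illustration; assumes DEBUG_UAF and the helpers above, so run only under a debugger) of the failure modes this allocator converts into immediate faults:

static void underflow_demo(void)
{
	char *p = pool_alloc_area(100);

	if (!p)
		return;
	p[-1] = 'X';            /* underflow: corrupts the start address
	                         * saved at the end of the padding */
	pool_free_area(p, 100); /* the check above fails -> ABORT_NOW() */
}

static void dangling_demo(void)
{
	char *p = pool_alloc_area(100);

	if (!p)
		return;
	pool_free_area(p, 100); /* the whole page is munmap()ed */
	p[0] = 'Y';             /* use after free: immediate segfault */
}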
+
+#endif /* DEBUG_UAF */
+
+#endif /* _HAPROXY_POOL_OS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);
- ptr = malloc(size + POOL_EXTRA);
+ ptr = pool_alloc_area(size + POOL_EXTRA);
if (!ptr) {
_HA_ATOMIC_ADD(&pool->failed, 1);
if (failed) {
temp = next;
next = *POOL_LINK(pool, temp);
removed++;
- free(temp);
+ pool_free_area(temp, pool->size + POOL_EXTRA);
}
pool->free_list = next;
_HA_ATOMIC_SUB(&pool->allocated, removed);
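The drain loop above walks an intrusive list: each free object stores the next pointer inside itself, at the location POOL_LINK() resolves to. A simplified standalone sketch of the same walk (names are the editor's, not HAProxy's):

static unsigned int drain(void **head, size_t area_size)
{
	void *item = *head;
	unsigned int removed = 0;

	while (item) {
		void *next = *(void **)item; /* read the link before freeing */

		pool_free_area(item, area_size);
		item = next;
		removed++;
	}
	*head = NULL;
	return removed;
}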
new.seq = cmp.seq + 1;
if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
continue;
- free(cmp.free_list);
+ pool_free_area(cmp.free_list, entry->size + POOL_EXTRA);
_HA_ATOMIC_SUB(&entry->allocated, 1);
}
}
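The DWCAS in this last hunk pairs the list head with a sequence counter so that a pop/push cycle restoring the same head pointer (the ABA problem) still fails the swap. A portable C11 approximation (editor's sketch with illustrative names; HA_ATOMIC_DWCAS itself is HAProxy's macro, and a 16-byte compare-exchange may need -mcx16 or libatomic):

#include <stdatomic.h>
#include <stddef.h>

struct tagged_head {
	void *head;        /* first free object; links live inside objects */
	unsigned long seq; /* bumped on every successful swap */
};

static _Atomic struct tagged_head list_head;

static void *pop_one(void)
{
	struct tagged_head cur, next;

	cur = atomic_load(&list_head);
	do {
		if (!cur.head)
			return NULL;
		next.head = *(void **)cur.head; /* unlink the first item */
		next.seq  = cur.seq + 1;        /* invalidates stale views */
	} while (!atomic_compare_exchange_weak(&list_head, &cur, next));
	return cur.head;
}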