From: Willy Tarreau
Date: Tue, 5 Aug 2025 16:12:18 +0000 (+0200)
Subject: MEDIUM: pools: respect pool alignment in allocations
X-Git-Tag: v3.3-dev6~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=ef915e672a59903dccbf39629e6cebbd074b7643;p=thirdparty%2Fhaproxy.git

MEDIUM: pools: respect pool alignment in allocations

Now pool_alloc_area() takes the alignment as an argument and makes use
of ha_aligned_alloc() instead of malloc(). pool_alloc_area_uaf() simply
applies the alignment before returning the mapped area. The pool_free()
function calls ha_aligned_free() so as to permit the use of a specific
API for aligned alloc/free, as mingw requires.

Note that it's possible to emit warnings about mismatched alignments
during pool_free() since we know both the pool and the type. In
pool_free(), adding just this is sufficient to detect potential
offenders:

    WARN_ON(__alignof__(*__ptr) > pool->align);
---
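As an aside (this sketch is not part of the patch, and uaf_pad() is an
illustrative name): the diff below changes the UAF pad computation to
"(4096 - size) & 0xFF0 & -align". Assuming align is a power of two no
larger than a page, the extra "& -align" rounds the pad down to a
multiple of the alignment, so the page-aligned base returned by mmap()
plus the pad stays suitably aligned. A minimal standalone check:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    static size_t uaf_pad(size_t size, size_t align)
    {
        /* old: (4096 - size) & 0xFF0 only guaranteed a multiple of 16;
         * the extra "& -align" also clears the bits below <align>, so
         * the pad becomes a multiple of the requested alignment too.
         */
        return (4096 - size) & 0xFF0 & -align;
    }

    int main(void)
    {
        size_t sizes[]  = { 40, 100, 1000, 4000 };
        size_t aligns[] = { 16, 64, 1024 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(*sizes); i++) {
            for (size_t j = 0; j < sizeof(aligns) / sizeof(*aligns); j++) {
                size_t pad = uaf_pad(sizes[i], aligns[j]);

                /* a page-aligned base plus <pad> remains aligned */
                assert(pad % aligns[j] == 0);
                printf("size=%4zu align=%4zu -> pad=%4zu\n",
                       sizes[i], aligns[j], pad);
            }
        }
        return 0;
    }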
diff --git a/include/haproxy/pool-os.h b/include/haproxy/pool-os.h
index cf29c5888..db938b069 100644
--- a/include/haproxy/pool-os.h
+++ b/include/haproxy/pool-os.h
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 /************* normal allocator *************/
 
@@ -32,9 +33,9 @@
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc().
  */
-static forceinline void *pool_alloc_area(size_t size)
+static forceinline void *pool_alloc_area(size_t size, size_t align)
 {
-	return malloc(size);
+	return ha_aligned_alloc(align, size);
 }
 
 /* frees an area <area> of size <size> allocated by pool_alloc_area(). The
@@ -43,8 +44,7 @@ static forceinline void *pool_alloc_area(size_t size)
  */
 static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 {
-	will_free(area, size);
-	free(area);
+	ha_aligned_free_size(area, size);
 }
 
 /************* use-after-free allocator *************/
@@ -52,14 +52,15 @@ static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc(). However the allocation is rounded up to 4kB so that a
  * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always
- * 16-bytes aligned to avoid issues with unaligned structure objects. In case
- * some padding is added, the area's start address is copied at the end of the
- * padding to help detect underflows.
+ * future dereferences are easily detected. The returned object is always at
+ * least 16-bytes aligned to avoid issues with unaligned structure objects, and
+ * in any case, is always at least aligned as required by the pool, though no
+ * more than 4096. In case some padding is added, the area's start address is
+ * copied at the end of the padding to help detect underflows.
  */
-static inline void *pool_alloc_area_uaf(size_t size)
+static inline void *pool_alloc_area_uaf(size_t size, size_t align)
 {
-	size_t pad = (4096 - size) & 0xFF0;
+	size_t pad = (4096 - size) & 0xFF0 & -align;
 	void *ret;
 
 	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
diff --git a/src/pool.c b/src/pool.c
index 5fa5e5530..d7f68e69b 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -490,9 +490,9 @@ void *pool_get_from_os_noinc(struct pool_head *pool)
 	void *ptr;
 
 	if ((pool_debugging & POOL_DBG_UAF) || (pool->flags & MEM_F_UAF))
-		ptr = pool_alloc_area_uaf(pool->alloc_sz);
+		ptr = pool_alloc_area_uaf(pool->alloc_sz, pool->align);
 	else
-		ptr = pool_alloc_area(pool->alloc_sz);
+		ptr = pool_alloc_area(pool->alloc_sz, pool->align);
 	if (ptr)
 		return ptr;
 	_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
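A closing aside on the mingw point from the commit message: some C
runtimes pair aligned allocations with a dedicated free, which is why
frees are routed through a single entry point instead of free(). Below
is a minimal portable sketch of such a pair; the my_aligned_*() names
are illustrative, not HAProxy's actual implementation:

    #include <stdlib.h>
    #ifdef _WIN32
    #include <malloc.h>
    #endif

    static void *my_aligned_alloc(size_t align, size_t size)
    {
    #ifdef _WIN32
        /* mingw/MSVC expose a dedicated aligned allocator */
        return _aligned_malloc(size, align);
    #else
        /* POSIX: align must be a power of two multiple of
         * sizeof(void *) for posix_memalign() to succeed.
         */
        void *ptr = NULL;

        if (posix_memalign(&ptr, align, size) != 0)
            return NULL;
        return ptr;
    #endif
    }

    static void my_aligned_free(void *ptr)
    {
    #ifdef _WIN32
        /* memory from _aligned_malloc() must NOT go through free() */
        _aligned_free(ptr);
    #else
        free(ptr);
    #endif
    }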