git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: tools: implement ha_aligned_zalloc()
author: Willy Tarreau <w@1wt.eu>
Mon, 11 Aug 2025 16:46:28 +0000 (18:46 +0200)
committer: Willy Tarreau <w@1wt.eu>
Mon, 11 Aug 2025 17:55:30 +0000 (19:55 +0200)
This one is exactly ha_aligned_alloc() followed by a memset(0), as
it will be convenient for a number of call places as a replacement
for calloc().

Note that ideally we should also have a calloc version that performs
basic multiply overflow checks, but these are essentially used with
numbers of threads times small structs so that's fine, and we already
do the same everywhere in malloc() calls.

include/haproxy/bug.h
include/haproxy/tools.h

index 5bbe47a51177db73526d27274b0b548047d7703e..e294e377ea609449a40e1a4d138c5b177f7e8493 100644 (file)
@@ -639,6 +639,24 @@ struct mem_stats {
        _ha_aligned_alloc(__a, __s);                                    \
 })
 
+#undef ha_aligned_zalloc
+#define ha_aligned_zalloc(a,s)  ({                                     \
+       size_t __a = (a);                                               \
+       size_t __s = (s);                                               \
+       static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+               .caller = {                                             \
+                       .file = __FILE__, .line = __LINE__,             \
+                       .what = MEM_STATS_TYPE_MALLOC,                  \
+                       .func = __func__,                               \
+               },                                                      \
+       };                                                              \
+       HA_WEAK(__start_mem_stats);                                     \
+       HA_WEAK(__stop_mem_stats);                                      \
+       _HA_ATOMIC_INC(&_.calls);                                       \
+       _HA_ATOMIC_ADD(&_.size, __s);                                   \
+       _ha_aligned_zalloc(__a, __s);                                   \
+})
+
 #undef ha_aligned_alloc_safe
 #define ha_aligned_alloc_safe(a,s)  ({                                 \
        size_t __a = (a);                                               \
@@ -657,6 +675,24 @@ struct mem_stats {
        _ha_aligned_alloc_safe(__a, __s);                               \
 })
 
+#undef ha_aligned_zalloc_safe
+#define ha_aligned_zalloc_safe(a,s)  ({                                        \
+       size_t __a = (a);                                               \
+       size_t __s = (s);                                               \
+       static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+               .caller = {                                             \
+                       .file = __FILE__, .line = __LINE__,             \
+                       .what = MEM_STATS_TYPE_MALLOC,                  \
+                       .func = __func__,                               \
+               },                                                      \
+       };                                                              \
+       HA_WEAK(__start_mem_stats);                                     \
+       HA_WEAK(__stop_mem_stats);                                      \
+       _HA_ATOMIC_INC(&_.calls);                                       \
+       _HA_ATOMIC_ADD(&_.size, __s);                                   \
+       _ha_aligned_zalloc_safe(__a, __s);                              \
+})
+
 #undef ha_aligned_free
 #define ha_aligned_free(x)  ({                                                 \
        typeof(x) __x = (x);                                            \
@@ -703,7 +739,9 @@ struct mem_stats {
 
 #define will_free(x, y) do { } while (0)
 #define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
+#define ha_aligned_zalloc(a,s) _ha_aligned_zalloc(a, s)
 #define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
+#define ha_aligned_zalloc_safe(a,s) _ha_aligned_zalloc_safe(a, s)
 #define ha_aligned_free(p) _ha_aligned_free(p)
 #define ha_aligned_free_size(p,s) _ha_aligned_free(p)
 
index a8bab3e65dda063496d7f96b9dbad620e08f3539..160d5f3ac27adc1fd23d4462e7e937526e23a48e 100644 (file)
@@ -1212,6 +1212,16 @@ static inline void *_ha_aligned_alloc(size_t alignment, size_t size)
 #endif
 }
 
+/* Same as _ha_aligned_alloc() but zeroes the allocated area; returns NULL on failure */
+static inline void *_ha_aligned_zalloc(size_t alignment, size_t size)
+{
+       void *ret = _ha_aligned_alloc(alignment, size);
+
+       if (ret)
+               memset(ret, 0, size);
+       return ret;
+}
+
 /* portable memalign(): tries to accommodate OS specificities, and may fall
  * back to plain malloc() if not supported, meaning that alignment guarantees
  * are only a performance bonus but not granted. The size will automatically be
@@ -1239,6 +1249,16 @@ static inline void *_ha_aligned_alloc_safe(size_t alignment, size_t size)
        return _ha_aligned_alloc(alignment, size);
 }
 
+/* Same as _ha_aligned_alloc_safe() but zeroes the allocated area; returns NULL on failure */
+static inline void *_ha_aligned_zalloc_safe(size_t alignment, size_t size)
+{
+       void *ret = _ha_aligned_alloc_safe(alignment, size);
+
+       if (ret)
+               memset(ret, 0, size);
+       return ret;
+}
+
 /* To be used to free a pointer returned by _ha_aligned_alloc() or
  * _ha_aligned_alloc_safe(). Please use ha_aligned_free() instead
  * (which does perform accounting).