MINOR: implement ha_aligned_alloc() to return aligned memory areas
author Willy Tarreau <w@1wt.eu>
Thu, 31 Jul 2025 13:26:58 +0000 (15:26 +0200)
committer Willy Tarreau <w@1wt.eu>
Wed, 6 Aug 2025 17:19:27 +0000 (19:19 +0200)
We have two versions: _safe(), which verifies and adjusts the alignment,
and the regular one, which trusts the caller. There's also a dedicated
ha_aligned_free() due to mingw.

The currently detected OSes are mingw, unixes older than POSIX 200112
which require memalign(), and those at 200112 or later which will use
posix_memalign(). Solaris 10 reports 200112 (probably through
_GNU_SOURCE, since it does not do so by default), and Solaris 11 still
supports memalign(), so for all Solaris versions we use memalign(). The
memstats wrappers are also implemented and carry the exported names.
This was also the opportunity to provide a separate free call that lets
the caller specify the size (e.g. for use with pools).

For now this code is not used.
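
As a quick illustration of the intended call pattern (hypothetical caller
code, not part of this commit; the struct and the 64-byte alignment are
made up for the example):

    #include <haproxy/bug.h>
    #include <haproxy/tools.h>

    struct shard {
            unsigned long counters[8];    /* one 64-byte cache line */
    };

    static struct shard *alloc_shard(void)
    {
            /* the _safe variant checks and rounds up the alignment itself */
            return ha_aligned_alloc_safe(64, sizeof(struct shard));
    }

    static void release_shard(struct shard *s)
    {
            /* must not be passed to plain free(): on mingw the block
             * comes from _aligned_malloc() and needs _aligned_free()
             */
            ha_aligned_free(s);
    }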

include/haproxy/bug.h
include/haproxy/tools.h

index c43e0cdea3fe9798307eee71e2be066671bfd98e..5bbe47a51177db73526d27274b0b548047d7703e 100644
@@ -620,9 +620,92 @@ struct mem_stats {
        _HA_ATOMIC_ADD(&_.size, __y);                                   \
        strdup(__x);                                                    \
 })
+
+#undef ha_aligned_alloc
+#define ha_aligned_alloc(a,s)  ({                                      \
+       size_t __a = (a);                                               \
+       size_t __s = (s);                                               \
+       static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+               .caller = {                                             \
+                       .file = __FILE__, .line = __LINE__,             \
+                       .what = MEM_STATS_TYPE_MALLOC,                  \
+                       .func = __func__,                               \
+               },                                                      \
+       };                                                              \
+       HA_WEAK(__start_mem_stats);                                     \
+       HA_WEAK(__stop_mem_stats);                                      \
+       _HA_ATOMIC_INC(&_.calls);                                       \
+       _HA_ATOMIC_ADD(&_.size, __s);                                   \
+       _ha_aligned_alloc(__a, __s);                                    \
+})
+
+#undef ha_aligned_alloc_safe
+#define ha_aligned_alloc_safe(a,s)  ({                                 \
+       size_t __a = (a);                                               \
+       size_t __s = (s);                                               \
+       static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+               .caller = {                                             \
+                       .file = __FILE__, .line = __LINE__,             \
+                       .what = MEM_STATS_TYPE_MALLOC,                  \
+                       .func = __func__,                               \
+               },                                                      \
+       };                                                              \
+       HA_WEAK(__start_mem_stats);                                     \
+       HA_WEAK(__stop_mem_stats);                                      \
+       _HA_ATOMIC_INC(&_.calls);                                       \
+       _HA_ATOMIC_ADD(&_.size, __s);                                   \
+       _ha_aligned_alloc_safe(__a, __s);                               \
+})
+
+#undef ha_aligned_free
+#define ha_aligned_free(x)  ({                                         \
+       typeof(x) __x = (x);                                            \
+       static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+               .caller = {                                             \
+                       .file = __FILE__, .line = __LINE__,             \
+                       .what = MEM_STATS_TYPE_FREE,                    \
+                       .func = __func__,                               \
+               },                                                      \
+       };                                                              \
+       HA_WEAK(__start_mem_stats);                                     \
+       HA_WEAK(__stop_mem_stats);                                      \
+       if (__builtin_constant_p((x))) {                                \
+               HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
+       }                                                               \
+       if (__x)                                                        \
+               _HA_ATOMIC_INC(&_.calls);                               \
+       _ha_aligned_free(__x);                                          \
+})
+
+#undef ha_aligned_free_size
+#define ha_aligned_free_size(p,s)  ({                                  \
+       void *__p = (p); size_t __s = (s);                              \
+       static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+               .caller = {                                             \
+                       .file = __FILE__, .line = __LINE__,             \
+                       .what = MEM_STATS_TYPE_FREE,                    \
+                       .func = __func__,                               \
+               },                                                      \
+       };                                                              \
+       HA_WEAK(__start_mem_stats);                                     \
+       HA_WEAK(__stop_mem_stats);                                      \
+       if (__builtin_constant_p((p))) {                                \
+               HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
+       }                                                               \
+       if (__p) {                                                      \
+               _HA_ATOMIC_INC(&_.calls);                               \
+               _HA_ATOMIC_ADD(&_.size, __s);                           \
+       }                                                               \
+       _ha_aligned_free(__p);                                          \
+})
+
 #else // DEBUG_MEM_STATS
 
 #define will_free(x, y) do { } while (0)
+#define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
+#define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
+#define ha_aligned_free(p) _ha_aligned_free(p)
+#define ha_aligned_free_size(p,s) _ha_aligned_free(p)
 
 #endif /* DEBUG_MEM_STATS*/
 
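Aside (not part of the commit): all of the DEBUG_MEM_STATS wrappers above
rely on the same mechanism: each call site drops a static struct into a
dedicated "mem_stats" section, and a reporting loop can later walk every
entry between the linker-generated __start_mem_stats and __stop_mem_stats
symbols. A minimal standalone sketch of that section trick, with generic
names, assuming GCC or clang on an ELF target:

    #include <stdio.h>

    struct hit_counter {
            const char *file;
            int line;
            unsigned long calls;
    };

    /* GNU ld synthesizes __start_<name>/__stop_<name> symbols for any
     * section whose name is a valid C identifier, which is what makes
     * iterating over all call sites possible.
     */
    extern struct hit_counter __start_hits[], __stop_hits[];

    #define COUNT_ME() do {                                             \
            static struct hit_counter _cnt                              \
                    __attribute__((used, __section__("hits"))) =        \
                    { __FILE__, __LINE__, 0 };                          \
            _cnt.calls++;                                               \
    } while (0)

    static void work(void)
    {
            COUNT_ME();
    }

    int main(void)
    {
            struct hit_counter *c;

            work();
            work();
            for (c = __start_hits; c < __stop_hits; c++)
                    printf("%s:%d called %lu times\n",
                           c->file, c->line, c->calls);
            return 0;
    }
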
index abb8f736df52115c3a6a7a6d16c2df93758c3860..a8bab3e65dda063496d7f96b9dbad620e08f3539 100644
@@ -1178,6 +1178,80 @@ static inline void *my_realloc2(void *ptr, size_t size)
        return ret;
 }
 
+/* portable memalign(): tries to accommodate OS specificities, and may fall
+ * back to plain malloc() if not supported, meaning that alignment guarantees
+ * are only a performance bonus but not granted. The caller is responsible for
+ * guaranteeing that the requested alignment is at least sizeof(void*) and a
+ * power of two. If uncertain, use ha_aligned_alloc_safe() instead. The pointer
+ * needs to be passed to ha_aligned_free() for freeing (due to cygwin). Please
+ * use ha_aligned_alloc() instead (which does perform accounting).
+ */
+static inline void *_ha_aligned_alloc(size_t alignment, size_t size)
+{
+       /* let's consider that most OSes have posix_memalign() and make the
+        * exception for the other ones. This way if an OS fails to build,
+        * we'll know about it and handle it as a new exception instead of
+        * relying on old fallbacks that may break (e.g. most BSDs have
+        * dropped memalign()).
+        */
+
+#if defined(_WIN32)
+       /* MINGW (Cygwin) uses _aligned_malloc() */
+       return _aligned_malloc(size, alignment);
+#elif _POSIX_VERSION < 200112L || defined(__sun)
+       /* Old OSes or Solaris */
+       return memalign(alignment, size);
+#else
+       void *ret;
+
+       /* most BSD, Linux since glibc 2.2, Solaris 11 */
+       if (posix_memalign(&ret, alignment, size) == 0)
+               return ret;
+       else
+               return NULL;
+#endif
+}
+
+/* portable memalign(): tries to accommodate OS specificities, and may fall
+ * back to plain malloc() if not supported, meaning that alignment guarantees
+ * are only a performance bonus but not granted. The alignment is automatically
+ * rounded up to the next power of two and set to a minimum of sizeof(void*).
+ * The checks are cheap and generally optimized away by the compiler since most
+ * input arguments are build time constants. The pointer needs to be passed to
+ * ha_aligned_free() for freeing (due to cygwin). Please use
+ * ha_aligned_alloc_safe() instead (which does perform accounting).
+ */
+static inline void *_ha_aligned_alloc_safe(size_t alignment, size_t size)
+{
+       if (unlikely(alignment < sizeof(void*)))
+               alignment = sizeof(void*);
+       else if (unlikely(alignment & (alignment - 1))) {
+               /* not power of two! round up to next power of two by filling
+                * all LSB in O(log(log(N))) then increment the result.
+                */
+               int shift = 1;
+               do {
+                       alignment |= alignment >> shift;
+                       shift *= 2;
+               } while (unlikely(alignment & (alignment + 1)));
+               alignment++;
+       }
+       return _ha_aligned_alloc(alignment, size);
+}
+
+/* To be used to free a pointer returned by _ha_aligned_alloc() or
+ * _ha_aligned_alloc_safe(). Please use ha_aligned_free() instead
+ * (which does perform accounting).
+ */
+static inline void _ha_aligned_free(void *ptr)
+{
+#if defined(_WIN32)
+       _aligned_free(ptr);
+#else
+       free(ptr);
+#endif
+}
+
 int parse_dotted_uints(const char *s, unsigned int **nums, size_t *sz);
 
 /* PRNG */
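
To make the rounding in _ha_aligned_alloc_safe() concrete, here is the same
bit-smearing logic lifted into a throwaway test program (illustration only,
with a made-up function name):

    #include <stdio.h>
    #include <stddef.h>

    /* copy of the adjustment logic: smear the top set bit into all lower
     * bits, then add one to reach the next power of two
     */
    static size_t adjust_alignment(size_t alignment)
    {
            if (alignment < sizeof(void *))
                    alignment = sizeof(void *);
            else if (alignment & (alignment - 1)) {
                    int shift = 1;
                    do {
                            alignment |= alignment >> shift;
                            shift *= 2;
                    } while (alignment & (alignment + 1));
                    alignment++;
            }
            return alignment;
    }

    int main(void)
    {
            /* 48 = 0b110000 -> 0b111111 -> 64 */
            printf("%zu\n", adjust_alignment(48)); /* 64 */
            /* already a power of two: unchanged */
            printf("%zu\n", adjust_alignment(64)); /* 64 */
            /* below sizeof(void*): raised to 8 on 64-bit targets */
            printf("%zu\n", adjust_alignment(4));  /* 8 */
            return 0;
    }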