BUG/MEDIUM: Threads: Only use the gcc >= 4.7 builtins when using gcc >= 4.7.
author     Olivier Houchard <cognet@ci0.org>
           Mon, 15 Apr 2019 19:14:25 +0000 (21:14 +0200)
committer  Olivier Houchard <cognet@ci0.org>
           Mon, 15 Apr 2019 19:16:24 +0000 (21:16 +0200)
Move the definitions of the various _HA_ATOMIC_* macros that use
__atomic_* into the #if GCC_VERSION >= 4.7 block, not just after it, so
that we can build with older versions of gcc again.
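For context, here is a minimal sketch of the guard pattern the patch restores. The
version test and the __sync_* fallback are illustrative only, not the exact
hathreads.h layout; the point is simply that any macro expanding to an
__atomic_* builtin must sit inside the gcc >= 4.7 branch, since those builtins
do not exist on older compilers.

/* Illustrative sketch, not the actual hathreads.h code. */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))

/* full-barrier variant */
#define HA_ATOMIC_ADD(val, i)   __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
/* relaxed variant: must be defined here too, inside the same #if */
#define _HA_ATOMIC_ADD(val, i)  __atomic_add_fetch(val, i, __ATOMIC_RELAXED)

#else /* gcc < 4.7: no __atomic_* builtins, fall back to the legacy __sync_* ones */

#define HA_ATOMIC_ADD(val, i)   __sync_add_and_fetch(val, i)
#define _HA_ATOMIC_ADD(val, i)  __sync_add_and_fetch(val, i)

#endif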

include/common/hathreads.h

index a3b696439c9e877266924c1b4d40b91f72e864ae..7cbb3495b8b8170b4998bee285d16d54925849d0 100644
@@ -297,7 +297,23 @@ static inline unsigned long thread_isolated()
 
 #define HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_SEQ_CST)
 #define HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_SEQ_CST)
-#endif
+
+/* Variants that don't generate any memory barrier.
+ * If you're unsure how to deal with barriers, just use the HA_ATOMIC_* version,
+ * that will always generate correct code.
+ * Usually it's fine to use those when updating data that have no dependency,
+ * ie updating a counter. Otherwise a barrier is required.
+ */
+#define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val,  flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_RELAXED)
+
+#endif /* gcc >= 4.7 */
 
 #define HA_ATOMIC_UPDATE_MAX(val, new)                                 \
        ({                                                              \
@@ -320,20 +336,6 @@ static inline unsigned long thread_isolated()
 
 #define HA_BARRIER() pl_barrier()
 
-/* Variants that don't generate any memory barrier.
- * If you're unsure how to deal with barriers, just use the HA_ATOMIC_* version,
- * that will always generate correct code.
- * Usually it's fine to use those when updating data that have no dependency,
- * ie updating a counter. Otherwise a barrier is required.
- */
-#define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
-#define _HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
-#define _HA_ATOMIC_XADD(val, i)       __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
-#define _HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
-#define _HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
-#define _HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val,  flags, __ATOMIC_RELAXED)
-#define _HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, __ATOMIC_RELAXED)
-#define _HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, __ATOMIC_RELAXED)
 void thread_harmless_till_end();
 void thread_isolate();
 void thread_release();
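
As a usage note on the comment in the hunk above: a standalone counter can use
the relaxed _HA_ATOMIC_* variants, while publishing data that other threads
will read through a pointer needs the full-barrier HA_ATOMIC_* forms. The
sketch below is illustrative; count_request, publish_conf, total_requests and
struct conf are made-up names, not part of haproxy.

#include <common/hathreads.h>

struct conf;                        /* hypothetical payload type */
static unsigned int total_requests; /* hypothetical statistics counter */

/* Nothing else depends on the counter's value, so a relaxed add is enough. */
static inline void count_request(void)
{
	_HA_ATOMIC_ADD(&total_requests, 1);
}

/* The new object's fields must be visible before the pointer itself, so the
 * sequentially-consistent (full-barrier) store is used here.
 */
static inline void publish_conf(struct conf **slot, struct conf *newconf)
{
	HA_ATOMIC_STORE(slot, newconf);
}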