#define HA_HAVE_CAS_DW
#endif
+/*********************** IMPORTANT NOTE ABOUT ALIGNMENT **********************\
+ * Alignment works fine for variables. It also works on types and struct *
+ * members by propagating the alignment to the container struct itself, *
+ * but this requires that variables of the affected type are properly *
+ * aligned themselves. While regular variables will always abide, those *
+ * allocated using malloc() will not! Most platforms provide posix_memalign()*
+ * for this, but it's not available everywhere. As such one ought not to use *
+ * these alignment declarations inside structures that are dynamically *
+ * allocated. If the purpose is only to avoid false sharing of cache lines *
+ * for multi-threading, see THREAD_PAD() below.                             *
+\*****************************************************************************/
/* sets alignment for current field or variable */
#ifndef ALIGNED
#endif
#endif
+/* add optional padding of the specified size between fields in a structure,
+ * only when threads are enabled. This is used to avoid false sharing of cache
+ * lines for dynamically allocated structures which cannot guarantee alignment.
+ */
+#ifndef THREAD_PAD
+# ifdef USE_THREAD
+/* Two levels of macro expansion are needed so that __LINE__ is expanded to
+ * its numeric value before being pasted by ##, producing a unique pad field
+ * name (e.g. __pad_123) for each use site within the same structure.
+ */
+#  define __THREAD_PAD(x,l) char __pad_##l[x]
+#  define _THREAD_PAD(x,l)  __THREAD_PAD(x, l)
+#  define THREAD_PAD(x)     _THREAD_PAD(x, __LINE__)
+# else
+/* threads disabled: padding collapses to nothing, no memory cost */
+#  define THREAD_PAD(x)
+# endif
+#endif
+
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
* to __thread when threads are enabled or empty when disabled.
*/
/* The elements below may be changed on every single request by any
* thread, and generally at the same time.
*/
- ALWAYS_ALIGN(64);
+ THREAD_PAD(63);
struct eb32_node idle_node; /* When to next do cleanup in the idle connections */
unsigned int curr_idle_conns; /* Current number of orphan idling connections, both the idle and the safe lists */
unsigned int curr_idle_nb; /* Current number of connections in the idle list */
	/* Elements below are used by LB algorithms and must be doable in
* parallel to other threads reusing connections above.
*/
- ALWAYS_ALIGN(64);
+ THREAD_PAD(63);
__decl_thread(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */
unsigned npos, lpos; /* next and last positions in the LB tree, protected by LB lock */
struct eb32_node lb_node; /* node used for tree-based load balancing */
struct server *next_full; /* next server in the temporary full list */
/* usually atomically updated by any thread during parsing or on end of request */
- ALWAYS_ALIGN(64);
+ THREAD_PAD(63);
int cur_sess; /* number of currently active sessions (including syn_sent) */
int served; /* # of active sessions currently being served (ie not pending) */
int consecutive_errors; /* current number of consecutive errors */
struct be_counters counters; /* statistics counters */
/* Below are some relatively stable settings, only changed under the lock */
- ALWAYS_ALIGN(64);
+ THREAD_PAD(63);
struct eb_root *lb_tree; /* we want to know in what tree the server is */
struct tree_occ *lb_nodes; /* lb_nodes_tot * struct tree_occ */