struct ring_v2a {
size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps)
- size_t tail __attribute__((aligned(64))); // storage tail
- size_t head __attribute__((aligned(64))); // storage head
- char area[0] __attribute__((aligned(64))); // storage area begins immediately here
+ size_t tail ALIGNED(64); // storage tail
+ size_t head ALIGNED(64); // storage head
+ char area[0] ALIGNED(64); // storage area begins immediately here
};
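/* Sketch only (assumption, the macro's definition is not part of this hunk):
 * ALIGNED(x) is presumably a thin portability wrapper around the compiler's
 * alignment attribute, so call sites no longer open-code
 * __attribute__((aligned(x))) directly:
 */
#ifndef ALIGNED
#define ALIGNED(x) __attribute__((aligned(x)))
#endif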
/* display the message and exit with the code */
size_t size;
struct ha_caller caller;
const void *extra; // extra info specific to this call (e.g. pool ptr)
-} __attribute__((aligned(sizeof(void*))));
+} ALIGNED(sizeof(void*));
#undef calloc
#define calloc(x,y) ({ \
struct mt_list toremove_conns;
struct task *cleanup_task;
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
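/* Sketch only (assumption, definition not shown in this series): with the
 * explicit size dropped, THREAD_ALIGNED() is expected to default to one cache
 * line (64 bytes) when threads are compiled in, and to expand to nothing in
 * single-threaded builds, e.g.:
 */
#ifdef USE_THREAD
#define THREAD_ALIGNED() ALIGNED(64)
#else
#define THREAD_ALIGNED()
#endif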
/* Termination events logs:
#ifdef DEBUG_FD
unsigned int event_count; /* number of events reported */
#endif
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
/* polled mask, one bit per thread and per direction for each FD */
struct polled_mask {
/* XXX 4 bytes unused */
/* Blocks representing the HTTP message itself */
- char blocks[VAR_ARRAY] __attribute__((aligned(8)));
+ char blocks[VAR_ARRAY] ALIGNED(8);
};
#endif /* _HAPROXY_HTX_T_H */
struct accept_queue_ring {
uint32_t idx; /* (head << 16) | tail */
struct tasklet *tasklet; /* tasklet of the thread owning this ring */
- struct connection *entry[ACCEPT_QUEUE_SIZE] __attribute((aligned(64)));
+ struct connection *entry[ACCEPT_QUEUE_SIZE] THREAD_ALIGNED();
};
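/* Illustrative only (hypothetical declaration, not from this patch): aligning
 * entry[] on a cache line mostly pays off when accept rings are packed into a
 * per-thread array, where it prevents false sharing between adjacent rings:
 */
struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED();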
int unique_id; /* Each pattern reference has a unique id. */

unsigned long long revision; /* updated for each update */
unsigned long long entry_cnt; /* the total number of entries */
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
__decl_thread(HA_RWLOCK_T lock); /* Lock used to protect pat ref elements */
event_hdl_sub_list e_subs; /* event_hdl: pat_ref's subscribers list (atomically updated) */
};
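/* Sketch only (assumption): THREAD_ALIGN(), placed between struct members,
 * must itself be a valid member declaration, so it plausibly expands to an
 * empty anonymous union (a GCC/Clang extension) carrying the alignment; here
 * it pushes the rwlock and the subscribers list onto a fresh cache line when
 * threads are enabled:
 */
#ifdef USE_THREAD
#define THREAD_ALIGN() union { } ALIGNED(64)
#else
#define THREAD_ALIGN()
#endif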
unsigned int tid; /* thread id, for debugging only */
struct pool_head *pool; /* assigned pool, for debugging only */
ulong fill_pattern; /* pattern used to fill the area on free */
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
/* This describes a pool registration, which is what was passed to
* create_pool() and that might have been merged with an existing pool.
struct list regs; /* registrations: alt names for this pool */
/* heavily read-write part */
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
/* these entries depend on the pointer value; they're used to reduce
* the contention on fast-changing values. The alignment here is
* just meant to shard elements and there are no per-free_list stats.
*/
struct {
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
struct pool_item *free_list; /* list of free shared objects */
unsigned int allocated; /* how many chunks have been allocated */
unsigned int used; /* how many chunks are currently in use */
struct proxy_per_tgroup {
struct queue queue;
struct lbprm_per_tgrp lbprm;
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
struct proxy {
enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
EXTRA_COUNTERS(extra_counters_fe);
EXTRA_COUNTERS(extra_counters_be);
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
unsigned int queueslength; /* Sum of the length of each queue */
int served; /* # of active sessions currently being served */
int totpend; /* total number of pending connections on this instance (for stats) */
struct ring_storage {
size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps)
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
size_t tail; // storage tail
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
size_t head; // storage head
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
char area[0]; // storage area begins immediately here
};
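/* Illustrative check (not part of the patch): with THREAD_ALIGN() assumed to
 * force 64-byte alignment when USE_THREAD is set, tail, head and area each
 * start their own cache line, which a build-time assertion could confirm:
 */
#ifdef USE_THREAD
#include <stddef.h> /* offsetof() */
_Static_assert(offsetof(struct ring_storage, head) - offsetof(struct ring_storage, tail) >= 64,
               "tail and head are expected to sit in distinct cache lines");
#endif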
/* keep the queue in a separate cache line below */
struct {
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
struct ring_wait_cell *ptr;
} queue[RING_WAIT_QUEUES + 1]; // wait queue + 1 spacer
};
struct eb_root *lb_tree; /* For LB algos with split between thread groups, the tree to be used, for each group */
unsigned npos, lpos; /* next and last positions in the LB tree, protected by LB lock */
unsigned rweight; /* remainder of weight in the current LB tree */
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
/* Configure the protocol selection for websocket */
enum __attribute__((__packed__)) srv_ws_mode {
/* The elements below may be changed on every single request by any
* thread, and generally at the same time.
*/
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
struct eb32_node idle_node; /* When to next do cleanup in the idle connections */
unsigned int curr_idle_conns; /* Current number of orphan idling connections, both the idle and the safe lists */
unsigned int curr_idle_nb; /* Current number of connections in the idle list */
/* Elements below are used by LB algorithms and must remain usable in
 * parallel with other threads reusing connections above.
*/
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
__decl_thread(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */
union {
struct eb32_node lb_node; /* node used for tree-based load balancing */
};
/* usually atomically updated by any thread during parsing or on end of request */
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
int cur_sess; /* number of currently active sessions (including syn_sent) */
int served; /* # of active sessions currently being served (ie not pending) */
int consecutive_errors; /* current number of consecutive errors */
struct be_counters counters; /* statistics counters */
/* Below are some relatively stable settings, only changed under the lock */
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
struct eb_root *lb_tree; /* we want to know in what tree the server is */
struct tree_occ *lb_nodes; /* lb_nodes_tot * struct tree_occ */
void *ptr; /* generic ptr to check if set or not */
} write_to; /* updates received on the source table will also update write_to */
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
struct {
struct eb_root keys; /* head of sticky session tree */
unsigned int refcnt; /* number of local peers over all peers sections
attached to this table */
unsigned int current; /* number of sticky sessions currently in table */
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
struct eb_root updates; /* head of sticky updates sequence tree, uses updt_lock */
struct mt_list *pend_updts; /* list of updates to be added to the update sequence tree, one per thread-group */
unsigned int localupdate; /* uses updt_lock */
struct tasklet *updt_task;/* tasklet responsible for pushing the pending updates into the tree */
- THREAD_ALIGN(64);
+ THREAD_ALIGN();
/* this lock is heavily used and must be on its own cache line */
__decl_thread(HA_RWLOCK_T updt_lock); /* lock protecting the updates part */
extern struct pool_head *pool_head_tasklet;
extern struct pool_head *pool_head_notification;
-__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED(64));
+__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED());
void __tasklet_wakeup_on(struct tasklet *tl, int thr);
struct list *__tasklet_wakeup_after(struct list *head, struct tasklet *tl);
/* declare a self-initializing spinlock, aligned on a cache line */
#define __decl_aligned_spinlock(lock) \
- HA_SPINLOCK_T (lock) __attribute__((aligned(64))) = 0;
+ HA_SPINLOCK_T (lock) ALIGNED(64) = 0;
/* declare a self-initializing rwlock */
#define __decl_rwlock(lock) \
/* declare a self-initializing rwlock, aligned on a cache line */
#define __decl_aligned_rwlock(lock) \
- HA_RWLOCK_T (lock) __attribute__((aligned(64))) = 0;
+ HA_RWLOCK_T (lock) ALIGNED(64) = 0;
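/* Usage sketch (hypothetical lock names): these macros define file-scope,
 * zero-initialized locks that sit alone on a cache line, so contention on the
 * lock does not cause false sharing with neighbouring globals. The trailing
 * ';' is already supplied by the macro body.
 */
__decl_aligned_spinlock(example_spin_lock)
__decl_aligned_rwlock(example_rw_lock)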
#else /* !USE_THREAD */
volatile uint *global_now_ms; /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */
/* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
-THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
+THREAD_ALIGNED() static llong now_offset; /* global offset between system time and global time in ns */
THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
THREAD_LOCAL uint now_ms; /* internal monotonic date in milliseconds (may wrap) */
size_t limit;
};
-static struct hlua_mem_allocator hlua_global_allocator THREAD_ALIGNED(64);
+static struct hlua_mem_allocator hlua_global_allocator THREAD_ALIGNED();
/* hlua event subscription */
struct hlua_event_sub {
DECLARE_STATIC_TYPED_POOL(var_pool, "vars", struct var);
/* list of variables for the process scope. */
-struct vars proc_vars THREAD_ALIGNED(64);
+struct vars proc_vars THREAD_ALIGNED();
/* This array of int contains the system limits per context. */
static unsigned int var_global_limit = 0;