unsigned int ctr2; /* general purpose debug counter */
#endif
char __pad[0]; /* unused except to check remaining room */
- char __end[0] __attribute__((aligned(64))); // align size to 64.
-};
+ char __end[0] THREAD_ALIGNED();
+} THREAD_ALIGNED();
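/* Illustrative sketch, not part of the patch: THREAD_ALIGNED() is assumed
 * here to default to 64-byte alignment when given no argument, and to
 * expand to nothing when threads are disabled. One possible definition,
 * relying on GCC/Clang extensions (variadic macros and the "?:" elvis
 * operator), could be:
 *
 *   #ifdef USE_THREAD
 *   # define THREAD_ALIGNED(...) __attribute__((aligned((__VA_ARGS__ + 0) ?: 64)))
 *   #else
 *   # define THREAD_ALIGNED(...)
 *   #endif
 *
 * so that THREAD_ALIGNED() yields aligned(64) while THREAD_ALIGNED(128)
 * yields aligned(128). Applied to the closing brace above, it aligns the
 * whole struct type, not just the __end[0] marker.
 */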
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
#define SCHED_ACT_HASH_BITS 8
uint64_t lkw_time; /* lock waiting time */
uint64_t lkd_time; /* locked time */
uint64_t mem_time; /* memory ops wait time */
-};
+} THREAD_ALIGNED();
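/* Illustrative sketch (hypothetical helper, not the one in the tree): with
 * SCHED_ACT_HASH_BITS = 8 the table holds 1 << 8 = 256 buckets, and the
 * scheduler folds each task's function pointer down to that many bits.
 * A minimal pointer hash doing such folding (needs <stdint.h>):
 */
#define SCHED_ACT_HASH_BUCKETS (1U << SCHED_ACT_HASH_BITS)

static inline unsigned int sched_act_hash_demo(const void *func)
{
	unsigned long long h = (unsigned long long)(uintptr_t)func;

	h ^= h >> 33;               /* fold high bits into the low ones */
	h *= 0xff51afd7ed558ccdULL; /* multiplicative mixing step       */
	h ^= h >> 33;
	return h & (SCHED_ACT_HASH_BUCKETS - 1); /* keep the low 8 bits */
}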
#endif /* _HAPROXY_ACTIVITY_T_H */
struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
-extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64)));
+extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED();
extern const char* li_status_st[LI_STATE_COUNT];
enum li_status get_li_status(struct listener *l);
unsigned int failed; /* failed allocations (indexed by hash of TID) */
} buckets[CONFIG_HAP_POOL_BUCKETS];
- struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
-} __attribute__((aligned(64)));
+ struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(); /* pool caches */
+} THREAD_ALIGNED();
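/* Illustrative sketch (hypothetical type, assuming 64-byte cache lines):
 * without the per-element alignment, two threads updating adjacent array
 * elements would share a cache line and keep stealing it from each other
 * (false sharing). Padding each element to 64 bytes keeps them apart:
 */
struct demo_ctr {
	unsigned long long value;                  /* the actual counter */
	char pad[64 - sizeof(unsigned long long)]; /* fill the line      */
};

_Static_assert(sizeof(struct demo_ctr) == 64, "one cache line per element");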
#endif /* _HAPROXY_POOL_T_H */
/* declare a self-initializing spinlock, aligned on a cache line */
#define __decl_aligned_spinlock(lock) \
- HA_SPINLOCK_T (lock) __attribute__((aligned(64))); \
+ HA_SPINLOCK_T (lock) THREAD_ALIGNED(); \
INITCALL1(STG_LOCK, ha_spin_init, &(lock))
/* declare a self-initializing rwlock */
/* declare a self-initializing rwlock, aligned on a cache line */
#define __decl_aligned_rwlock(lock) \
- HA_RWLOCK_T (lock) __attribute__((aligned(64))); \
+ HA_RWLOCK_T (lock) THREAD_ALIGNED(); \
INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
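/* Usage sketch (hypothetical lock name): a subsystem declares an aligned,
 * self-initializing spinlock with:
 *
 *   __decl_aligned_spinlock(demo_lock);
 *
 * which, given the macro above, expands to roughly:
 *
 *   HA_SPINLOCK_T (demo_lock) THREAD_ALIGNED();
 *   INITCALL1(STG_LOCK, ha_spin_init, &(demo_lock));
 *
 * i.e. the lock gets its own cache line and is initialized during the
 * STG_LOCK init stage without any explicit call in the subsystem's code.
 */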
#endif /* USE_THREAD */
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
- char __end[0] __attribute__((aligned(64)));
+ char __end[0] THREAD_ALIGNED();
};
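/* Illustrative, stand-alone sketch (hypothetical struct): __pad[0] marks
 * the end of the real fields while __end[0] snaps to the next 64-byte
 * boundary, so the distance between the two is the room left on the last
 * cache line before the struct grows by another line. Zero-length arrays
 * are a GCC/Clang extension.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_ctx {
	unsigned long long a, b, c;                 /* 24 bytes of real fields */
	char __pad[0];                              /* end of the useful part  */
	char __end[0] __attribute__((aligned(64))); /* next 64B boundary       */
};

int main(void)
{
	/* prints "room left: 40": 64 - 24 bytes free on the last line */
	printf("room left: %zu\n",
	       offsetof(struct demo_ctx, __end) - offsetof(struct demo_ctx, __pad));
	return 0;
}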
/* This structure describes the group-specific context (e.g. active threads
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
- char __end[0] __attribute__((aligned(64)));
+ char __end[0] THREAD_ALIGNED();
};
/* This structure describes all the per-thread info we need. When threads are
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
- char __end[0] __attribute__((aligned(64)));
+ char __end[0] THREAD_ALIGNED();
};
/* This structure describes all the per-thread context we need. This is
uint64_t prof_mem_stop_ns = 0;
/* One struct per thread containing all collected measurements */
-struct activity activity[MAX_THREADS] __attribute__((aligned(64))) = { };
+struct activity activity[MAX_THREADS] = { };
/* One struct per function pointer hash entry (SCHED_ACT_HASH_BUCKETS values, 0=collision) */
-struct sched_activity sched_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64))) = { };
+struct sched_activity sched_activity[SCHED_ACT_HASH_BUCKETS] = { };
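/* Illustrative sketch (hypothetical type): the per-variable attribute can
 * be dropped above because the struct types now carry the alignment
 * themselves (the "} THREAD_ALIGNED();" hunks earlier), and arrays inherit
 * their element type's alignment:
 */
struct demo_item { long v; } __attribute__((aligned(64)));

static struct demo_item demo_items[4]; /* 64-byte aligned automatically */

_Static_assert(_Alignof(struct demo_item) == 64, "type carries the alignment");
_Static_assert(sizeof(struct demo_item) == 64, "each element fills one line");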
#ifdef USE_MEMORY_PROFILING
static int cli_io_handler_show_profiling(struct appctx *appctx)
{
struct show_prof_ctx *ctx = appctx->svcctx;
- struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
+ struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS];
#ifdef USE_MEMORY_PROFILING
struct memprof_stats tmp_memstats[MEMPROF_HASH_BUCKETS + 1];
unsigned long long tot_alloc_calls, tot_free_calls;
*/
static int cli_io_handler_show_tasks(struct appctx *appctx)
{
- struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
+ struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS];
struct buffer *name_buffer = get_trash_chunk();
struct sched_activity *entry;
const struct tasklet *tl;
#if defined(USE_THREAD)
-struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64))) = { };
+struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED() = { };
/* dequeue and process a pending connection from the local accept queue (single
* consumer). Returns the accepted connection or NULL if none was found.
/* used to detect if the scheduler looks stuck (for warnings) */
static struct {
- int sched_stuck ALIGNED(64);
+ int sched_stuck THREAD_ALIGNED();
} sched_ctx[MAX_THREADS];
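/* Illustrative note: because the member carries THREAD_ALIGNED(), each
 * sched_ctx[] element is padded out to a full cache line, so every
 * thread's sched_stuck flag lives on its own line and can be updated
 * without disturbing its neighbours; with threads disabled the attribute
 * is assumed to vanish and the array collapses to plain ints. A
 * compile-time check of that layout (assuming USE_THREAD) could be:
 *
 *   _Static_assert(sizeof(sched_ctx[0]) % 64 == 0, "one line per thread");
 */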
/* Flags the task <t> for immediate destruction and puts it into its first