extern struct buffer buf_empty;
extern struct buffer buf_wanted;
extern struct list buffer_wq;
-#ifdef USE_THREAD
-extern HA_SPINLOCK_T buffer_wq_lock;
-#endif
+__decl_hathreads(extern HA_SPINLOCK_T buffer_wq_lock);
int init_buffer();
void deinit_buffer();
#ifndef USE_THREAD
+#define __decl_hathreads(decl)
+
#define HA_ATOMIC_CAS(val, old, new) ({((*val) == (*old)) ? (*(val) = (new) , 1) : (*(old) = *(val), 0);})
#define HA_ATOMIC_ADD(val, i) ({*(val) += (i);})
#define HA_ATOMIC_SUB(val, i) ({*(val) -= (i);})
#include <pthread.h>
#include <import/plock.h>
+#define __decl_hathreads(decl) decl
+
/* TODO: thread: For now, we rely on GCC builtins but it could be a good idea to
* have a header file regrouping all functions dealing with threads. */
#define HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, 0, 0)
struct pool_head {
void **free_list;
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock; /* the spin lock */
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
struct list list; /* list of all known pools */
unsigned int used; /* how many chunks are currently in use */
unsigned int allocated; /* how many chunks have been allocated */
extern unsigned int nb_applets;
extern unsigned int applets_active_queue;
-#ifdef USE_THREAD
-extern HA_SPINLOCK_T applet_active_lock;
-#endif
+__decl_hathreads(extern HA_SPINLOCK_T applet_active_lock);
extern struct list applet_active_queue;
void applet_run_active();
extern THREAD_LOCAL int *fd_updt; // FD updates list
extern THREAD_LOCAL int fd_nbupdt; // number of updates in the list
-#ifdef USE_THREAD
-HA_SPINLOCK_T fdtab_lock; /* global lock to protect fdtab array */
-HA_RWLOCK_T fdcache_lock; /* global lock to protect fd_cache array */
-HA_SPINLOCK_T poll_lock; /* global lock to protect poll info */
-#endif
+__decl_hathreads(HA_SPINLOCK_T fdtab_lock); /* global lock to protect fdtab array */
+__decl_hathreads(HA_RWLOCK_T fdcache_lock); /* global lock to protect fd_cache array */
+__decl_hathreads(HA_SPINLOCK_T poll_lock); /* global lock to protect poll info */
/* Deletes an FD from the fdsets, and recomputes the maxfd limit.
* The file descriptor is also closed.
extern struct signal_descriptor signal_state[];
extern struct pool_head *pool2_sig_handlers;
-#ifdef USE_THREAD
-extern HA_SPINLOCK_T signals_lock;
-#endif
+__decl_hathreads(extern HA_SPINLOCK_T signals_lock);
void signal_handler(int sig);
void __signal_process_queue();
extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
extern struct pool_head *pool2_task;
extern struct pool_head *pool2_notification;
-#ifdef USE_THREAD
-extern HA_SPINLOCK_T rq_lock; /* spin lock related to run queue */
-extern HA_SPINLOCK_T wq_lock; /* spin lock related to wait queue */
-#endif
+
+__decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
+__decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */
/* return 0 if task is in run queue, otherwise non-zero */
static inline int task_in_rq(struct task *t)
struct lb_fwlc fwlc;
struct lb_chash chash;
struct lb_fas fas;
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
+
/* Call backs for some actions. Any of them may be NULL (thus should be ignored). */
void (*update_server_eweight)(struct server *); /* to be called after eweight change */
void (*set_server_status_up)(struct server *); /* to be called after status changes to UP */
struct eb_root query_ids; /* tree to quickly lookup/retrieve query ids currently in use
* used by each nameserver, but stored in resolvers since there must
* be a unique relation between an eb_root and an eb_node (resolution) */
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
struct list list; /* resolvers list */
};
void (*iocb)(int fd); /* I/O handler */
void *owner; /* the connection or listener associated with this fd, NULL if closed */
unsigned long thread_mask; /* mask of thread IDs authorized to process the task */
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
unsigned int cache; /* position+1 in the FD cache. 0=not in cache. */
unsigned char state; /* FD state for read and write directions (2*3 bits) */
unsigned char ev; /* event seen in return of poll() : FD_POLL_* */
} unix_bind;
#ifdef USE_CPU_AFFINITY
unsigned long cpu_map[LONGBITS]; /* list of CPU masks for the 32/64 first processes */
-
-#ifdef USE_THREAD
- unsigned long thread_map[LONGBITS][LONGBITS]; /* list of CPU masks for the 32/64 first threads per process */
-#endif
-
+ __decl_hathreads(unsigned long thread_map[LONGBITS][LONGBITS]); /* list of CPU masks for the 32/64 first threads per process */
#endif
struct proxy *stats_fe; /* the frontend holding the stats settings */
struct vars vars; /* list of variables for the process scope. */
int tcp_ut; /* for TCP, user timeout */
char *interface; /* interface name or NULL */
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
const struct netns_entry *netns; /* network namespace of the listener*/
char *display; /* String displayed to identify the pattern origin. */
struct list head; /* The head of the list of struct pat_ref_elt. */
struct list pat; /* The head of the list of struct pattern_expr. */
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock; /* Lock used to protect pat ref elements */
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock); /* Lock used to protect pat ref elements */
};
/* This is a part of struct pat_ref. Each entry contain one
struct eb_root pattern_tree; /* may be used for lookup in large datasets */
struct eb_root pattern_tree_2; /* may be used for different types */
int mflags; /* flags relative to the parsing or matching method. */
-#ifdef USE_THREAD
- HA_RWLOCK_T lock; /* lock used to protect patterns */
-#endif
+ __decl_hathreads(HA_RWLOCK_T lock); /* lock used to protect patterns */
};
/* This is a list of expression. A struct pattern_expr can be used by
struct shared_table *remote_table;
struct shared_table *last_local_table;
struct shared_table *tables;
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock; /* lock used to handle this peer section */
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock); /* lock used to handle this peer section */
struct peer *next; /* next peer in the list */
};
* code even though they are not checks. This structure
* is as a parameter to the check code.
* Each check corresponds to a mailer */
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
};
struct proxy {
* name is used
*/
struct list filter_configs; /* list of the filters that are declared on this proxy */
-
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
};
struct switching_rule {
struct sample_expr *sni; /* sample expression for SNI */
} ssl_ctx;
#endif
-
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
struct {
const char *file; /* file where the section appears */
int line; /* line where the section appears */
struct list applets; /* List of available SPOE applets */
struct list sending_queue; /* Queue of streams waiting to send data */
struct list waiting_queue; /* Queue of streams waiting for a ack, in async mode */
-
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
} *rt;
};
struct stksess {
unsigned int expire; /* session expiration date */
unsigned int ref_cnt; /* reference count, can only purge when zero */
-#ifdef USE_THREAD
- HA_RWLOCK_T lock; /* lock related to the table entry */
-#endif
+ __decl_hathreads(HA_RWLOCK_T lock); /* lock related to the table entry */
struct eb32_node exp; /* ebtree node used to hold the session in expiration tree */
struct eb32_node upd; /* ebtree node used to hold the update sequence tree */
struct ebmb_node key; /* ebtree node used to hold the session in table */
struct eb_root exps; /* head of sticky session expiration tree */
struct eb_root updates; /* head of sticky updates sequence tree */
struct pool_head *pool; /* pool used to allocate sticky sessions */
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock; /* spin lock related to the table */
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock); /* spin lock related to the table */
struct task *exp_task; /* expiration task */
struct task *sync_task; /* sync task */
unsigned int update;
struct list wake_me; /* Part of list of signals to be targeted if an
event occurs. */
struct task *task; /* The task to be wake if an event occurs. */
-#ifdef USE_THREAD
- HA_SPINLOCK_T lock;
-#endif
+ __decl_hathreads(HA_SPINLOCK_T lock);
};
/* The base for all tasks */
struct list head;
enum vars_scope scope;
unsigned int size;
-#ifdef USE_THREAD
- HA_RWLOCK_T rwlock;
-#endif
+ __decl_hathreads(HA_RWLOCK_T rwlock);
};
/* This struct describes a variable. */
unsigned int nb_applets = 0;
unsigned int applets_active_queue = 0;
-
-#ifdef USE_THREAD
-HA_SPINLOCK_T applet_active_lock; /* spin lock related to applet active queue */
-#endif
+__decl_hathreads(HA_SPINLOCK_T applet_active_lock); /* spin lock related to applet active queue */
struct list applet_active_queue = LIST_HEAD_INIT(applet_active_queue);
/* list of objects waiting for at least one buffer */
struct list buffer_wq = LIST_HEAD_INIT(buffer_wq);
-#ifdef USE_THREAD
-HA_SPINLOCK_T buffer_wq_lock;
-#endif
+__decl_hathreads(HA_SPINLOCK_T buffer_wq_lock);
/* this buffer is always the same size as standard buffers and is used for
* swapping data inside a buffer.
static struct list pid_list = LIST_HEAD_INIT(pid_list);
static struct pool_head *pool2_pid_list;
-#ifdef USE_THREAD
-HA_SPINLOCK_T pid_list_lock;
-#endif
+__decl_hathreads(HA_SPINLOCK_T pid_list_lock);
void block_sigchld(void)
{
#if defined(USE_SLZ) || defined(USE_ZLIB)
-#ifdef USE_THREAD
-static HA_SPINLOCK_T comp_pool_lock;
-#endif
+__decl_hathreads(static HA_SPINLOCK_T comp_pool_lock);
#endif
#ifdef USE_ZLIB
THREAD_LOCAL int *fd_updt = NULL; // FD updates list
THREAD_LOCAL int fd_nbupdt = 0; // number of updates in the list
-#ifdef USE_THREAD
-HA_SPINLOCK_T fdtab_lock; /* global lock to protect fdtab array */
-HA_RWLOCK_T fdcache_lock; /* global lock to protect fd_cache array */
-HA_SPINLOCK_T poll_lock; /* global lock to protect poll info */
-#endif
+__decl_hathreads(HA_SPINLOCK_T fdtab_lock); /* global lock to protect fdtab array */
+__decl_hathreads(HA_RWLOCK_T fdcache_lock); /* global lock to protect fd_cache array */
+__decl_hathreads(HA_SPINLOCK_T poll_lock); /* global lock to protect poll info */
/* Deletes an FD from the fdsets, and recomputes the maxfd limit.
* The file descriptor is also closed.
* and RESET_SAFE_LJMP manipulates the Lua stack, so it will be careful
* to set mutex around these functions.
*/
-#ifdef USE_THREAD
-HA_SPINLOCK_T hlua_global_lock;
-#endif
+__decl_hathreads(HA_SPINLOCK_T hlua_global_lock);
THREAD_LOCAL jmp_buf safe_ljmp_env;
static int hlua_panic_safe(lua_State *L) { return 0; }
static int hlua_panic_ljmp(lua_State *L) { longjmp(safe_ljmp_env, 1); }
#include <proto/stream.h>
#include <proto/task.h>
-#ifdef USE_THREAD
/* listner_queue lock (same for global and per proxy queues) */
-static HA_SPINLOCK_T lq_lock;
-#endif
+__decl_hathreads(static HA_SPINLOCK_T lq_lock);
/* List head of all known bind keywords */
static struct bind_kw_list bind_keywords = {
struct list pattern_reference = LIST_HEAD_INIT(pattern_reference);
static struct lru64_head *pat_lru_tree;
-#ifdef USE_THREAD
-HA_SPINLOCK_T pat_lru_tree_lock;
-#endif
+__decl_hathreads(HA_SPINLOCK_T pat_lru_tree_lock);
static unsigned long long pat_lru_seed;
/*
struct pool_head *pool2_pipe = NULL;
struct pipe *pipes_live = NULL; /* pipes which are still ready to use */
-#ifdef USE_THREAD
-HA_SPINLOCK_T pipes_lock; /* lock used to protect pipes list */
-#endif
+
+__decl_hathreads(HA_SPINLOCK_T pipes_lock); /* lock used to protect pipes list */
+
int pipes_used = 0; /* # of pipes in use (2 fds each) */
int pipes_free = 0; /* # of pipes unused */
#include <netinet/tcp.h>
struct list updated_servers = LIST_HEAD_INIT(updated_servers);
-
-#ifdef USE_THREAD
-HA_SPINLOCK_T updated_servers_lock;
-#endif
+__decl_hathreads(HA_SPINLOCK_T updated_servers_lock);
static void srv_register_update(struct server *srv);
static void srv_update_state(struct server *srv, int version, char **params);
sigset_t blocked_sig;
int signal_pending = 0; /* non-zero if t least one signal remains unprocessed */
-#ifdef USE_THREAD
-HA_SPINLOCK_T signals_lock;
-#endif
+__decl_hathreads(HA_SPINLOCK_T signals_lock);
/* Common signal handler, used by all signals. Received signals are queued.
* Signal number zero has a specific status, as it cannot be delivered by the
};
#ifdef USE_THREAD
+
static HA_RWLOCK_T *ssl_rwlocks;
return 0;
}
+
#endif
static struct lru64_head *ssl_ctx_lru_tree = NULL;
static unsigned int ssl_ctx_lru_seed = 0;
static unsigned int ssl_ctx_serial;
-
-#ifdef USE_THREAD
-static HA_RWLOCK_T ssl_ctx_lru_rwlock;
-#endif
+__decl_hathreads(static HA_RWLOCK_T ssl_ctx_lru_rwlock);
#endif // SSL_CTRL_SET_TLSEXT_HOSTNAME
struct pool_head *pool2_stream;
struct list streams;
-#ifdef USE_THREAD
-HA_SPINLOCK_T streams_lock;
-#endif
+__decl_hathreads(HA_SPINLOCK_T streams_lock);
/* List of all use-service keywords. */
static struct list service_keywords = LIST_HEAD_INIT(service_keywords);
unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
-#ifdef USE_THREAD
-HA_SPINLOCK_T rq_lock; /* spin lock related to run queue */
-HA_SPINLOCK_T wq_lock; /* spin lock related to wait queue */
-#endif
+__decl_hathreads(HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
+__decl_hathreads(HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */
static struct eb_root timers; /* sorted timers tree */
static struct eb_root rqueue; /* tree constituting the run queue */
static unsigned int var_reqres_limit = 0;
-#ifdef USE_THREAD
-HA_RWLOCK_T var_names_rwlock;
-#endif
+__decl_hathreads(HA_RWLOCK_T var_names_rwlock);
/* This function adds or remove memory size from the accounting. The inner
* pointers may be null when setting the outer ones only.