#include <proto/log.h>
#include <proto/freq_ctr.h>
+
+__decl_hathreads(extern HA_SPINLOCK_T idle_conn_srv_lock);
+extern struct eb_root idle_conn_srv;
+extern struct task *idle_conn_task;
+extern struct task *idle_conn_cleanup[MAX_THREADS];
+extern struct list toremove_connections[MAX_THREADS];
+
int srv_downtime(const struct server *s);
int srv_lastsession(const struct server *s);
int srv_getinter(const struct check *check);
int snr_resolution_error_cb(struct dns_requester *requester, int error_code);
struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char *ip_family);
struct task *srv_cleanup_idle_connections(struct task *task, void *ctx, unsigned short state);
+struct task *srv_cleanup_toremove_connections(struct task *task, void *context, unsigned short state);
/* increase the number of cumulated connections on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
srv->curr_idle_thr[tid]++;
conn->idle_time = now_ms;
- if (!(task_in_wq(srv->idle_task[tid])) &&
- !(task_in_rq(srv->idle_task[tid])))
- task_schedule(srv->idle_task[tid],
- tick_add(now_ms, srv->pool_purge_delay));
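+ /* The connection was just queued on the idle list. The server is in
+ * the purge tree if and only if its idle_node is linked (leaf_p is
+ * non-NULL): check that lock-free first, then re-check under the lock
+ * before inserting, so that only one thread ever queues the server.
+ * The full barrier orders the list addition before the check.
+ */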
+ __ha_barrier_full();
+ if ((volatile void *)srv->idle_node.node.leaf_p == NULL) {
+ HA_SPIN_LOCK(OTHER_LOCK, &idle_conn_srv_lock);
+ if ((volatile void *)srv->idle_node.node.leaf_p == NULL) {
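+ /* still not queued: the next purge for this server is
+ * <pool_purge_delay> from now
+ */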
+ srv->idle_node.key = tick_add(srv->pool_purge_delay,
+ now_ms);
+ eb32_insert(&idle_conn_srv, &srv->idle_node);
+ if (!task_in_wq(idle_conn_task) &&
+     !task_in_rq(idle_conn_task)) {
+ task_schedule(idle_conn_task,
+ srv->idle_node.key);
+ }
+ }
+ HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conn_srv_lock);
+ }
return 1;
}
return 0;
unsigned int curr_idle_conns; /* Current number of orphan idling connections */
unsigned int *curr_idle_thr; /* Current number of orphan idling connections per thread */
int max_reuse; /* Max number of requests on a same connection */
- struct task **idle_task; /* task responsible for cleaning idle orphan connections */
+ struct eb32_node idle_node; /* When to next clean up this server's idle connections */
struct task *warmup; /* the task dedicated to the warmup when slowstart is set */
struct conn_src conn_src; /* connection source settings */
HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
srv->curr_idle_thr[tid]--;
LIST_ADDQ(&srv->idle_conns[tid], &srv_conn->list);
- if (LIST_ISEMPTY(&srv->idle_orphan_conns[tid]))
- task_unlink_wq(srv->idle_task[tid]);
} else if (reuse) {
if (srv_conn->flags & CO_FL_SESS_IDLE) {
struct session *sess = srv_conn->owner;
}
if (newsrv->max_idle_conns != 0) {
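+ /* first server to use idle connections: create the global purge
+ * task, which may run on any thread, along with the per-thread
+ * cleanup tasks and their lists of connections to remove
+ */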
+ if (idle_conn_task == NULL) {
+ idle_conn_task = task_new(MAX_THREADS_MASK);
+ if (!idle_conn_task)
+ goto err;
+ idle_conn_task->process = srv_cleanup_idle_connections;
+ idle_conn_task->context = NULL;
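+ /* connections may only be destroyed by the thread that owns
+ * them, so each cleanup task is bound to a single thread
+ */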
+ for (i = 0; i < global.nbthread; i++) {
+ idle_conn_cleanup[i] = task_new(1 << i);
+ if (!idle_conn_cleanup[i])
+ goto err;
+ idle_conn_cleanup[i]->process = srv_cleanup_toremove_connections;
+ idle_conn_cleanup[i]->context = NULL;
+ LIST_INIT(&toremove_connections[i]);
+ }
+ }
newsrv->idle_orphan_conns = calloc(global.nbthread, sizeof(*newsrv->idle_orphan_conns));
- newsrv->idle_task = calloc(global.nbthread, sizeof(*newsrv->idle_task));
- if (!newsrv->idle_orphan_conns || !newsrv->idle_task)
+ if (!newsrv->idle_orphan_conns)
goto err;
- for (i = 0; i < global.nbthread; i++) {
+ for (i = 0; i < global.nbthread; i++)
LIST_INIT(&newsrv->idle_orphan_conns[i]);
- newsrv->idle_task[i] = task_new(1 << i);
- if (!newsrv->idle_task[i])
- goto err;
- newsrv->idle_task[i]->process = srv_cleanup_idle_connections;
- newsrv->idle_task[i]->context = newsrv;
- }
newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(*newsrv->curr_idle_thr));
if (!newsrv->curr_idle_thr)
goto err;
free(s->safe_conns);
free(s->idle_orphan_conns);
free(s->curr_idle_thr);
- if (s->idle_task) {
- int i;
-
- for (i = 0; i < global.nbthread; i++)
- task_free(s->idle_task[i]);
- free(s->idle_task);
- }
if (s->use_ssl || s->check.use_ssl) {
if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->destroy_srv)
free(global.desc); global.desc = NULL;
free(oldpids); oldpids = NULL;
task_free(global_listener_queue_task); global_listener_queue_task = NULL;
+ task_free(idle_conn_task);
+ idle_conn_task = NULL;
list_for_each_entry_safe(log, logb, &global.logsrvs, list) {
LIST_DEL(&log->list);
.list = LIST_HEAD_INIT(srv_keywords.list)
};
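+/* Idle connection cleanup: a tree of servers sorted by next purge date,
+ * the global task that walks it, and one cleanup task plus one list of
+ * connections to remove per thread.
+ */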
+__decl_hathreads(HA_SPINLOCK_T idle_conn_srv_lock);
+struct eb_root idle_conn_srv = EB_ROOT;
+struct task *idle_conn_task = NULL;
+struct task *idle_conn_cleanup[MAX_THREADS] = { NULL };
+struct list toremove_connections[MAX_THREADS];
+
int srv_downtime(const struct server *s)
{
if ((s->cur_state != SRV_ST_STOPPED) && s->last_change < now.tv_sec) // ignore negative time
*s->adm_st_chg_cause = 0;
}
-struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsigned short state)
+struct task *srv_cleanup_toremove_connections(struct task *task, void *context, unsigned short state)
{
- struct server *srv = context;
- struct connection *conn, *conn_back;
- unsigned int to_destroy = srv->curr_idle_thr[tid] / 2 + (srv->curr_idle_thr[tid] & 1);
- unsigned int i = 0;
+ struct connection *conn;
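+
+ /* destroy every connection the purge task queued on this thread's
+ * removal list; LIST_POP_LOCKED atomically detaches the first element
+ */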
+ while ((conn = LIST_POP_LOCKED(&toremove_connections[tid],
+ struct connection *, list)) != NULL) {
+ LIST_INIT(&conn->list);
+ conn->mux->destroy(conn);
+ }
+ return task;
+}
- list_for_each_entry_safe(conn, conn_back, &srv->idle_orphan_conns[tid], list) {
- if (i == to_destroy)
+struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsigned short state)
+{
+ struct server *srv;
+ struct eb32_node *eb;
+ int i;
+ unsigned int next_wakeup;
+ int need_wakeup = 0;
+
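+ /* walk the servers in order of their next purge date, and process
+ * every one whose timer has expired
+ */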
+ HA_SPIN_LOCK(OTHER_LOCK, &idle_conn_srv_lock);
+ while (1) {
+ int srv_is_empty = 1;
+
+ eb = eb32_lookup_ge(&idle_conn_srv, now_ms - TIMER_LOOK_BACK);
+ if (!eb) {
+ /* we might have reached the end of the tree, typically because
+ * <now_ms> is in the first half and we're first scanning the last
+ * half. Let's loop back to the beginning of the tree now.
+ */
+
+ eb = eb32_first(&idle_conn_srv);
+ if (likely(!eb))
+ break;
+ }
+ if (tick_is_lt(now_ms, eb->key)) {
+ /* timer not expired yet, revisit it later */
+ next_wakeup = eb->key;
+ need_wakeup = 1;
break;
- conn->mux->destroy(conn);
- i++;
+ }
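+ /* this server's timer expired: queue half of each thread's idle
+ * connections (rounded up) on that thread's removal list and wake
+ * the thread's cleanup task
+ */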
+ srv = eb32_entry(eb, struct server, idle_node);
+ for (i = 0; i < global.nbthread; i++) {
+ int max_conn = (srv->curr_idle_thr[i] / 2) +
+ (srv->curr_idle_thr[i] & 1);
+ int j;
+ int did_remove = 0;
+
+ for (j = 0; j < max_conn; j++) {
+ struct connection *conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[i], struct connection *, list);
+ if (!conn)
+ break;
+ did_remove = 1;
+ LIST_ADDQ_LOCKED(&toremove_connections[i], &conn->list);
+ }
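+ /* some idle connections remain beyond the ones just taken,
+ * so this server will have to be revisited
+ */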
+ if (did_remove && max_conn < srv->curr_idle_thr[i])
+ srv_is_empty = 0;
+ if (did_remove)
+ task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
+ }
+ eb32_delete(&srv->idle_node);
+ if (!srv_is_empty) {
+ /* There are still more idle connections, add the
+ * server back in the tree.
+ */
+ srv->idle_node.key = tick_add(srv->pool_purge_delay,
+ now_ms);
+ eb32_insert(&idle_conn_srv, &srv->idle_node);
+ }
}
- if (!LIST_ISEMPTY(&srv->idle_orphan_conns[tid]))
- task_schedule(task, tick_add(now_ms, srv->pool_purge_delay));
+ HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conn_srv_lock);
+
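+ /* sleep until the earliest non-expired timer, or forever if the
+ * tree is empty
+ */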
+ if (need_wakeup)
+ task->expire = next_wakeup;
else
task->expire = TICK_ETERNITY;
+
return task;
}
/*