const struct mux_ops *force_mux_ops);
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
-void conn_delete_from_tree(struct connection *conn);
+void conn_delete_from_tree(struct connection *conn, int thr);
void conn_init(struct connection *conn, void *target);
struct connection *conn_new(void *target);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
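/* look up a reusable connection matching <hash> in the relevant per-thread
 * tree: the safe tree when a safe connection is required, the idle tree
 * otherwise.
 */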
conn = srv_lookup_conn(is_safe ? &srv->per_thr[tid].safe_conns : &srv->per_thr[tid].idle_conns, hash);
if (conn)
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
/* If we failed to pick a connection from the idle list, let's try again with
 * the safe list.
 */
if (!conn && !is_safe && srv->curr_safe_nb > 0) {
conn = srv_lookup_conn(&srv->per_thr[tid].safe_conns, hash);
if (conn) {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
is_safe = 1;
}
}
conn = srv_lookup_conn(tree, hash);
while (conn) {
if (conn->mux->takeover && conn->mux->takeover(conn, i, 0) == 0) {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, i);
_HA_ATOMIC_INC(&activity[tid].fd_takeover);
found = 1;
break;
hash_node = eb64_entry(node, struct conn_hash_node, node);
conn = hash_node->conn;
if (conn && conn->mux->takeover && conn->mux->takeover(conn, curtid, 1) == 0) {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, curtid);
return conn;
}
node = eb64_next(node);
if (avail <= 1) {
/* no more streams available, remove it from the list */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn_delete_from_tree(srv_conn);
+ conn_delete_from_tree(srv_conn, tid);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (!LIST_ISEMPTY(&srv->per_thr[tid].idle_conn_list)) {
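/* pick the first connection of this thread's idle list; it is about to be
 * killed below.
 */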
tokill_conn = LIST_ELEM(srv->per_thr[tid].idle_conn_list.n, struct connection *, idle_list);
- conn_delete_from_tree(tokill_conn);
+ conn_delete_from_tree(tokill_conn, tid);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* Release the idle lock before calling mux->destroy.
if (!LIST_ISEMPTY(&srv->per_thr[i].idle_conn_list)) {
tokill_conn = LIST_ELEM(srv->per_thr[i].idle_conn_list.n, struct connection *, idle_list);
- conn_delete_from_tree(tokill_conn);
+ conn_delete_from_tree(tokill_conn, i);
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
return NULL;
}
-/* Remove <conn> idle connection from its attached tree (idle, safe or avail).
- * If also present in the secondary server idle list, conn is removed from it.
+/* Remove <conn> idle connection from its attached tree (idle, safe or avail)
+ * owned by the server designated by the connection's target, on thread <thr>.
+ * If the connection is also present in the secondary server idle list, it is
+ * removed from it as well.
*
* Must be called with idle_conns_lock held.
*/
-void conn_delete_from_tree(struct connection *conn)
+void conn_delete_from_tree(struct connection *conn, int thr)
{
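/* detach from the secondary server idle list; LIST_DEL_INIT() also
 * re-initializes the element, so this is harmless if the connection is not
 * linked there.
 */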
LIST_DEL_INIT(&conn->idle_list);
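/* unlink the hash node from whichever per-thread tree currently holds it
 * (idle, safe or avail).
 */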
eb64_delete(&conn->hash_node->node);
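/* Illustrative call pattern (a sketch mirroring the call sites in this patch,
 * not code added by it): the caller locks the idle_conns_lock covering thread
 * <thr>'s trees and passes that same thread id:
 *
 *	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[thr].idle_conns_lock);
 *	conn_delete_from_tree(conn, thr);
 *	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[thr].idle_conns_lock);
 */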
conn_in_list = 0;
}
else {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
conn_in_list = 0;
}
else {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
}
}
* to steal it from us.
*/
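/* CO_FL_LIST_MASK means the connection is linked in one of the server's
 * per-thread idle/safe lists, while CO_FL_SESS_IDLE means it is idling
 * privately in its owner session, hence the two distinct detach paths below.
 */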
if (fconn->conn->flags & CO_FL_LIST_MASK)
- conn_delete_from_tree(fconn->conn);
+ conn_delete_from_tree(fconn->conn, tid);
else if (fconn->conn->flags & CO_FL_SESS_IDLE)
session_detach_idle_conn(fconn->conn->owner, fconn->conn);
conn_in_list = 0;
}
else {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
}
}
* to steal it from us.
*/
if (h1c->conn->flags & CO_FL_LIST_MASK)
- conn_delete_from_tree(h1c->conn);
+ conn_delete_from_tree(h1c->conn, tid);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn_in_list = 0;
}
else {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
}
}
/* connections in error must be removed from the idle lists */
if (conn->flags & CO_FL_LIST_MASK) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
}
/* connections in error must be removed from the idle lists */
if (conn->flags & CO_FL_LIST_MASK) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
}
* to steal it from us.
*/
if (h2c->conn->flags & CO_FL_LIST_MASK)
- conn_delete_from_tree(h2c->conn);
+ conn_delete_from_tree(h2c->conn, tid);
else if (h2c->conn->flags & CO_FL_SESS_IDLE)
session_detach_idle_conn(h2c->conn->owner, h2c->conn);
/* in any case this connection must not be considered idle anymore */
if (h2c->conn->flags & CO_FL_LIST_MASK) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn_delete_from_tree(h2c->conn);
+ conn_delete_from_tree(h2c->conn, tid);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
/* A connection is not reusable if the app layer is closed. */
if (qcc->flags & QC_CF_IS_BACK)
- conn_delete_from_tree(qcc->conn);
+ conn_delete_from_tree(qcc->conn, tid);
out:
qcc->app_st = QCC_APP_ST_SHUT;
if (conn->flags & CO_FL_SESS_IDLE)
session_detach_idle_conn(conn->owner, conn);
else
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
* attempts to steal it from us.
*/
if (qcc->conn->flags & CO_FL_LIST_MASK)
- conn_delete_from_tree(qcc->conn);
+ conn_delete_from_tree(qcc->conn, tid);
else if (qcc->conn->flags & CO_FL_SESS_IDLE)
session_unown_conn(qcc->conn->owner, qcc->conn);
conn_in_list = 0;
}
else {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
}
}
/* connections in error must be removed from the idle lists */
if (conn->flags & CO_FL_LIST_MASK) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
}
* to steal it from us.
*/
if (spop_conn->conn->flags & CO_FL_LIST_MASK)
- conn_delete_from_tree(spop_conn->conn);
+ conn_delete_from_tree(spop_conn->conn, tid);
else if (spop_conn->conn->flags & CO_FL_SESS_IDLE)
session_detach_idle_conn(spop_conn->conn->owner, spop_conn->conn);
break;
conn = LIST_ELEM(srv->per_thr[thr].idle_conn_list.n, struct connection *, idle_list);
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, thr);
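/* queue it on thread <thr>'s toremove list so that the owning thread
 * releases it later.
 */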
MT_LIST_APPEND(&idle_conns[thr].toremove_conns, &conn->toremove_list);
i++;
}
/* Remove the connection from any tree (safe, idle or available) */
if (conn->hash_node) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
conn->flags &= ~CO_FL_LIST_MASK;
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
_HA_ATOMIC_DEC(&srv->curr_used_conns);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
if (conn->ctrl->ctrl_close)
conn->ctrl->ctrl_close(conn);
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, i);
}
}
}
conn_in_list = 0;
}
else {
- conn_delete_from_tree(conn);
+ conn_delete_from_tree(conn, tid);
}
}