* - end of HANDSHAKE with CONNECTED set
* - regular data layer activity
*
+ * One tricky case is the wake-up on read0 or error on an idle
+ * backend connection, which can happen on a connection that is still
+ * polled while at the same moment another thread is about to perform a
+ * takeover. The solution against this is to remove the connection from
+ * the idle list if it was in it, and possibly reinsert it at the end
+ * if the connection remains valid. The cost is non-zero (locked tree
+ * removal) but remains low given that this is extremely rarely called.
+ * In any case it's guaranteed by the FD's thread_mask that we're
+ * called from the same thread the connection is queued in.
+ *
* Note that the wake callback is allowed to release the connection and
* the fd (and return < 0 in this case).
*/
((conn->flags ^ old_flags) & CO_FL_NOTIFY_DONE) ||
((old_flags & CO_FL_WAIT_XPRT) && !(conn->flags & CO_FL_WAIT_XPRT))) &&
conn->mux && conn->mux->wake) {
+ uint conn_in_list = conn->flags & CO_FL_LIST_MASK;
+ struct server *srv = objt_server(conn->target);
+
+ if (conn_in_list) {
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ conn_delete_from_tree(&conn->hash_node->node);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+
ret = conn->mux->wake(conn);
if (ret < 0)
goto done;
+
+ if (conn_in_list) {
+ struct eb_root *root = (conn_in_list == CO_FL_SAFE_LIST) ?
+ &srv->per_thr[tid].safe_conns :
+ &srv->per_thr[tid].idle_conns;
+
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ ebmb_insert(root, &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
}
done:
return ret;