struct eb32_node node;
};
+/* Each server will have one occurrence of this structure per thread: servers
+ * allocate an array of global.nbthread entries of it ('per_thr' in struct
+ * server), indexed by thread id.
+ */
+struct srv_per_thread {
+ struct eb_root idle_conns; /* Shareable idle connections */
+ struct eb_root safe_conns; /* Safe idle connections */
+ struct eb_root avail_conns; /* Connections in use, but with still new streams available */
+};
+
struct proxy;
struct server {
enum obj_type obj_type; /* object type == OBJ_TYPE_SERVER */
struct eb_root pendconns; /* pending connections */
struct mt_list actconns; /* active connections (used by "shutdown server sessions") */
- struct eb_root *idle_conns_tree; /* shareable idle connections*/
- struct eb_root *safe_conns_tree; /* safe idle connections */
- struct eb_root *available_conns_tree; /* Connection in used, but with still new streams available */
+ struct srv_per_thread *per_thr; /* array of per-thread stuff such as connections lists, may be null */
unsigned int pool_purge_delay; /* Delay before starting to purge the idle conns pool */
unsigned int low_idle_conns; /* min idle connection count to start picking from other threads */
unsigned int max_idle_conns; /* Max number of connection allowed in the orphan connections list */
((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
ha_used_fds < global.tune.pool_high_count &&
(srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
- ((eb_is_empty(&srv->safe_conns_tree[tid]) &&
- (is_safe || eb_is_empty(&srv->idle_conns_tree[tid]))) ||
+ ((eb_is_empty(&srv->per_thr[tid].safe_conns) &&
+ (is_safe || eb_is_empty(&srv->per_thr[tid].idle_conns))) ||
(ha_used_fds < global.tune.pool_low_count &&
(srv->curr_used_conns + srv->curr_idle_conns <=
MAX(srv->curr_used_conns, srv->est_need_conns) + srv->low_idle_conns))) &&
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
- ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
} else {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
- ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
*/
static struct connection *conn_backend_get(struct stream *s, struct server *srv, int is_safe, int64_t hash)
{
- struct eb_root *tree = is_safe ? srv->safe_conns_tree : srv->idle_conns_tree;
struct connection *conn = NULL;
int i; // thread number
int found = 0;
*/
i = tid;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- conn = srv_lookup_conn(&tree[tid], hash);
+ conn = srv_lookup_conn(is_safe ? &srv->per_thr[tid].safe_conns : &srv->per_thr[tid].idle_conns, hash);
if (conn)
conn_delete_from_tree(&conn->hash_node->node);
* the safe list.
*/
if (!conn && !is_safe && srv->curr_safe_nb > 0) {
- conn = srv_lookup_conn(&srv->safe_conns_tree[tid], hash);
+ conn = srv_lookup_conn(&srv->per_thr[tid].safe_conns, hash);
if (conn) {
conn_delete_from_tree(&conn->hash_node->node);
is_safe = 1;
- tree = srv->safe_conns_tree;
}
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (HA_SPIN_TRYLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock) != 0)
continue;
- conn = srv_lookup_conn(&tree[i], hash);
+ conn = srv_lookup_conn(is_safe ? &srv->per_thr[i].safe_conns : &srv->per_thr[i].idle_conns, hash);
while (conn) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
conn_delete_from_tree(&conn->hash_node->node);
}
if (!found && !is_safe && srv->curr_safe_nb > 0) {
- conn = srv_lookup_conn(&srv->safe_conns_tree[i], hash);
+ conn = srv_lookup_conn(&srv->per_thr[i].safe_conns, hash);
while (conn) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
conn_delete_from_tree(&conn->hash_node->node);
_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
found = 1;
is_safe = 1;
- tree = srv->safe_conns_tree;
break;
}
session_add_conn(s->sess, conn, conn->target);
}
else {
- ebmb_insert(&srv->available_conns_tree[tid],
+ ebmb_insert(&srv->per_thr[tid].avail_conns,
&conn->hash_node->node,
sizeof(conn->hash_node->hash));
}
*/
si_release_endpoint(&s->si[1]);
- /* do not reuse if mode is not http or if avail list is not allocated */
- if ((s->be->mode != PR_MODE_HTTP) || (srv && !srv->available_conns_tree))
+ /* do not reuse if mode is not http */
+ if (s->be->mode != PR_MODE_HTTP)
goto skip_reuse;
/* first, search for a matching connection in the session's idle conns */
* Idle conns are necessarily looked up on the same thread so
* that there is no concurrency issues.
*/
- if (!eb_is_empty(&srv->available_conns_tree[tid])) {
- srv_conn = srv_lookup_conn(&srv->available_conns_tree[tid], hash);
+ if (!eb_is_empty(&srv->per_thr[tid].avail_conns)) {
+ srv_conn = srv_lookup_conn(&srv->per_thr[tid].avail_conns, hash);
if (srv_conn)
reuse = 1;
}
* is OK.
*/
- if (ha_used_fds > global.tune.pool_high_count && srv && srv->idle_conns_tree) {
+ if (ha_used_fds > global.tune.pool_high_count && srv) {
struct connection *tokill_conn = NULL;
struct conn_hash_node *conn_node = NULL;
struct ebmb_node *node = NULL;
*/
/* First, try from our own idle list */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
- node = ebmb_first(&srv->idle_conns_tree[tid]);
+ node = ebmb_first(&srv->per_thr[tid].idle_conns);
if (node) {
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
ALREADY_CHECKED(i);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
- node = ebmb_first(&srv->idle_conns_tree[i]);
+ node = ebmb_first(&srv->per_thr[i].idle_conns);
if (node) {
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
}
if (!tokill_conn) {
- node = ebmb_first(&srv->safe_conns_tree[i]);
+ node = ebmb_first(&srv->per_thr[i].safe_conns);
if (node) {
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
if (srv && reuse_mode == PR_O_REUSE_ALWS &&
!(srv_conn->flags & CO_FL_PRIVATE) &&
srv_conn->mux->avail_streams(srv_conn) > 0) {
- ebmb_insert(&srv->available_conns_tree[tid], &srv_conn->hash_node->node, sizeof(srv_conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].avail_conns, &srv_conn->hash_node->node, sizeof(srv_conn->hash_node->hash));
}
else if (srv_conn->flags & CO_FL_PRIVATE ||
(reuse_mode == PR_O_REUSE_SAFE &&
/* initialize idle conns lists */
int i;
- newsrv->available_conns_tree = calloc(global.nbthread, sizeof(*newsrv->available_conns_tree));
-
- if (!newsrv->available_conns_tree) {
- ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
- newsrv->conf.file, newsrv->conf.line, newsrv->id);
+ newsrv->per_thr = calloc(global.nbthread, sizeof(*newsrv->per_thr));
+ if (!newsrv->per_thr) {
+ ha_alert("parsing [%s:%d] : failed to allocate per-thread lists for server '%s'.\n",
+ newsrv->conf.file, newsrv->conf.line, newsrv->id);
cfgerr++;
continue;
}
- for (i = 0; i < global.nbthread; i++)
- newsrv->available_conns_tree[i] = EB_ROOT;
+ for (i = 0; i < global.nbthread; i++) {
+ newsrv->per_thr[i].idle_conns = EB_ROOT;
+ newsrv->per_thr[i].safe_conns = EB_ROOT;
+ newsrv->per_thr[i].avail_conns = EB_ROOT;
+ }
if (newsrv->max_idle_conns != 0) {
if (idle_conn_task == NULL) {
}
}
- newsrv->idle_conns_tree = calloc((unsigned short)global.nbthread, sizeof(*newsrv->idle_conns_tree));
- if (!newsrv->idle_conns_tree) {
- ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
- newsrv->conf.file, newsrv->conf.line, newsrv->id);
- cfgerr++;
- continue;
- }
-
- for (i = 0; i < global.nbthread; i++)
- newsrv->idle_conns_tree[i] = EB_ROOT;
-
- newsrv->safe_conns_tree = calloc(global.nbthread, sizeof(*newsrv->safe_conns_tree));
- if (!newsrv->safe_conns_tree) {
- ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
- newsrv->conf.file, newsrv->conf.line, newsrv->id);
- cfgerr++;
- continue;
- }
-
- for (i = 0; i < global.nbthread; i++)
- newsrv->safe_conns_tree[i] = EB_ROOT;
-
newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(*newsrv->curr_idle_thr));
if (!newsrv->curr_idle_thr)
goto err;
*/
if (srv && ((srv->proxy->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) &&
!(conn->flags & CO_FL_PRIVATE) && conn->mux->avail_streams(conn) > 0)
- ebmb_insert(&srv->available_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].avail_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else if (conn->flags & CO_FL_PRIVATE) {
/* If it fail now, the same will be done in mux->detach() callback */
session_add_conn(sess, conn, conn->target);
free(s->hostname);
free(s->hostname_dn);
free((char*)s->conf.file);
- free(s->idle_conns_tree);
- free(s->safe_conns_tree);
- free(s->available_conns_tree);
+ free(s->per_thr);
free(s->curr_idle_thr);
free(s->resolvers_id);
free(s->addr_node.key);
socket_tcp.obj_type = OBJ_TYPE_SERVER;
MT_LIST_INIT(&socket_tcp.actconns);
socket_tcp.pendconns = EB_ROOT;
- socket_tcp.idle_conns_tree = NULL;
- socket_tcp.safe_conns_tree = NULL;
LIST_ADD(&servers_list, &socket_tcp.global_list);
socket_tcp.next_state = SRV_ST_RUNNING; /* early server setup */
socket_tcp.last_change = 0;
socket_ssl.obj_type = OBJ_TYPE_SERVER;
MT_LIST_INIT(&socket_ssl.actconns);
socket_ssl.pendconns = EB_ROOT;
- socket_ssl.idle_conns_tree = NULL;
- socket_ssl.safe_conns_tree = NULL;
LIST_ADD(&servers_list, &socket_ssl.global_list);
socket_ssl.next_state = SRV_ST_RUNNING; /* early server setup */
socket_ssl.last_change = 0;
if (hlua_states[thr])
lua_close(hlua_states[thr]);
}
+
+ ha_free(&socket_tcp.per_thr);
ha_free((char**)&socket_tcp.conf.file);
+
#ifdef USE_OPENSSL
+ ha_free(&socket_ssl.per_thr);
ha_free((char**)&socket_ssl.conf.file);
#endif
}
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
- ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
- ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
else if (!fconn->conn->hash_node->node.node.leaf_p &&
fcgi_avail_streams(fconn->conn) > 0 && objt_server(fconn->conn->target) &&
!LIST_ADDED(&fconn->conn->session_list)) {
- ebmb_insert(&__objt_server(fconn->conn->target)->available_conns_tree[tid],
+ ebmb_insert(&__objt_server(fconn->conn->target)->per_thr[tid].avail_conns,
&fconn->conn->hash_node->node,
sizeof(fconn->conn->hash_node->hash));
}
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
- ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
- ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
- ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
- ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
else if (!h2c->conn->hash_node->node.node.leaf_p &&
h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
!LIST_ADDED(&h2c->conn->session_list)) {
- ebmb_insert(&__objt_server(h2c->conn->target)->available_conns_tree[tid],
+ ebmb_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
&h2c->conn->hash_node->node,
sizeof(h2c->conn->hash_node->hash));
}
for (i = tid;;) {
did_remove = 0;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
- if (srv_migrate_conns_to_remove(&srv->idle_conns_tree[i], &idle_conns[i].toremove_conns, -1) > 0)
+ if (srv_migrate_conns_to_remove(&srv->per_thr[i].idle_conns, &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
- if (srv_migrate_conns_to_remove(&srv->safe_conns_tree[i], &idle_conns[i].toremove_conns, -1) > 0)
+ if (srv_migrate_conns_to_remove(&srv->per_thr[i].safe_conns, &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (did_remove)
curr_idle + 1;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
- j = srv_migrate_conns_to_remove(&srv->idle_conns_tree[i], &idle_conns[i].toremove_conns, max_conn);
+ j = srv_migrate_conns_to_remove(&srv->per_thr[i].idle_conns, &idle_conns[i].toremove_conns, max_conn);
if (j > 0)
did_remove = 1;
if (max_conn - j > 0 &&
- srv_migrate_conns_to_remove(&srv->safe_conns_tree[i], &idle_conns[i].toremove_conns, max_conn - j) > 0)
+ srv_migrate_conns_to_remove(&srv->per_thr[i].safe_conns, &idle_conns[i].toremove_conns, max_conn - j) > 0)
did_remove = 1;
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
- ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
- ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
+ ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;