performed. This has an impact on the kernel's memory footprint, so this must
not be changed if impacts are not understood.
+tune.pool-low-fd-ratio <number>
+ This setting expresses a percentage of the total number of file descriptors
+ haproxy may use. Once this many file descriptors are in use globally, haproxy
+ stops adding connections to the idle pool for reuse. The default is 20.
+
+tune.pool-high-fd-ratio <number>
+ This setting expresses a percentage of the total number of file descriptors
+ haproxy may use. Once this many file descriptors are in use globally, haproxy
+ starts killing idle connections whenever it cannot reuse an existing
+ connection and has to create a new one. The default is 25 (with one quarter
+ of the file descriptors used this way, roughly half of the maximum front
+ connections can each keep an idle connection behind them; going beyond this
+ rarely makes sense in the general case when targeting connection reuse).
+
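 As a rough illustration of how these percentages map to absolute limits, the
 short standalone C sketch below recomputes the two thresholds the same way the
 init-time code in this patch does against global.maxsock (the fd_threshold()
 helper and the maxsock value of 100000 are purely illustrative and not part of
 haproxy):

     #include <stdio.h>

     /* illustrative helper: turn a percentage of the maximum number of
      * sockets into an absolute FD count, rounding up
      */
     static int fd_threshold(int maxsock, int ratio)
     {
             return ((long long)maxsock * ratio + 99) / 100;
     }

     int main(void)
     {
             int maxsock = 100000; /* example stand-in for global.maxsock */

             /* with the defaults (20% and 25%): connections stop being added
              * to the idle pool at 20000 used FDs, and idle connections start
              * being killed at 25000 used FDs
              */
             printf("low threshold:  %d\n", fd_threshold(maxsock, 20));
             printf("high threshold: %d\n", fd_threshold(maxsock, 25));
             return 0;
     }
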
tune.rcvbuf.client <number>
tune.rcvbuf.server <number>
Forces the kernel socket receive buffer size on the client or the server side
(srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
!(conn->flags & CO_FL_PRIVATE) &&
((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
- !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn)) {
+ !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn) &&
+ ha_used_fds < global.tune.pool_low_count) {
int retadd;
retadd = _HA_ATOMIC_ADD(&srv->curr_idle_conns, 1);
int pattern_cache; /* max number of entries in the pattern cache. */
int sslcachesize; /* SSL cache size in session, defaults to 20000 */
int comp_maxlevel; /* max HTTP compression level */
+ int pool_low_ratio; /* max ratio of FDs used before we stop adding connections to the idle pool */
+ int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new ones */
+ int pool_low_count; /* max number of open FDs before we stop adding connections to the idle pool */
+ int pool_high_count; /* max number of open FDs before we start killing idle connections when creating new ones */
unsigned short idle_timer; /* how long before an empty buffer is considered idle (ms) */
} tune;
struct {
reuse = 0;
}
}
+ if ((!reuse || (srv_conn && !(srv_conn->flags & CO_FL_CONNECTED)))
+ && ha_used_fds > global.tune.pool_high_count) {
+ struct connection *tokill_conn;
+
+ /* We can't reuse a connection, and we have more FDs than deemed
+  * acceptable; attempt to kill an idling connection
+ */
+ /* First, try from our own idle list */
+ tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[tid],
+ struct connection *, list);
+ if (tokill_conn)
+ tokill_conn->mux->destroy(tokill_conn->ctx);
+ /* If not, iterate over other threads' idling pools, and try to grab one */
+ else {
+ int i;
+
+ for (i = 0; i < global.nbthread; i++) {
+ if (i == tid)
+ continue;
+ tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[i],
+ struct connection *, list);
+ if (tokill_conn) {
+ /* We got one, put it into the concerned thread's to-kill list, and wake its kill task */
+
+ LIST_ADDQ_LOCKED(&toremove_connections[i],
+ &tokill_conn->list);
+ task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
+ break;
+ }
+ }
+ }
+ }
/* If we're really reusing the connection, remove it from the orphan
* list and add it back to the idle list.
*/
.chksize = (BUFSIZE + 2*sizeof(void *) - 1) & -(2*sizeof(void *)),
.reserved_bufs = RESERVED_BUFS,
.pattern_cache = DEFAULT_PAT_LRU_SIZE,
+ .pool_low_ratio = 20,
+ .pool_high_ratio = 25,
#ifdef USE_OPENSSL
.sslcachesize = SSLCACHESIZE,
#endif
global.maxsock += global.maxconn * sides * global.ssl_used_async_engines;
}
+ /* update connection pool thresholds */
+ global.tune.pool_low_count = ((long long)global.maxsock * global.tune.pool_low_ratio + 99) / 100;
+ global.tune.pool_high_count = ((long long)global.maxsock * global.tune.pool_high_ratio + 99) / 100;
+
proxy_adjust_all_maxconn();
if (global.tune.maxpollevents <= 0)
return task;
}
+
+/* config parser for global "tune.pool-{low,high}-fd-ratio" */
+static int cfg_parse_pool_fd_ratio(char **args, int section_type, struct proxy *curpx,
+ struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int arg = -1;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) != 0)
+ arg = atoi(args[1]);
+
+ if (arg < 0 || arg > 100) {
+ memprintf(err, "'%s' expects an integer argument between 0 and 100.", args[0]);
+ return -1;
+ }
+
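+ /* the character right after "tune.pool-" distinguishes "high" from "low" */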
+ if (args[0][10] == 'h')
+ global.tune.pool_high_ratio = arg;
+ else
+ global.tune.pool_low_ratio = arg;
+ return 0;
+}
+
+/* config keyword parsers */
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_GLOBAL, "tune.pool-high-fd-ratio", cfg_parse_pool_fd_ratio },
+ { CFG_GLOBAL, "tune.pool-low-fd-ratio", cfg_parse_pool_fd_ratio },
+ { 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
/*
* Local variables:
* c-indent-level: 8