limit too high.
tune.maxaccept <number>
- Sets the maximum number of consecutive accepts that a process may perform on
- a single wake up. High values give higher priority to high connection rates,
- while lower values give higher priority to already established connections.
- This value is limited to 100 by default in single process mode. However, in
- multi-process mode (nbproc > 1), it defaults to 8 so that when one process
- wakes up, it does not take all incoming connections for itself and leaves a
- part of them to other processes. Setting this value to -1 completely disables
- the limitation. It should normally not be needed to tweak this value.
+ Sets the maximum number of consecutive connections a process may accept
+ before switching to other work. In single process mode, higher numbers give
+ better performance at high connection rates. However, in multi-process mode,
+ it is generally better to keep some fairness between processes in order to
+ increase overall performance. This value applies individually to each
+ listener, so that the number of processes a listener is bound to is taken
+ into account. This value defaults to 64. In multi-process mode, it is
+ divided by twice the number of processes the listener is bound to. Setting
+ this value to -1 completely disables the limitation. It should normally not
+ be necessary to tweak this value.
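+ For example, with the default value of 64, a listener bound to 4 processes
+ will accept at most 64 / (2 * 4) = 8 connections per process on each
+ wake up.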
tune.maxpollevents <number>
Sets the maximum amount of events that can be processed at once in a call to
int nbconn; /* current number of connections on this listener */
int maxconn; /* maximum connections allowed on this listener */
unsigned int backlog; /* if set, listen backlog */
+ unsigned int maxaccept; /* if set, max number of connections accepted at once */
struct list proto_list; /* list in the protocol header */
int (*accept)(struct listener *l, int fd, struct sockaddr_storage *addr); /* upper layer's accept() */
struct task * (*handler)(struct task *t); /* protocol handler. It is a task */
struct tcp_rule *trule;
struct listener *listener;
unsigned int next_id;
+ int nbproc;
if (curproxy->uuid < 0) {
/* proxy ID not set, use automatic numbering with first
continue;
}
+ /* number of processes this proxy is bound to */
+ nbproc = curproxy->bind_proc ? popcount(curproxy->bind_proc) : global.nbproc;
+
switch (curproxy->mode) {
case PR_MODE_HEALTH:
cfgerr += proxy_cfg_ensure_no_http(curproxy);
listener->maxconn = curproxy->maxconn;
if (!listener->backlog)
listener->backlog = curproxy->backlog;
+ if (!listener->maxaccept)
+ listener->maxaccept = global.tune.maxaccept ? global.tune.maxaccept : 64;
+
+ /* We want optimal behaviour in single process mode in order to
+ * maximize the work done at once, but in multi-process mode we want
+ * to keep some fairness between processes, so we target half of the
+ * maximum number of events, balanced over all the processes the
+ * proxy is bound to. Remember that maxaccept = -1 must be kept as it
+ * is used to disable the limit.
+ */
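+ /* For example, the default of 64 on a listener bound to 4 processes
+ * gives (64 + 1) / 2 = 32, then (32 + 4 - 1) / 4 = 8 accepts per
+ * wake up for each process.
+ */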
+ if (listener->maxaccept > 0) {
+ if (nbproc > 1)
+ listener->maxaccept = (listener->maxaccept + 1) / 2;
+ listener->maxaccept = (listener->maxaccept + nbproc - 1) / nbproc;
+ }
+
listener->timeout = &curproxy->timeout.client;
listener->accept = session_accept;
listener->handler = process_session;
if (global.tune.maxpollevents <= 0)
global.tune.maxpollevents = MAX_POLL_EVENTS;
- if (global.tune.maxaccept == 0) {
- /* Note: we should not try to accept too many connections at once,
- * because past one point we're significantly reducing the cache
- * efficiency and the highest session rate significantly drops.
- * Values between 15 and 35 seem fine on a Core i5 with 4M L3 cache.
- */
- if (global.nbproc > 1)
- global.tune.maxaccept = 8; /* leave some conns to other processes */
- else
- global.tune.maxaccept = 32; /* accept more incoming conns at once */
- }
-
if (global.tune.recv_enough == 0)
global.tune.recv_enough = MIN_RECV_AT_ONCE_ENOUGH;
{
struct listener *l = fdtab[fd].owner;
struct proxy *p = l->frontend;
- int max_accept = global.tune.maxaccept;
+ int max_accept = l->maxaccept;
int cfd;
int ret;