/* Default to only allow L4 retries */
p->retry_type = PR_RE_CONN_FAILED;
- HA_SPIN_INIT(&p->lock);
+ HA_RWLOCK_INIT(&p->lock);
}
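/* Illustrative sketch, not part of the patch: HA_RWLOCK_INIT() above implies
 * that the proxy's lock member is now an HA_RWLOCK_T instead of an
 * HA_SPINLOCK_T (both from haproxy/thread.h). The struct below is a stand-in
 * shown only to make the assumed type change explicit; the real struct proxy
 * has many more members.
 */
#include <haproxy/thread.h>

struct proxy_lock_sketch {
	char *id;                          /* stand-in for the real members */
	__decl_thread(HA_RWLOCK_T lock);   /* was: __decl_thread(HA_SPINLOCK_T lock) */
};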
/* to be called under the proxy lock after stopping some listeners. This will
{
struct listener *l;
- HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
list_for_each_entry(l, &p->conf.listeners, by_fe)
stop_listener(l, 1, 0, 0);
p->disabled = 1;
}
- HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
}
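/* Illustrative sketch, not part of the patch: the benefit of turning
 * PROXY_LOCK into an rwlock is that read-only paths can now take it in shared
 * mode with HA_RWLOCK_RDLOCK()/HA_RWLOCK_RDUNLOCK(), while writers such as
 * stop_proxy() above keep exclusive access through the WRLOCK variants. The
 * helper below is hypothetical and only demonstrates the read-side pattern.
 */
#include <haproxy/list.h>
#include <haproxy/listener-t.h>
#include <haproxy/proxy-t.h>
#include <haproxy/thread.h>

static int count_listeners_sketch(struct proxy *p)
{
	struct listener *l;
	int count = 0;

	HA_RWLOCK_RDLOCK(PROXY_LOCK, &p->lock);
	list_for_each_entry(l, &p->conf.listeners, by_fe)
		count++;
	HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &p->lock);
	return count;
}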
/* This function resumes listening on the specified proxy. It scans all of its
/* note: we still lock since we have to be certain that nobody is
* dumping the output while we free.
*/
- HA_SPIN_LOCK(PROXY_LOCK, &proxy->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &proxy->lock);
if (is_back) {
es = HA_ATOMIC_XCHG(&proxy->invalid_rep, es);
} else {
es = HA_ATOMIC_XCHG(&proxy->invalid_req, es);
}
free(es);
- HA_SPIN_UNLOCK(PROXY_LOCK, &proxy->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &proxy->lock);
}
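/* Illustrative sketch, not part of the patch: the free() above has to stay
 * under the proxy lock because the "show errors" dump path dereferences
 * proxy->invalid_req / proxy->invalid_rep while holding the same lock, so a
 * snapshot can never be released while a dump is still reading it. With the
 * rwlock, a read lock on the reader side is already enough to exclude the
 * write-locked free(). The helper below is hypothetical; ev_id is assumed to
 * be the snapshot counter from struct error_snapshot.
 */
#include <haproxy/proxy-t.h>
#include <haproxy/thread.h>

static unsigned int last_invalid_req_id_sketch(struct proxy *px)
{
	unsigned int id = 0;

	HA_RWLOCK_RDLOCK(PROXY_LOCK, &px->lock);
	if (px->invalid_req)
		id = px->invalid_req->ev_id;
	HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &px->lock);
	return id;
}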
/* Configure all proxies which lack a maxconn setting to use the global one by
/* Note: this lock is to make sure this doesn't change while another
* thread is in srv_set_dyncookie().
*/
- HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
px->ck_opts |= PR_CK_DYNAMIC;
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
for (s = px->srv; s != NULL; s = s->next) {
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
/* Note: this lock is to make sure this doesn't change while another
* thread is in srv_set_dyncookie().
*/
- HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
px->ck_opts &= ~PR_CK_DYNAMIC;
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
for (s = px->srv; s != NULL; s = s->next) {
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
/* Note: this lock is to make sure this doesn't change while another
* thread is in srv_set_dyncookie().
*/
- HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
free(px->dyncookie_key);
px->dyncookie_key = newkey;
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
for (s = px->srv; s != NULL; s = s->next) {
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
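/* Illustrative sketch, not part of the patch: the three CLI handlers above
 * all take the proxy lock so that px->ck_opts and px->dyncookie_key cannot
 * change underneath srv_set_dyncookie() running on another thread. A reader
 * wanting a consistent view of both fields could do something like the
 * hypothetical helper below; the strdup() lets the lock be dropped before
 * the key is actually used.
 */
#include <stdlib.h>
#include <string.h>
#include <haproxy/proxy-t.h>
#include <haproxy/thread.h>

static char *copy_dyncookie_key_sketch(struct proxy *px)
{
	char *key = NULL;

	HA_RWLOCK_RDLOCK(PROXY_LOCK, &px->lock);
	if ((px->ck_opts & PR_CK_DYNAMIC) && px->dyncookie_key)
		key = strdup(px->dyncookie_key);
	HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &px->lock);
	return key;
}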
/* OK, the value is fine, so we assign it to the proxy and to all of
* its listeners. The blocked ones will be dequeued.
*/
- HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
px->maxconn = v;
list_for_each_entry(l, &px->conf.listeners, by_fe) {
if (px->maxconn > px->feconn)
dequeue_proxy_listeners(px);
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
return 1;
}
if (!px->li_ready)
return cli_msg(appctx, LOG_NOTICE, "All sockets are already disabled.\n");
- HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
ret = pause_proxy(px);
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
if (!ret)
return cli_err(appctx, "Failed to pause frontend, check logs for precise cause.\n");
if (px->li_ready == px->li_all)
return cli_msg(appctx, LOG_NOTICE, "All sockets are already enabled.\n");
- HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
ret = resume_proxy(px);
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
if (!ret)
return cli_err(appctx, "Failed to resume frontend, check logs for precise cause (port conflict?).\n");
while (appctx->ctx.errors.px) {
struct error_snapshot *es;
- HA_SPIN_LOCK(PROXY_LOCK, &appctx->ctx.errors.px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &appctx->ctx.errors.px->lock);
if ((appctx->ctx.errors.flag & 1) == 0) {
es = appctx->ctx.errors.px->invalid_req;
appctx->ctx.errors.bol = newline;
};
next:
- HA_SPIN_UNLOCK(PROXY_LOCK, &appctx->ctx.errors.px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &appctx->ctx.errors.px->lock);
appctx->ctx.errors.bol = 0;
appctx->ctx.errors.ptr = -1;
appctx->ctx.errors.flag ^= 1;
return 1;
cant_send_unlock:
- HA_SPIN_UNLOCK(PROXY_LOCK, &appctx->ctx.errors.px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &appctx->ctx.errors.px->lock);
cant_send:
si_rx_room_blk(si);
return 0;
if (p->srv)
HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
else
- HA_SPIN_LOCK(PROXY_LOCK, &p->px->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->px->lock);
}
/* Unlocks the queue the pendconn element belongs to. This relies on both p->px
if (p->srv)
HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
else
- HA_SPIN_UNLOCK(PROXY_LOCK, &p->px->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->px->lock);
}
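/* Illustrative sketch, not part of the patch: pendconn_queue_lock() and
 * pendconn_queue_unlock() above pick either the server lock or the proxy
 * lock depending on where the pendconn is queued, so callers simply bracket
 * their work with the pair. The helper below is hypothetical and assumes it
 * lives next to those functions in src/queue.c, since they are local to that
 * file.
 */
static void inspect_pendconn_sketch(struct pendconn *p)
{
	pendconn_queue_lock(p);
	/* ... p may be inspected or unlinked here, its queue is held ... */
	pendconn_queue_unlock(p);
}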
/* Removes the pendconn from the server/proxy queue. At this stage, the
int maxconn;
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
- HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
maxconn = srv_dynamic_maxconn(s);
while (s->served < maxconn) {
int ret = pendconn_process_next_strm(s, p);
if (!ret)
break;
}
- HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
}
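/* Illustrative note, not part of the patch: in process_srv_queue() above the
 * server's spinlock is taken first and the proxy's rwlock second, and they
 * are released in reverse order. Any new code needing both locks should keep
 * that same order (server outer, proxy inner) to avoid an ABBA deadlock; a
 * hypothetical helper respecting it:
 */
static void with_server_and_proxy_locked_sketch(struct server *s, struct proxy *p)
{
	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);       /* outer lock: server */
	HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);    /* inner lock: proxy */
	/* ... work touching both the server and the proxy queues ... */
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
}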
((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK))))
return 0;
- HA_SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &s->proxy->lock);
maxconn = srv_dynamic_maxconn(s);
while ((p = pendconn_first(&s->proxy->pendconns))) {
if (s->maxconn && s->served + xferred >= maxconn)
task_wakeup(p->strm->task, TASK_WOKEN_RES);
xferred++;
}
- HA_SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &s->proxy->lock);
return xferred;
}