int arg_opt2; /* extra option 2 for the LB algo (algo-specific) */
int arg_opt3; /* extra option 3 for the LB algo (algo-specific) */
struct server *fbck; /* first backup server when !PR_O_USE_ALL_BK, or NULL */
- __decl_thread(HA_SPINLOCK_T lock);
+ __decl_thread(HA_RWLOCK_T lock);
/* Call backs for some actions. Any of them may be NULL (thus should be ignored). */
void (*update_server_eweight)(struct server *); /* to be called after eweight change */
}
break;
}
- HA_SPIN_INIT(&curproxy->lbprm.lock);
+ HA_RWLOCK_INIT(&curproxy->lbprm.lock);
if (curproxy->options & PR_O_LOGASAP)
curproxy->to_log &= ~LW_BYTES;
p0 = p;
p = p->next;
- HA_SPIN_DESTROY(&p0->lbprm.lock);
+ HA_RWLOCK_DESTROY(&p0->lbprm.lock);
HA_SPIN_DESTROY(&p0->lock);
free(p0);
}/* end while(p) */
if (!srv_lb_status_changed(srv))
return;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (srv_willbe_usable(srv))
goto out_update_state;
out_update_state:
srv_lb_commit_status(srv);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
}
/* This function updates the server trees according to server <srv>'s new
if (!srv_lb_status_changed(srv))
return;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (!srv_willbe_usable(srv))
goto out_update_state;
out_update_state:
srv_lb_commit_status(srv);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
}
/* This function must be called after an update to server <srv>'s effective
return;
}
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
/* only adjust the server's presence in the tree */
chash_queue_dequeue_srv(srv);
update_backend_weight(p);
srv_lb_commit_status(srv);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
}
/*
unsigned int dn, dp;
int loop;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
root = &p->lbprm.chash.act;
}
out:
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return nsrv;
}
srv = avoided = NULL;
avoided_node = NULL;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
root = &p->lbprm.chash.act;
else if (p->lbprm.fbck) {
}
out:
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
*/
/* NOTE(review): this span is a patch hunk ('-' = removed, '+' = added),
 * part of a conversion of the proxy's lbprm lock from a spinlock to a
 * read/write lock.
 *
 * fas_srv_reposition(): requeues server <s> in its "first available
 * server" ebtree after its sorting key changed. The dequeue+queue pair
 * mutates the tree, so the lbprm lock is taken for WRITING (hence
 * HA_RWLOCK_WRLOCK replacing HA_SPIN_LOCK). Nothing is done when the
 * server is not currently queued (s->lb_tree == NULL).
 */
static void fas_srv_reposition(struct server *s)
{
- HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
if (s->lb_tree) {
fas_dequeue_srv(s);
fas_queue_srv(s);
}
- HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
}
/* This function updates the server trees according to server <srv>'s new
if (srv_willbe_usable(srv))
goto out_update_state;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (!srv_currently_usable(srv))
/* server was already down */
out_update_backend:
/* check/update tot_used, tot_weight */
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
if (!srv_willbe_usable(srv))
goto out_update_state;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (srv_currently_usable(srv))
/* server was already up */
out_update_backend:
/* check/update tot_used, tot_weight */
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
return;
}
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (srv->lb_tree)
fas_dequeue_srv(srv);
fas_queue_srv(srv);
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
srv_lb_commit_status(srv);
}
srv = avoided = NULL;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
node = eb32_first(&p->lbprm.fas.act);
else if (p->lbprm.fbck) {
if (!srv)
srv = avoided;
out:
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
*/
/* NOTE(review): this span is a patch hunk ('-' = removed, '+' = added),
 * part of the same spinlock -> rwlock conversion of the lbprm lock.
 *
 * fwlc_srv_reposition(): requeues server <s> in its "fewest least
 * connections" ebtree after its key changed. As the tree is modified
 * by the dequeue+queue pair, the lbprm lock is taken for WRITING
 * (HA_RWLOCK_WRLOCK replacing HA_SPIN_LOCK). No-op when the server is
 * not in a tree (s->lb_tree == NULL).
 */
static void fwlc_srv_reposition(struct server *s)
{
- HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
if (s->lb_tree) {
fwlc_dequeue_srv(s);
fwlc_queue_srv(s);
}
- HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
}
/* This function updates the server trees according to server <srv>'s new
if (srv_willbe_usable(srv))
goto out_update_state;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (!srv_currently_usable(srv))
out_update_backend:
/* check/update tot_used, tot_weight */
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
if (!srv_willbe_usable(srv))
goto out_update_state;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (srv_currently_usable(srv))
/* server was already up */
out_update_backend:
/* check/update tot_used, tot_weight */
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
return;
}
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (srv->lb_tree)
fwlc_dequeue_srv(srv);
fwlc_queue_srv(srv);
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
srv_lb_commit_status(srv);
}
srv = avoided = NULL;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
node = eb32_first(&p->lbprm.fwlc.act);
else if (p->lbprm.fbck) {
if (!srv)
srv = avoided;
out:
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
if (srv_willbe_usable(srv))
goto out_update_state;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (!srv_currently_usable(srv))
/* server was already down */
out_update_backend:
/* check/update tot_used, tot_weight */
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
if (!srv_willbe_usable(srv))
goto out_update_state;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (srv_currently_usable(srv))
/* server was already up */
out_update_backend:
/* check/update tot_used, tot_weight */
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
return;
}
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
grp->next_weight = grp->next_weight - srv->cur_eweight + srv->next_eweight;
}
update_backend_weight(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
srv_lb_commit_status(srv);
}
struct fwrr_group *grp;
int switched;
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
grp = &p->lbprm.fwrr.act;
else if (p->lbprm.fbck) {
}
}
out:
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
goto out_update_state;
/* FIXME: could be optimized since we know what changed */
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
recount_servers(p);
update_backend_weight(p);
recalc_server_map(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
}
goto out_update_state;
/* FIXME: could be optimized since we know what changed */
- HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
recount_servers(p);
update_backend_weight(p);
recalc_server_map(p);
- HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
out_update_state:
srv_lb_commit_status(srv);
}
int newidx, avoididx;
struct server *srv, *avoided;
- HA_SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
if (px->lbprm.tot_weight == 0) {
avoided = NULL;
goto out;
px->lbprm.map.rr_idx = avoididx;
out:
- HA_SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
/* return NULL or srvtoavoid if found */
return avoided;
}
{
struct server *srv = NULL;
- HA_SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
if (px->lbprm.tot_weight)
srv = px->lbprm.map.srv[hash % px->lbprm.tot_weight];
- HA_SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
return srv;
}