PROXY_LOCK,
SERVER_LOCK,
UPDATED_SERVERS_LOCK,
+ LBPRM_LOCK,
SIGNALS_LOCK,
LOCK_LABELS
};
const char *labels[LOCK_LABELS] = {"THREAD_SYNC", "FDTAB", "FDCACHE", "FD", "POLL",
"TASK_RQ", "TASK_WQ", "POOL",
"LISTENER", "LISTENER_QUEUE", "PROXY", "SERVER",
- "UPDATED_SERVERS", "SIGNALS" };
+ "UPDATED_SERVERS", "LBPRM", "SIGNALS" };
int lbl;
for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
#include <types/proxy.h>
#include <types/server.h>
-void map_set_server_status_down(struct server *srv);
-void map_set_server_status_up(struct server *srv);
void recalc_server_map(struct proxy *px);
void init_server_map(struct proxy *p);
struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid);
#define _TYPES_BACKEND_H
#include <common/config.h>
+#include <common/hathreads.h>
+
#include <types/lb_chash.h>
#include <types/lb_fas.h>
#include <types/lb_fwlc.h>
struct lb_fwlc fwlc;
struct lb_chash chash;
struct lb_fas fas;
+#ifdef USE_THREAD
+ HA_SPINLOCK_T lock;
+#endif
/* Callbacks for some actions. Any of them may be NULL (thus should be ignored). */
void (*update_server_eweight)(struct server *); /* to be called after eweight change */
void (*set_server_status_up)(struct server *); /* to be called after status changes to UP */
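Since any of these callbacks may be NULL, callers are expected to test the pointer before invoking it. A minimal sketch of that convention (the call site is illustrative, not part of this patch):

	/* NULL-tolerant callback invocation, per the comment above */
	if (px->lbprm.set_server_status_up)
		px->lbprm.set_server_status_up(srv);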
#include <common/config.h>
#include <types/server.h>
-/* values for map.state */
-#define LB_MAP_RECALC (1 << 0)
-
struct lb_map {
struct server **srv; /* the server map used to apply weights */
int rr_idx; /* next server to be elected in round robin mode */
- int state; /* LB_MAP_RECALC */
};
#endif /* _TYPES_LB_MAP_H */
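For reference, consumers index directly into this map: round robin walks rr_idx through [0, tot_weight), while hash-based methods reduce a hash modulo the total weight, mirroring map_get_server_hash() further down:

	/* Sketch of how the map is consumed (see map_get_server_hash below) */
	struct server *s = px->lbprm.map.srv[hash % px->lbprm.tot_weight];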
* this.
* This function is designed to be called before the server's weight and state
* are committed, so it uses the 'next' weight and state values.
+ *
+ * threads: it is the caller's responsibility to lock the data. For now, this
+ * function is only called from LB modules, so it should be fine. But if you
+ * need to call it from another place, be careful (and update this comment).
*/
void recount_servers(struct proxy *px)
{
/* This function simply updates the backend's tot_weight and tot_used values
* after server weights have been updated. It is designed to be used after
* recount_servers() or equivalent.
+ *
+ * threads: it is the caller's responsibility to lock the data. For now, this
+ * function is only called from LB modules, so it should be fine. But if you
+ * need to call it from another place, be careful (and update this comment).
*/
void update_backend_weight(struct proxy *px)
{
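Because both helpers leave synchronization to the caller, any new call site outside the LB modules would have to take the proxy's LBPRM lock itself. A minimal sketch of that calling pattern, using the lock introduced by this patch (the wrapper name is hypothetical):

	/* Hypothetical external caller honoring the locking contract above */
	static void refresh_backend_weights(struct proxy *px)
	{
		SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
		recount_servers(px);        /* recompute from 'next' values */
		update_backend_weight(px);  /* refresh tot_weight/tot_used */
		SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
	}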
return map_get_server_hash(px, hash);
}
-/*
+/*
* This function tries to find a running server for the proxy <px> following
* the URL parameter hash method. It looks for a specific parameter in the
* URL and hashes it to compute the server ID. This is useful to optimize
else
return map_get_server_hash(px, hash);
}
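In short, the parameter lookup hashes the parameter's value and feeds the result to the map lookup. A minimal sketch of the idea, with gen_hash() standing in as a hypothetical hash helper (the real code first extracts the parameter from the query string):

	/* Sketch: hash the parameter value, then map it to a server */
	unsigned int hash = gen_hash(param_value, param_len);
	struct server *srv = map_get_server_hash(px, hash);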
-
+
/*
* This function applies the load-balancing algorithm to the stream, as
* defined by the backend it is assigned to. The stream is then marked as
s->target = &srv->obj_type;
}
else if (s->be->lbprm.algo & BE_LB_KIND) {
+
/* we must check if we have at least one server available */
if (!s->be->lbprm.tot_weight) {
err = SRV_STATUS_NOSRV;
}
break;
}
+ SPIN_INIT(&curproxy->lbprm.lock);
if (curproxy->options & PR_O_LOGASAP)
curproxy->to_log &= ~LW_BYTES;
p0 = p;
p = p->next;
+ SPIN_DESTROY(&p0->lbprm.lock);
SPIN_DESTROY(&p0->lock);
free(p0);
}/* end while(p) */
struct proxy *p = srv->proxy;
if (!srv_lb_status_changed(srv))
- return;
+ return;
if (srv_willbe_usable(srv))
goto out_update_state;
struct proxy *p = srv->proxy;
if (!srv_lb_status_changed(srv))
- return;
+ return;
if (!srv_willbe_usable(srv))
goto out_update_state;
srv = avoided = NULL;
avoided_node = NULL;
+ SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
root = &p->lbprm.chash.act;
- else if (p->lbprm.fbck)
- return p->lbprm.fbck;
+ else if (p->lbprm.fbck) {
+ srv = p->lbprm.fbck;
+ goto out;
+ }
else if (p->srv_bck)
root = &p->lbprm.chash.bck;
- else
- return NULL;
+ else {
+ srv = NULL;
+ goto out;
+ }
stop = node = p->lbprm.chash.last;
do {
p->lbprm.chash.last = avoided_node;
}
+ out:
+ SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
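The conversion above turns every early return in the lookup path into a goto to a single exit label, so the LBPRM lock is released exactly once on all paths; the same transformation is applied to the fas, fwlc and fwrr lookups below. In condensed form (want_fallback() is a stand-in for the real conditions):

	/* Condensed form of the lock + single-exit pattern used in this patch */
	struct server *lookup(struct proxy *p)
	{
		struct server *srv = NULL;

		SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
		if (want_fallback(p)) {       /* was: return p->lbprm.fbck; */
			srv = p->lbprm.fbck;
			goto out;
		}
		/* ... tree walk assigning srv ... */
	 out:
		SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
		return srv;
	}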
{
if (!s->lb_tree)
return;
+
+ SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
fas_dequeue_srv(s);
fas_queue_srv(s);
+ SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
}
/* This function updates the server trees according to server <srv>'s new
fas_dequeue_srv(srv);
fas_remove_from_tree(srv);
-out_update_backend:
+ out_update_backend:
/* check/update tot_used, tot_weight */
update_backend_weight(p);
out_update_state:
srv = avoided = NULL;
+ SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
node = eb32_first(&p->lbprm.fas.act);
- else if (p->lbprm.fbck)
- return p->lbprm.fbck;
+ else if (p->lbprm.fbck) {
+ srv = p->lbprm.fbck;
+ goto out;
+ }
else if (p->srv_bck)
node = eb32_first(&p->lbprm.fas.bck);
- else
- return NULL;
+ else {
+ srv = NULL;
+ goto out;
+ }
while (node) {
/* OK, we have a server. However, it may be saturated, in which
if (!srv)
srv = avoided;
-
+ out:
+ SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
{
if (!s->lb_tree)
return;
+
+ SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
fwlc_dequeue_srv(s);
fwlc_queue_srv(s);
+ SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
}
/* This function updates the server trees according to server <srv>'s new
srv = avoided = NULL;
+ SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
node = eb32_first(&p->lbprm.fwlc.act);
- else if (p->lbprm.fbck)
- return p->lbprm.fbck;
+ else if (p->lbprm.fbck) {
+ srv = p->lbprm.fbck;
+ goto out;
+ }
else if (p->srv_bck)
node = eb32_first(&p->lbprm.fwlc.bck);
- else
- return NULL;
+ else {
+ srv = NULL;
+ goto out;
+ }
while (node) {
/* OK, we have a server. However, it may be saturated, in which
if (!srv)
srv = avoided;
-
+ out:
+ SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
struct fwrr_group *grp;
grp = (s->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
-
+
/* Delay everything which does not fit into the window and everything
* which does not fit into the theoretical new window.
*/
s->npos >= grp->curr_weight + grp->next_weight) {
/* put into next tree, and readjust npos in case we could
* finally take this back to current. */
- s->npos -= grp->curr_weight;
+ HA_ATOMIC_SUB(&s->npos, grp->curr_weight);
fwrr_queue_by_weight(grp->next, s);
}
else {
&s->proxy->lbprm.fwrr.bck :
&s->proxy->lbprm.fwrr.act;
- s->npos += grp->curr_weight;
+ HA_ATOMIC_ADD(&s->npos, grp->curr_weight);
}
/* prepares a server when it was marked down */
node = eb32_first(&grp->curr);
s = eb32_entry(node, struct server, lb_node);
-
+
if (!node || s->npos > grp->curr_pos) {
/* either we have no server left, or we have a hole */
struct eb32_node *node2;
/* first time ever for this server */
s->lpos = grp->curr_pos;
s->npos = grp->curr_pos + grp->next_weight / s->cur_eweight;
- s->rweight += grp->next_weight % s->cur_eweight;
+ HA_ATOMIC_ADD(&s->rweight, (grp->next_weight % s->cur_eweight));
if (s->rweight >= s->cur_eweight) {
- s->rweight -= s->cur_eweight;
- s->npos++;
+ HA_ATOMIC_SUB(&s->rweight, s->cur_eweight);
+ HA_ATOMIC_ADD(&s->npos, 1);
}
} else {
s->lpos = s->npos;
- s->npos += grp->next_weight / s->cur_eweight;
- s->rweight += grp->next_weight % s->cur_eweight;
+ HA_ATOMIC_ADD(&s->npos, (grp->next_weight / s->cur_eweight));
+ HA_ATOMIC_ADD(&s->rweight, (grp->next_weight % s->cur_eweight));
if (s->rweight >= s->cur_eweight) {
- s->rweight -= s->cur_eweight;
- s->npos++;
+ HA_ATOMIC_SUB(&s->rweight, s->cur_eweight);
+ HA_ATOMIC_ADD(&s->npos, 1);
}
}
}
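The npos/rweight counters can be updated concurrently once multiple threads run the scheduler, so the plain += and -= operators are replaced with the HA_ATOMIC_* helpers. Assuming hathreads.h maps them to the GCC atomic builtins when USE_THREAD is set (the exact memory ordering is an implementation detail), the substitution is equivalent to:

	/* Sketch (assumption): what the helpers behave like under USE_THREAD */
	__sync_add_and_fetch(&s->npos, grp->curr_weight);  /* HA_ATOMIC_ADD */
	__sync_sub_and_fetch(&s->npos, grp->curr_weight);  /* HA_ATOMIC_SUB */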
struct fwrr_group *grp;
int switched;
+ SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act)
grp = &p->lbprm.fwrr.act;
- else if (p->lbprm.fbck)
- return p->lbprm.fbck;
+ else if (p->lbprm.fbck) {
+ srv = p->lbprm.fbck;
+ goto out;
+ }
else if (p->srv_bck)
grp = &p->lbprm.fwrr.bck;
- else
- return NULL;
+ else {
+ srv = NULL;
+ goto out;
+ }
switched = 0;
avoided = NULL;
} while (full);
}
}
+ out:
+ SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv;
}
#include <types/server.h>
#include <proto/backend.h>
+#include <proto/lb_map.h>
#include <proto/proto_http.h>
#include <proto/proto_tcp.h>
#include <proto/queue.h>
/* FIXME: could be optimized since we know what changed */
recount_servers(p);
update_backend_weight(p);
- p->lbprm.map.state |= LB_MAP_RECALC;
+ recalc_server_map(p);
out_update_state:
srv_lb_commit_status(srv);
}
/* FIXME: could be optimized since we know what changed */
recount_servers(p);
update_backend_weight(p);
- p->lbprm.map.state |= LB_MAP_RECALC;
+ recalc_server_map(p);
out_update_state:
srv_lb_commit_status(srv);
}
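Dropping LB_MAP_RECALC removes a lazy-rebuild pattern that would be racy with threads: two streams could both observe the stale flag and rebuild the map concurrently. The removed pattern, kept here for reference, looked like:

	/* Removed lazy pattern (pre-threads): checked on the lookup path */
	if (px->lbprm.map.state & LB_MAP_RECALC)  /* unsynchronized read */
		recalc_server_map(px);            /* concurrent rebuild hazard */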
switch (px->lbprm.tot_used) {
case 0: /* no server */
- px->lbprm.map.state &= ~LB_MAP_RECALC;
return;
default:
tot = px->lbprm.tot_weight;
break;
}
- cur->wscore += cur->next_eweight;
+ HA_ATOMIC_ADD(&cur->wscore, cur->next_eweight);
v = (cur->wscore + tot) / tot; /* result between 0 and 3 */
if (best == NULL || v > max) {
max = v;
}
}
px->lbprm.map.srv[o] = best;
- best->wscore -= tot;
+ HA_ATOMIC_SUB(&best->wscore, tot);
}
- px->lbprm.map.state &= ~LB_MAP_RECALC;
}
/* This function is responsible for building the server MAP for map-based LB
p->lbprm.map.srv = calloc(act, sizeof(struct server *));
/* recounts servers and their weights */
- p->lbprm.map.state = LB_MAP_RECALC;
recount_servers(p);
update_backend_weight(p);
recalc_server_map(p);
int newidx, avoididx;
struct server *srv, *avoided;
- if (px->lbprm.tot_weight == 0)
- return NULL;
-
- if (px->lbprm.map.state & LB_MAP_RECALC)
- recalc_server_map(px);
+ SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
+ if (px->lbprm.tot_weight == 0) {
+ avoided = NULL;
+ goto out;
+ }
if (px->lbprm.map.rr_idx < 0 || px->lbprm.map.rr_idx >= px->lbprm.tot_weight)
px->lbprm.map.rr_idx = 0;
if (avoided)
px->lbprm.map.rr_idx = avoididx;
+ out:
+ SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
/* return NULL or srvtoavoid if found */
return avoided;
}
{
if (px->lbprm.tot_weight == 0)
return NULL;
-
- if (px->lbprm.map.state & LB_MAP_RECALC)
- recalc_server_map(px);
-
return px->lbprm.map.srv[hash % px->lbprm.tot_weight];
}