unsigned int srv_dynamic_maxconn(const struct server *s)
{
unsigned int max;
- unsigned long last_change;
if (s->proxy->beconn >= s->proxy->fullconn)
	/* no fullconn or proxy is full */
	max = s->maxconn;
else max = MAX(s->minconn,
	       s->proxy->beconn * s->maxconn / s->proxy->fullconn);
- last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
-
if ((s->cur_state == SRV_ST_STARTING) &&
- ns_to_sec(now_ns) < last_change + s->slowstart &&
- ns_to_sec(now_ns) >= last_change) {
+ ns_to_sec(now_ns) < s->last_change + s->slowstart &&
+ ns_to_sec(now_ns) >= s->last_change) {
unsigned int ratio;
- ratio = 100 * (ns_to_sec(now_ns) - last_change) / s->slowstart;
+ ratio = 100 * (ns_to_sec(now_ns) - s->last_change) / s->slowstart;
max = MAX(1, max * ratio / 100);
}
return max;
}
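
Note on the hunk above: the slowstart branch scales the computed limit by the fraction of the warm-up window elapsed since s->last_change, with a floor of one connection. A minimal sketch of that arithmetic with made-up values (60s slowstart, 15s elapsed, a computed limit of 200):

    #include <stdio.h>
    #define MAX(a, b) ((a) > (b) ? (a) : (b))   /* stand-in for HAProxy's MAX macro */

    int main(void)
    {
        unsigned int slowstart = 60;   /* assumed warm-up window, in seconds */
        unsigned int elapsed   = 15;   /* assumed ns_to_sec(now_ns) - s->last_change */
        unsigned int max       = 200;  /* assumed limit from minconn/maxconn/fullconn */
        unsigned int ratio     = 100 * elapsed / slowstart;  /* 25 */

        max = MAX(1, max * ratio / 100);  /* 200 * 25 / 100 = 50 */
        printf("throttled dynamic maxconn: %u\n", max);
        return 0;
    }
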
int srv_downtime(const struct server *s)
{
- unsigned long last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
-
- if ((s->cur_state != SRV_ST_STOPPED) || last_change >= ns_to_sec(now_ns)) // ignore negative time
+ if ((s->cur_state != SRV_ST_STOPPED) || s->last_change >= ns_to_sec(now_ns)) // ignore negative time
return s->down_time;
- return ns_to_sec(now_ns) - last_change + s->down_time;
+ return ns_to_sec(now_ns) - s->last_change + s->down_time;
}
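
As restored here, srv_downtime() adds the still-running outage on top of the accumulated s->down_time only while the server remains stopped. A quick numeric sketch with assumed values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int last_change = 100;  /* assumed second of the DOWN transition */
        unsigned int down_time   = 40;   /* assumed seconds accumulated by earlier outages */
        unsigned int now_sec     = 160;  /* assumed current time, in seconds */

        /* while the server is still DOWN, the live outage is added on the fly */
        printf("reported downtime: %u\n", now_sec - last_change + down_time);  /* 100 */
        return 0;
    }
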
int srv_getinter(const struct check *check)
*/
void server_recalc_eweight(struct server *sv, int must_update)
{
- unsigned long last_change = COUNTERS_SHARED_LAST(sv->counters.shared->tg, last_change);
struct proxy *px = sv->proxy;
unsigned w;
- if (ns_to_sec(now_ns) < last_change || ns_to_sec(now_ns) >= last_change + sv->slowstart) {
+ if (ns_to_sec(now_ns) < sv->last_change || ns_to_sec(now_ns) >= sv->last_change + sv->slowstart) {
/* go to full throttle if the slowstart interval is reached unless server is currently down */
if ((sv->cur_state != SRV_ST_STOPPED) && (sv->next_state == SRV_ST_STARTING))
sv->next_state = SRV_ST_RUNNING;
if ((sv->cur_state == SRV_ST_STOPPED) && (sv->next_state == SRV_ST_STARTING) && (px->lbprm.algo & BE_LB_PROP_DYN))
w = 1;
else if ((sv->next_state == SRV_ST_STARTING) && (px->lbprm.algo & BE_LB_PROP_DYN))
- w = (px->lbprm.wdiv * (ns_to_sec(now_ns) - last_change) + sv->slowstart) / sv->slowstart;
+ w = (px->lbprm.wdiv * (ns_to_sec(now_ns) - sv->last_change) + sv->slowstart) / sv->slowstart;
else
w = px->lbprm.wdiv;
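
The warm-up branch above ramps the weight factor linearly with the time elapsed since sv->last_change; the "+ sv->slowstart" term keeps it at least 1 right after the transition. A rough sketch with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int wdiv      = 256;  /* assumed px->lbprm.wdiv */
        unsigned int slowstart = 20;   /* assumed warm-up window, in seconds */
        unsigned int elapsed   = 5;    /* assumed ns_to_sec(now_ns) - sv->last_change */

        /* linear ramp; the "+ slowstart" term keeps the result >= 1 at elapsed == 0 */
        unsigned int w = (wdiv * elapsed + slowstart) / slowstart;
        printf("warming weight factor: %u of %u\n", w, wdiv);  /* 65 of 256 */
        return 0;
    }
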
srv->rid = 0; /* rid defaults to 0 */
srv->next_state = SRV_ST_RUNNING; /* early server setup */
+ srv->last_change = ns_to_sec(now_ns);
srv->check.obj_type = OBJ_TYPE_CHECK;
srv->check.status = HCHK_STATUS_INI;
/* updates the server's weight during a warmup stage. Once the final weight is
* reached, the task automatically stops. Note that any server status change
- * must have updated s->counters.last_change accordingly.
+ * must have updated s->last_change accordingly.
*/
static struct task *server_warmup(struct task *t, void *context, unsigned int state)
{
if (srv->next_state == SRV_ST_STARTING) {
task_schedule(srv->warmup,
tick_add(now_ms,
- MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - COUNTERS_SHARED_LAST(srv->counters.shared->tg, last_change))) / 20)));
+ MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - srv->last_change)) / 20)));
}
}
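
For the reschedule restored above, evaluating the interval expression with an assumed elapsed time shows the step it produces (the result is what gets handed to MS_TO_TICKS(), in milliseconds):

    #include <stdio.h>
    #define MAX(a, b) ((a) > (b) ? (a) : (b))   /* stand-in for HAProxy's MAX macro */

    int main(void)
    {
        unsigned int elapsed = 30;  /* assumed ns_to_sec(now_ns) - srv->last_change */

        /* step used to re-arm the warmup task */
        printf("reschedule step: %u ms\n", MAX(1000, elapsed) / 20);  /* 50 */
        return 0;
    }
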
/* check if server stats must be updated due to the server state change */
if (srv_prev_state != s->cur_state) {
if (srv_prev_state == SRV_ST_STOPPED) {
- unsigned long last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
-
/* server was down and no longer is */
- if (last_change < ns_to_sec(now_ns)) // ignore negative times
- s->down_time += ns_to_sec(now_ns) - last_change;
+ if (s->last_change < ns_to_sec(now_ns)) // ignore negative times
+ s->down_time += ns_to_sec(now_ns) - s->last_change;
_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_UP, cb_data.common, s);
}
else if (s->cur_state == SRV_ST_STOPPED) {
HA_ATOMIC_STORE(&s->proxy->ready_srv, NULL);
HA_ATOMIC_STORE(&s->counters.shared->tg[tgid - 1]->last_change, ns_to_sec(now_ns));
+ s->last_change = ns_to_sec(now_ns);
/* publish the state change */
_srv_event_hdl_prepare_state(&cb_data.state,