bool enqueue = false;
if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
- q->stats.overlimit++;
+ WRITE_ONCE(q->stats.overlimit, q->stats.overlimit + 1);
goto out;
}
/* If packet is ecn capable, mark it if drop probability
* is lower than 10%, else drop it.
*/
- q->stats.ecn_mark++;
+ WRITE_ONCE(q->stats.ecn_mark, q->stats.ecn_mark + 1);
enqueue = true;
}
if (!q->params.dq_rate_estimator)
pie_set_enqueue_time(skb);
- q->stats.packets_in++;
+ WRITE_ONCE(q->stats.packets_in, q->stats.packets_in + 1);
if (qdisc_qlen(sch) > q->stats.maxq)
- q->stats.maxq = qdisc_qlen(sch);
+ WRITE_ONCE(q->stats.maxq, qdisc_qlen(sch));
return qdisc_enqueue_tail(skb, sch);
}
out:
- q->stats.dropped++;
+ WRITE_ONCE(q->stats.dropped, q->stats.dropped + 1);
q->vars.accu_prob = 0;
return qdisc_drop_reason(skb, sch, to_free, reason);
}
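
The plain increments become WRITE_ONCE() stores so that the counters can be read without the qdisc lock, which is what the READ_ONCE() annotations in pie_dump_stats() further down appear to pair with. A minimal sketch of that writer/reader pairing, not part of the patch; pkts_dropped, count_drop() and read_drops() are illustrative stand-ins for a q->stats field and its users:

#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <linux/types.h>

static u32 pkts_dropped;	/* hypothetical stand-in for q->stats.dropped */

/* Writer: runs with the qdisc lock held, so the plain read of the old value
 * is safe; WRITE_ONCE() only keeps the compiler from tearing or eliding the
 * store that a lockless reader may observe.
 */
static void count_drop(void)
{
	WRITE_ONCE(pkts_dropped, pkts_dropped + 1);
}

/* Reader: e.g. a lockless dump; READ_ONCE() pairs with the store above. */
static u32 read_drops(void)
{
	return READ_ONCE(pkts_dropped);
}
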
count = count / dtime;
if (vars->avg_dq_rate == 0)
- vars->avg_dq_rate = count;
+ WRITE_ONCE(vars->avg_dq_rate, count);
else
- vars->avg_dq_rate =
+ WRITE_ONCE(vars->avg_dq_rate,
(vars->avg_dq_rate -
- (vars->avg_dq_rate >> 3)) + (count >> 3);
+ (vars->avg_dq_rate >> 3)) + (count >> 3));
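
The hunk above only wraps the final store; the arithmetic is unchanged. A sketch of that moving average with a hypothetical helper, ewma_eighth(): the estimator keeps 7/8 of the previous rate and folds in 1/8 of the newest sample, using shifts instead of division:

#include <linux/types.h>

/* ">> 3" divides by 8, so the update is avg*7/8 + sample/8; the very first
 * sample seeds the average directly.  E.g. avg = 800, sample = 1600 gives
 * (800 - 100) + 200 = 900.
 */
static u32 ewma_eighth(u32 avg, u32 sample)
{
	if (avg == 0)
		return sample;
	return (avg - (avg >> 3)) + (sample >> 3);
}
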
/* If the queue has receded below the threshold, we hold
* on to the last drain rate calculated, else we reset
if (delta > 0) {
/* prevent overflow */
if (vars->prob < oldprob) {
- vars->prob = MAX_PROB;
+ WRITE_ONCE(vars->prob, MAX_PROB);
/* Prevent normalization error. If probability is at
* maximum value already, we normalize it here, and
* skip the check to do a non-linear drop in the next
} else {
/* prevent underflow */
if (vars->prob > oldprob)
- vars->prob = 0;
+ WRITE_ONCE(vars->prob, 0);
}
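
Both branches implement a saturating update: delta has already been added to vars->prob, so a wrap of the u64 is detected by comparing against the saved oldprob and the value is pinned at the boundary. A sketch with a hypothetical helper, prob_saturate(); MAX_PROB is the ceiling defined in include/net/pie.h:

#include <linux/types.h>
#include <net/pie.h>		/* MAX_PROB */

static u64 prob_saturate(u64 prob, u64 oldprob, s64 delta)
{
	if (delta > 0) {
		if (prob < oldprob)	/* wrapped past U64_MAX: pin at the ceiling */
			prob = MAX_PROB;
	} else {
		if (prob > oldprob)	/* wrapped below zero: pin at 0 */
			prob = 0;
	}
	return prob;
}
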
/* Non-linear drop in probability: Reduce drop probability quickly if
/* Reduce drop probability to 98.4% */
vars->prob -= vars->prob / 64;
- vars->qdelay = qdelay;
+ WRITE_ONCE(vars->qdelay, qdelay);
vars->backlog_old = backlog;
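
In the full pie_calculate_prob() this decay sits behind a check that the measured delay has stayed at zero for consecutive update intervals (elided from this excerpt). Subtracting 1/64 of prob leaves 63/64 of it, about 98.4%, hence the comment; the qdelay store gets WRITE_ONCE() because pie_dump_stats() below reads it with READ_ONCE(). A one-line sketch of the decay, with prob_decay() as an illustrative name:

#include <linux/types.h>

/* 63/64 of the previous value: e.g. 640000 - 640000/64 = 630000. */
static u64 prob_decay(u64 prob)
{
	return prob - prob / 64;
}
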
/* We restart the measurement cycle if the following conditions are met
struct pie_sched_data *q = qdisc_priv(sch);
struct tc_pie_xstats st = {
.prob = q->vars.prob << BITS_PER_BYTE,
- .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
+ .delay = ((u32)PSCHED_TICKS2NS(READ_ONCE(q->vars.qdelay))) /
NSEC_PER_USEC,
- .packets_in = q->stats.packets_in,
- .overlimit = q->stats.overlimit,
- .maxq = q->stats.maxq,
- .dropped = q->stats.dropped,
- .ecn_mark = q->stats.ecn_mark,
+ .packets_in = READ_ONCE(q->stats.packets_in),
+ .overlimit = READ_ONCE(q->stats.overlimit),
+ .maxq = READ_ONCE(q->stats.maxq),
+ .dropped = READ_ONCE(q->stats.dropped),
+ .ecn_mark = READ_ONCE(q->stats.ecn_mark),
};
/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
st.dq_rate_estimating = q->params.dq_rate_estimator;
/* unscale and return dq_rate in bytes per sec */
- if (q->params.dq_rate_estimator)
- st.avg_dq_rate = q->vars.avg_dq_rate *
+ if (st.dq_rate_estimating)
+ st.avg_dq_rate = READ_ONCE(q->vars.avg_dq_rate) *
(PSCHED_TICKS_PER_SEC) >> PIE_SCALE;
	return gnet_stats_copy_app(d, &st, sizeof(st));
}
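
A sketch of the two unit conversions the dump performs, assuming the internal units implied by the estimator hunk above (queuing delay in psched ticks, avg_dq_rate in bytes per psched tick scaled up by 2^PIE_SCALE); pie_delay_usec() and pie_dq_rate_bps() are illustrative names:

#include <linux/time64.h>	/* NSEC_PER_USEC */
#include <linux/types.h>
#include <net/pkt_sched.h>	/* psched_time_t, PSCHED_TICKS2NS, PSCHED_TICKS_PER_SEC */
#include <net/pie.h>		/* PIE_SCALE */

/* Report the queuing delay to userspace in microseconds. */
static u32 pie_delay_usec(psched_time_t qdelay)
{
	return ((u32)PSCHED_TICKS2NS(qdelay)) / NSEC_PER_USEC;
}

/* Unscale the dequeue-rate estimate back to bytes per second. */
static u32 pie_dq_rate_bps(u32 avg_dq_rate)
{
	return avg_dq_rate * (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;
}

These are the xstats that tc -s qdisc show prints for a pie qdisc.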