* packet timestamp.
*/
if (!params->dq_rate_estimator) {
- vars->qdelay = now - pie_get_enqueue_time(skb);
+ WRITE_ONCE(vars->qdelay,
+ backlog ? now - pie_get_enqueue_time(skb) : 0);
if (vars->dq_tstamp != DTIME_INVALID)
dtime = now - vars->dq_tstamp;
vars->dq_tstamp = now;
- if (backlog == 0)
- vars->qdelay = 0;
-
if (dtime == 0)
return;
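
Note on the hunk above: folding the old trailing `backlog == 0` reset into a single WRITE_ONCE() is what makes the store safe for lockless readers; with two plain stores, a concurrent dump could observe the stale intermediate qdelay. A minimal userspace sketch of the pattern, with C11 atomics standing in for the kernel's READ_ONCE()/WRITE_ONCE() and all names illustrative, not kernel API:

/* sketch: one atomic store instead of two plain stores (illustrative names) */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic long long qdelay;	/* stand-in for vars->qdelay */

/* writer side: a single store, so a lockless reader can never see the
 * pre-reset value when the queue is empty
 */
static void qdelay_update(long long now, long long enq_time, unsigned int backlog)
{
	atomic_store_explicit(&qdelay, backlog ? now - enq_time : 0,
			      memory_order_relaxed);
}

/* reader side: e.g. a stats dump running without the writer's lock */
static long long qdelay_dump(void)
{
	return atomic_load_explicit(&qdelay, memory_order_relaxed);
}

int main(void)
{
	qdelay_update(1000, 400, 1);
	printf("qdelay=%lld\n", qdelay_dump());	/* prints 600 */
	qdelay_update(2000, 400, 0);
	printf("qdelay=%lld\n", qdelay_dump());	/* prints 0 */
	return 0;
}
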
if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
delta += MAX_PROB / (100 / 2);
- vars->prob += delta;
+ WRITE_ONCE(vars->prob, vars->prob + delta);
if (delta > 0) {
/* prevent overflow */
if (qdelay == 0 && qdelay_old == 0 && update_prob)
/* Reduce drop probability to 98.4% */
- vars->prob -= vars->prob / 64;
+ WRITE_ONCE(vars->prob, vars->prob - vars->prob / 64);
- vars->qdelay = qdelay;
+ WRITE_ONCE(vars->qdelay, qdelay);
vars->backlog_old = backlog;
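
The read-modify-write above (`WRITE_ONCE(vars->prob, vars->prob + delta)`) deliberately keeps a plain read on the right-hand side: writes to vars->prob are still serialized by the qdisc lock, so only the store needs annotating, for the benefit of lockless readers. A hedged sketch of that single-writer/lockless-reader split, using a pthread mutex and GCC __atomic builtins as stand-ins (names are illustrative, not kernel API):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for the qdisc lock */
static uint64_t prob;	/* written only under writer_lock, read locklessly */

static void prob_add(int64_t delta)
{
	pthread_mutex_lock(&writer_lock);
	/* plain read of prob is fine: the lock serializes all writers;
	 * the relaxed atomic store pairs with the lockless load below
	 */
	__atomic_store_n(&prob, prob + (uint64_t)delta, __ATOMIC_RELAXED);
	pthread_mutex_unlock(&writer_lock);
}

static uint64_t prob_peek(void)
{
	/* e.g. the stats-dump path: no lock taken */
	return __atomic_load_n(&prob, __ATOMIC_RELAXED);
}

int main(void)
{
	prob_add(1000);
	prob_add(-250);
	return prob_peek() == 750 ? 0 : 1;	/* build with -pthread */
}
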
static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct pie_sched_data *q = qdisc_priv(sch);
struct tc_pie_xstats st = {
- .prob = q->vars.prob << BITS_PER_BYTE,
+ .prob = READ_ONCE(q->vars.prob) << BITS_PER_BYTE,
- .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
- NSEC_PER_USEC,
+ .delay = ((u32)PSCHED_TICKS2NS(READ_ONCE(q->vars.qdelay))) /
+ NSEC_PER_USEC,
.packets_in = READ_ONCE(q->stats.packets_in),
};
/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
- st.dq_rate_estimating = q->params.dq_rate_estimator;
+ st.dq_rate_estimating = READ_ONCE(q->params.dq_rate_estimator);
/* unscale and return dq_rate in bytes per sec */
if (st.dq_rate_estimating)
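
On the `<< BITS_PER_BYTE` above: assuming the MAX_PROB = U64_MAX >> BITS_PER_BYTE convention from include/net/pie.h, prob is kept internally with eight bits of headroom, and the dump shifts it back so userspace sees a probability scaled to the full u64 range. A small standalone check of that round-trip (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE	8
#define MAX_PROB	(UINT64_MAX >> BITS_PER_BYTE)

int main(void)
{
	uint64_t prob = MAX_PROB / 10;			/* internal value: 10% */
	uint64_t scaled = prob << BITS_PER_BYTE;	/* value as dumped */

	/* both lines should print ~10.0% */
	printf("internal: %.1f%%\n", 100.0 * prob / MAX_PROB);
	printf("dumped:   %.1f%%\n", 100.0 * scaled / UINT64_MAX);
	return 0;
}
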