struct tcf_t tcfa_tm;
struct gnet_stats_basic_sync tcfa_bstats;
struct gnet_stats_basic_sync tcfa_bstats_hw;
- struct gnet_stats_queue tcfa_qstats;
+
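+ /* lockless drop/overlimit counters that replace the removed tcfa_qstats */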
+ atomic_t tcfa_drops;
+ atomic_t tcfa_overlimits;
+
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
struct gnet_stats_basic_sync __percpu *cpu_bstats;
#define tcf_action common.tcfa_action
#define tcf_tm common.tcfa_tm
#define tcf_bstats common.tcfa_bstats
-#define tcf_qstats common.tcfa_qstats
#define tcf_rate_est common.tcfa_rate_est
#define tcf_lock common.tcfa_lock
static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
{
if (likely(a->cpu_qstats)) {
qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
return;
}
- spin_lock(&a->tcfa_lock);
- qstats_drop_inc(&a->tcfa_qstats);
- spin_unlock(&a->tcfa_lock);
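+ /* the counter is atomic, so tcfa_lock is no longer needed here */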
+ atomic_inc(&a->tcfa_drops);
}
static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
{
if (likely(a->cpu_qstats)) {
qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
return;
}
- spin_lock(&a->tcfa_lock);
- qstats_overlimit_inc(&a->tcfa_qstats);
- spin_unlock(&a->tcfa_lock);
+ atomic_inc(&a->tcfa_overlimits);
}
void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
}
_bstats_update(&a->tcfa_bstats, bytes, packets);
- a->tcfa_qstats.drops += drops;
+ atomic_add(drops, &a->tcfa_drops);
if (hw)
_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
int compat_mode)
{
- int err = 0;
+ struct gnet_stats_queue qstats = {0};
struct gnet_dump d;
+ int err = 0;
if (p == NULL)
goto errout;
if (err < 0)
goto errout;
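+ /* snapshot the atomic counters into a local gnet_stats_queue for dumping */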
+ qstats.drops = atomic_read(&p->tcfa_drops);
+ qstats.overlimits = atomic_read(&p->tcfa_overlimits);
+
if (gnet_stats_copy_basic(&d, p->cpu_bstats,
&p->tcfa_bstats, false) < 0 ||
gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
&p->tcfa_bstats_hw, false) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
gnet_stats_copy_queue(&d, p->cpu_qstats,
- &p->tcfa_qstats,
- p->tcfa_qstats.qlen) < 0)
+ &qstats,
+ qstats.qlen) < 0)
goto errout;
if (gnet_stats_finish_copy(&d) < 0)