]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net_sched: sch_hhf: annotate data-races in hhf_dump_stats()
Author: Eric Dumazet <edumazet@google.com>
Tue, 21 Apr 2026 14:33:49 +0000 (14:33 +0000)
Committer: Jakub Kicinski <kuba@kernel.org>
Thu, 23 Apr 2026 04:12:40 +0000 (21:12 -0700)
hhf_dump_stats() runs with only RTNL held (no qdisc spinlock),
while it reads fields that can be changed concurrently in the qdisc fast path.

Add READ_ONCE()/WRITE_ONCE() annotations.

Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Link: https://patch.msgid.link/20260421143349.4052215-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/sched/sch_hhf.c

index 95e5d9bfd9c8c0cac08e080b8f1e0332e722aa3b..96021f52d835b56339509565ca03fe796593e231 100644 (file)
@@ -198,7 +198,8 @@ static struct hh_flow_state *seek_list(const u32 hash,
                                return NULL;
                        list_del(&flow->flowchain);
                        kfree(flow);
-                       q->hh_flows_current_cnt--;
+                       WRITE_ONCE(q->hh_flows_current_cnt,
+                                  q->hh_flows_current_cnt - 1);
                } else if (flow->hash_id == hash) {
                        return flow;
                }
@@ -226,7 +227,7 @@ static struct hh_flow_state *alloc_new_hh(struct list_head *head,
        }
 
        if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
-               q->hh_flows_overlimit++;
+               WRITE_ONCE(q->hh_flows_overlimit, q->hh_flows_overlimit + 1);
                return NULL;
        }
        /* Create new entry. */
@@ -234,7 +235,7 @@ static struct hh_flow_state *alloc_new_hh(struct list_head *head,
        if (!flow)
                return NULL;
 
-       q->hh_flows_current_cnt++;
+       WRITE_ONCE(q->hh_flows_current_cnt, q->hh_flows_current_cnt + 1);
        INIT_LIST_HEAD(&flow->flowchain);
        list_add_tail(&flow->flowchain, head);
 
@@ -309,7 +310,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
                        return WDRR_BUCKET_FOR_NON_HH;
                flow->hash_id = hash;
                flow->hit_timestamp = now;
-               q->hh_flows_total_cnt++;
+               WRITE_ONCE(q->hh_flows_total_cnt, q->hh_flows_total_cnt + 1);
 
                /* By returning without updating counters in q->hhf_arrays,
                 * we implicitly implement "shielding" (see Optimization O1).
@@ -403,7 +404,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                return NET_XMIT_SUCCESS;
 
        prev_backlog = sch->qstats.backlog;
-       q->drop_overlimit++;
+       WRITE_ONCE(q->drop_overlimit, q->drop_overlimit + 1);
        /* Return Congestion Notification only if we dropped a packet from this
         * bucket.
         */
@@ -686,10 +687,10 @@ static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct tc_hhf_xstats st = {
-               .drop_overlimit = q->drop_overlimit,
-               .hh_overlimit   = q->hh_flows_overlimit,
-               .hh_tot_count   = q->hh_flows_total_cnt,
-               .hh_cur_count   = q->hh_flows_current_cnt,
+               .drop_overlimit = READ_ONCE(q->drop_overlimit),
+               .hh_overlimit   = READ_ONCE(q->hh_flows_overlimit),
+               .hh_tot_count   = READ_ONCE(q->hh_flows_total_cnt),
+               .hh_cur_count   = READ_ONCE(q->hh_flows_current_cnt),
        };
 
        return gnet_stats_copy_app(d, &st, sizeof(st));