git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net_sched: Flush gso_skb list too during ->change()
authorCong Wang <xiyou.wangcong@gmail.com>
Wed, 7 May 2025 04:35:58 +0000 (21:35 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 4 Jun 2025 12:36:56 +0000 (14:36 +0200)
[ Upstream commit 2d3cbfd6d54a2c39ce3244f33f85c595844bd7b8 ]

Previously, when reducing a qdisc's limit via the ->change() operation, only
the main skb queue was trimmed, potentially leaving packets in the gso_skb
list. This could result in a NULL pointer dereference when we only check
sch->limit against sch->q.qlen.

This patch introduces a new helper, qdisc_dequeue_internal(), which ensures
both the gso_skb list and the main queue are properly flushed when trimming
excess packets. All relevant qdiscs (codel, fq, fq_codel, fq_pie, hhf, pie)
are updated to use this helper in their ->change() routines.

Fixes: 76e3cc126bb2 ("codel: Controlled Delay AQM")
Fixes: 4b549a2ef4be ("fq_codel: Fair Queue Codel AQM")
Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler")
Fixes: ec97ecf1ebe4 ("net: sched: add Flow Queue PIE packet scheduler")
Fixes: 10239edf86f1 ("net-qdisc-hhf: Heavy-Hitter Filter (HHF) qdisc")
Fixes: d4b36210c2e6 ("net: pkt_sched: PIE AQM scheme")
Reported-by: Will <willsroot@protonmail.com>
Reported-by: Savy <savy@syst3mfailure.io>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/net/sch_generic.h
net/sched/sch_codel.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_fq_pie.c
net/sched/sch_hhf.c
net/sched/sch_pie.c

index 4db11c4695cf63dc83043999fa5a9ff82db39742..a4c670ad8c4ac11d0afdd86c5347dbd6efdbfff0 100644 (file)
@@ -1036,6 +1036,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
        return skb;
 }
 
+static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
+{
+       struct sk_buff *skb;
+
+       skb = __skb_dequeue(&sch->gso_skb);
+       if (skb) {
+               sch->q.qlen--;
+               return skb;
+       }
+       if (direct)
+               return __qdisc_dequeue_head(&sch->q);
+       else
+               return sch->dequeue(sch);
+}
+
 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 {
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
index 30169b3adbbb064c51b6006755d56446570f974c..d99c7386e24e6b66f3fbd66b57312e3da261e60a 100644 (file)
@@ -174,7 +174,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
 
        qlen = sch->q.qlen;
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 
                dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
index 5a1274199fe336cb14b3301e701408734c0d7428..65b12b39e2ec5df3254368b5265ff4a8ba6726dc 100644 (file)
@@ -904,7 +904,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = fq_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                if (!skb)
                        break;
index 01d6eea5b0ce9695706b5f3b1164bd951db704af..60dbc549e99103c2d47ce74fd4d892086a28c680 100644 (file)
@@ -429,7 +429,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
 
        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
-               struct sk_buff *skb = fq_codel_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
index a5b63158f081c8d6935199b8d9c5241de7d520e8..d4bfa3382e118e4f1b84aac5545832c3f8790331 100644 (file)
@@ -360,7 +360,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 
        /* Drop excess packets if new limit is lower */
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                len_dropped += qdisc_pkt_len(skb);
                num_dropped += 1;
index 420ede8753229faae172c0829b60d81e8eaa8de8..433bddcbc0c72450d7307244be372370a3184cd6 100644 (file)
@@ -563,7 +563,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
        qlen = sch->q.qlen;
        prev_backlog = sch->qstats.backlog;
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = hhf_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                rtnl_kfree_skbs(skb, skb);
        }
index c65077f0c0f39832ee97f4e89f25639306b19281..47f5d4adb5a336dfc2a5534f8d7ab9cb5a0a9559 100644 (file)
@@ -193,7 +193,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
        /* Drop excess packets if new limit is lower */
        qlen = sch->q.qlen;
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 
                dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);