git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net_sched: Flush gso_skb list too during ->change()
authorCong Wang <xiyou.wangcong@gmail.com>
Wed, 7 May 2025 04:35:58 +0000 (21:35 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 22 May 2025 12:12:15 +0000 (14:12 +0200)
[ Upstream commit 2d3cbfd6d54a2c39ce3244f33f85c595844bd7b8 ]

Previously, when reducing a qdisc's limit via the ->change() operation, only
the main skb queue was trimmed, potentially leaving packets in the gso_skb
list. This could result in a NULL pointer dereference when only sch->limit
is checked against sch->q.qlen.

This patch introduces a new helper, qdisc_dequeue_internal(), which ensures
both the gso_skb list and the main queue are properly flushed when trimming
excess packets. All relevant qdiscs (codel, fq, fq_codel, fq_pie, hhf, pie)
are updated to use this helper in their ->change() routines.

Fixes: 76e3cc126bb2 ("codel: Controlled Delay AQM")
Fixes: 4b549a2ef4be ("fq_codel: Fair Queue Codel AQM")
Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler")
Fixes: ec97ecf1ebe4 ("net: sched: add Flow Queue PIE packet scheduler")
Fixes: 10239edf86f1 ("net-qdisc-hhf: Heavy-Hitter Filter (HHF) qdisc")
Fixes: d4b36210c2e6 ("net: pkt_sched: PIE AQM scheme")
Reported-by: Will <willsroot@protonmail.com>
Reported-by: Savy <savy@syst3mfailure.io>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/net/sch_generic.h
net/sched/sch_codel.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_fq_pie.c
net/sched/sch_hhf.c
net/sched/sch_pie.c

index 4ec2a948ae3dbb0b28d20f856c4068aa1934380b..3287988a6a987823a91a8e4d419522e93b84ef89 100644 (file)
@@ -1029,6 +1029,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
        return skb;
 }
 
+static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
+{
+       struct sk_buff *skb;
+
+       skb = __skb_dequeue(&sch->gso_skb);
+       if (skb) {
+               sch->q.qlen--;
+               return skb;
+       }
+       if (direct)
+               return __qdisc_dequeue_head(&sch->q);
+       else
+               return sch->dequeue(sch);
+}
+
 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 {
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
index 5f2e0681574567e14c0589e84c67820a4038b063..63c02040b426ae948b8f7f698009c7149a33e835 100644 (file)
@@ -168,7 +168,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
 
        qlen = sch->q.qlen;
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 
                dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
index f59a2cb2c803d79bd1f0eb1806464a0220824f9e..91f5ef6be0f23131c83fb888e96df5a51891196d 100644 (file)
@@ -901,7 +901,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = fq_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                if (!skb)
                        break;
index 9330923a624c02706005ebf171cc84f801d2fd73..47b5a056165cb0c7669f240345ac10336ce8e4e1 100644 (file)
@@ -431,7 +431,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
 
        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
-               struct sk_buff *skb = fq_codel_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
index 68e6acd0f130d93de42b75f8af40513899b5e2bd..607c580d75e4b65301c7d06f7531ac27a4200e69 100644 (file)
@@ -357,7 +357,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 
        /* Drop excess packets if new limit is lower */
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                len_dropped += qdisc_pkt_len(skb);
                num_dropped += 1;
index d26cd436cbe31bd37b1b651e0760845ae7ea2616..83fc44f20e31cbf4b4d61025ba1454722c230bba 100644 (file)
@@ -560,7 +560,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
        qlen = sch->q.qlen;
        prev_backlog = sch->qstats.backlog;
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = hhf_dequeue(sch);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
                rtnl_kfree_skbs(skb, skb);
        }
index 2da6250ec3463686a1d588da224314befcc1478b..48c5ab8ec143c1de91dd1f02f676012ec5b53e68 100644 (file)
@@ -190,7 +190,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
        /* Drop excess packets if new limit is lower */
        qlen = sch->q.qlen;
        while (sch->q.qlen > sch->limit) {
-               struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+               struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 
                dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);