git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net/sched: sch_dualpi2: drain both C-queue and L-queue in dualpi2_change()
Author:     Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
Date:       Fri, 17 Apr 2026 15:25:51 +0000 (17:25 +0200)
Committer:  Paolo Abeni <pabeni@redhat.com>
CommitDate: Tue, 21 Apr 2026 13:00:39 +0000 (15:00 +0200)
Fix dualpi2_change() to correctly enforce updated limit and memlimit
values after a configuration change of the dualpi2 qdisc.

Before this patch, dualpi2_change() always attempted to dequeue packets
via the root qdisc (C-queue) when reducing backlog or memory usage, and
unconditionally assumed that a valid skb would be returned. When traffic
classification results in packets being queued in the L-queue while the
C-queue is empty, this leads to a NULL skb dereference during limit or
memlimit enforcement.

This is fixed by first dequeuing from the C-queue path if it is
non-empty. Once the C-queue is empty, packets are dequeued directly from
the L-queue. Return values from qdisc_dequeue_internal() are checked for
both queues. When dequeuing from the L-queue, the parent qdisc qlen and
backlog counters are updated explicitly to keep overall qdisc statistics
consistent.

Fixes: 320d031ad6e4 ("sched: Struct definition and parsing of dualpi2 qdisc")
Reported-by: "Kito Xu (veritas501)" <hxzene@gmail.com>
Closes: https://lore.kernel.org/netdev/20260413075740.2234828-1-hxzene@gmail.com/
Signed-off-by: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
Link: https://patch.msgid.link/20260417152551.71648-1-chia-yu.chang@nokia-bell-labs.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/sched/sch_dualpi2.c

index fe6f5e8896257674b9f175e01428b89e299a7dda..241e6a46bd00e39820f5ba9dc71d559f205a4de0 100644 (file)
@@ -868,11 +868,35 @@ static int dualpi2_change(struct Qdisc *sch, struct nlattr *opt,
        old_backlog = sch->qstats.backlog;
        while (qdisc_qlen(sch) > sch->limit ||
               q->memory_used > q->memory_limit) {
-               struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
+               struct sk_buff *skb = NULL;
 
-               q->memory_used -= skb->truesize;
-               qdisc_qstats_backlog_dec(sch, skb);
-               rtnl_qdisc_drop(skb, sch);
+               if (qdisc_qlen(sch) > qdisc_qlen(q->l_queue)) {
+                       skb = qdisc_dequeue_internal(sch, true);
+                       if (unlikely(!skb)) {
+                               WARN_ON_ONCE(1);
+                               break;
+                       }
+                       q->memory_used -= skb->truesize;
+                       rtnl_qdisc_drop(skb, sch);
+               } else if (qdisc_qlen(q->l_queue)) {
+                       skb = qdisc_dequeue_internal(q->l_queue, true);
+                       if (unlikely(!skb)) {
+                               WARN_ON_ONCE(1);
+                               break;
+                       }
+                       /* L-queue packets are counted in both sch and
+                        * l_queue on enqueue; qdisc_dequeue_internal()
+                        * handled l_queue, so we further account for sch.
+                        */
+                       --sch->q.qlen;
+                       qdisc_qstats_backlog_dec(sch, skb);
+                       q->memory_used -= skb->truesize;
+                       rtnl_qdisc_drop(skb, q->l_queue);
+                       qdisc_qstats_drop(sch);
+               } else {
+                       WARN_ON_ONCE(1);
+                       break;
+               }
        }
        qdisc_tree_reduce_backlog(sch, old_qlen - qdisc_qlen(sch),
                                  old_backlog - sch->qstats.backlog);