old_backlog = sch->qstats.backlog;
/* New limits may be lower than the current occupancy: drop packets
 * until both the packet limit and the memory limit are respected.
 * Always shrink the currently-longer queue so the two sub-queues
 * stay balanced while trimming.
 */
while (qdisc_qlen(sch) > sch->limit ||
       q->memory_used > q->memory_limit) {
	struct sk_buff *skb = NULL;

	if (qdisc_qlen(sch) > qdisc_qlen(q->l_queue)) {
		/* C-queue holds the excess: its packets are accounted
		 * only in sch, and qdisc_dequeue_internal() already
		 * adjusts sch's qlen/backlog for us.
		 */
		skb = qdisc_dequeue_internal(sch, true);
		if (unlikely(!skb)) {
			/* qlen said non-empty but dequeue failed —
			 * inconsistent state; warn once and stop
			 * rather than loop forever.
			 */
			WARN_ON_ONCE(1);
			break;
		}
		q->memory_used -= skb->truesize;
		rtnl_qdisc_drop(skb, sch);
	} else if (qdisc_qlen(q->l_queue)) {
		skb = qdisc_dequeue_internal(q->l_queue, true);
		if (unlikely(!skb)) {
			WARN_ON_ONCE(1);
			break;
		}
		/* L-queue packets are counted in both sch and
		 * l_queue on enqueue; qdisc_dequeue_internal()
		 * handled l_queue, so we further account for sch.
		 */
		--sch->q.qlen;
		qdisc_qstats_backlog_dec(sch, skb);
		q->memory_used -= skb->truesize;
		rtnl_qdisc_drop(skb, q->l_queue);
		qdisc_qstats_drop(sch);
	} else {
		/* Over limit yet both queues report empty — should be
		 * impossible; warn once and bail out.
		 */
		WARN_ON_ONCE(1);
		break;
	}
}
/* Propagate the net qlen/backlog change to parent qdiscs. */
qdisc_tree_reduce_backlog(sch, old_qlen - qdisc_qlen(sch),
			  old_backlog - sch->qstats.backlog);