net_sched: sch_fq: prefetch one skb ahead in dequeue()
author Eric Dumazet <edumazet@google.com>
Fri, 21 Nov 2025 08:32:51 +0000 (08:32 +0000)
committer Paolo Abeni <pabeni@redhat.com>
Tue, 25 Nov 2025 15:10:32 +0000 (16:10 +0100)
Prefetch the skb that we are likely to dequeue at the next dequeue().

Also call fq_dequeue_skb() a bit sooner in fq_dequeue().

This reduces the window between the read of q.qlen and the
writes to fields in the same cache line, which another cpu
could be dirtying while trying to enqueue a packet.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251121083256.674562-10-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
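
The first hunk below applies a classic prefetch-one-ahead pattern: while
unlinking the current head skb, start pulling its successor's cache line
so the next dequeue() does not stall on that load. The following is a
minimal user-space sketch of the same idea, not the kernel code: it uses
GCC/Clang's __builtin_prefetch() in place of the kernel's prefetch(), and
a hypothetical node type rather than struct sk_buff.

	#include <stddef.h>

	/* Hypothetical singly linked queue node; illustrative only,
	 * not the kernel's struct sk_buff. */
	struct node {
		struct node *next;
		/* ... payload ... */
	};

	/* Pop the head while prefetching its successor, so the data the
	 * *next* pop needs is already on its way into the cache. */
	static struct node *pop_head(struct node **headp)
	{
		struct node *n = *headp;

		if (n) {
			struct node *next = n->next;

			/* A prefetch is only a hint; on common ISAs it
			 * does not fault even if next is NULL. */
			__builtin_prefetch(next);
			*headp = next;
		}
		return n;
	}
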
net/sched/sch_fq.c

index 0b0ca1aa9251f959e87dd5dc504fbe0f4cbc75eb..6e5f2f4f241546605f8ba37f96275446c8836eee 100644
@@ -480,7 +480,10 @@ static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
                          struct sk_buff *skb)
 {
        if (skb == flow->head) {
-               flow->head = skb->next;
+               struct sk_buff *next = skb->next;
+
+               prefetch(next);
+               flow->head = next;
        } else {
                rb_erase(&skb->rbnode, &flow->t_root);
                skb->dev = qdisc_dev(sch);
@@ -712,6 +715,7 @@ begin:
                        goto begin;
                }
                prefetch(&skb->end);
+               fq_dequeue_skb(sch, f, skb);
                if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
                        INET_ECN_set_ce(skb);
                        q->stat_ce_mark++;
@@ -719,7 +723,6 @@ begin:
                if (--f->qlen == 0)
                        q->inactive_flows++;
                q->band_pkt_count[fq_skb_cb(skb)->band]--;
-               fq_dequeue_skb(sch, f, skb);
        } else {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
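
The second and third hunks move fq_dequeue_skb() ahead of the CE marking
and the per-flow/per-band accounting. Reconstructed from the hunks above
(the closing brace of the ce_threshold block falls between the two hunks
and is inferred), the fast path now reads:

	prefetch(&skb->end);
	fq_dequeue_skb(sch, f, skb);
	if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
		INET_ECN_set_ce(skb);
		q->stat_ce_mark++;
	}
	if (--f->qlen == 0)
		q->inactive_flows++;
	q->band_pkt_count[fq_skb_cb(skb)->band]--;

Calling fq_dequeue_skb() sooner narrows the interval between
fq_dequeue()'s earlier read of q.qlen and the writes that update the
same cache line, leaving less time for an enqueuing cpu to dirty that
line in between.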