net: optimize enqueue_to_backlog() for the fast path
author     Eric Dumazet <edumazet@google.com>
           Fri, 24 Oct 2025 09:05:17 +0000 (09:05 +0000)
committer  Jakub Kicinski <kuba@kernel.org>
           Wed, 29 Oct 2025 00:39:03 +0000 (17:39 -0700)
Add likely() and unlikely() annotations for the common cases:

- Device is running.
- Queue is not full.
- Queue is below half capacity.
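
For reference, likely() and unlikely() are the kernel's branch-prediction
hints; roughly, they expand to __builtin_expect() (simplified from
include/linux/compiler.h):

    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)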

Add a max_backlog parameter to skb_flow_limit() to avoid
a second READ_ONCE(net_hotdata.max_backlog).
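
A minimal sketch of that pattern, with hypothetical names (struct q_cfg,
queue_over_limit(), should_drop()); only READ_ONCE() and unlikely() are the
usual kernel helpers. The caller takes one snapshot of the limit and passes
it down, so both checks use the same value and only one load is emitted:

    struct q_cfg { int limit; };                    /* hypothetical config */

    static bool queue_over_limit(unsigned int qlen, int limit)
    {
            /* Uses the caller's snapshot; no second READ_ONCE() here. */
            return qlen >= (unsigned int)(limit >> 1);
    }

    static bool should_drop(const struct q_cfg *cfg, unsigned int qlen)
    {
            int limit = READ_ONCE(cfg->limit);      /* single load */

            return unlikely(qlen > (unsigned int)limit) ||
                   queue_over_limit(qlen, limit);
    }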

skb_flow_limit() does not need backlog_lock protection and can be
called before the lock is acquired, for even better resistance to
attacks.
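
The post-patch fast path in enqueue_to_backlog() then looks roughly like
this (condensed from the hunk below): both the length check and the
flow-limit check run locklessly, and the per-CPU backlog lock is only taken
on the path that actually enqueues:

    qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
    max_backlog = READ_ONCE(net_hotdata.max_backlog);
    if (unlikely(qlen > max_backlog) ||
        skb_flow_limit(skb, qlen, max_backlog))
            goto cpu_backlog_drop;          /* dropped without taking the lock */

    backlog_lock_irq_save(sd, &flags);
    if (likely(skb_queue_len(&sd->input_pkt_queue) <= max_backlog)) {
            /* enqueue skb and, if the queue was empty, schedule backlog NAPI */
            ...
    }
    backlog_unlock_irq_restore(sd, &flags);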

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20251024090517.3289181-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/net/core/dev.c b/net/core/dev.c
index 378c2d010faf251ffd874ebf0cc3dd6968eee447..d32f0b0c03bbd069d3651f5a6b772c8029baf96c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5249,14 +5249,15 @@ void kick_defer_list_purge(unsigned int cpu)
 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
 #endif
 
-static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
+static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen,
+                          int max_backlog)
 {
 #ifdef CONFIG_NET_FLOW_LIMIT
-       struct sd_flow_limit *fl;
-       struct softnet_data *sd;
        unsigned int old_flow, new_flow;
+       const struct softnet_data *sd;
+       struct sd_flow_limit *fl;
 
-       if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
+       if (likely(qlen < (max_backlog >> 1)))
                return false;
 
        sd = this_cpu_ptr(&softnet_data);
@@ -5301,19 +5302,19 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
        u32 tail;
 
        reason = SKB_DROP_REASON_DEV_READY;
-       if (!netif_running(skb->dev))
+       if (unlikely(!netif_running(skb->dev)))
                goto bad_dev;
 
-       reason = SKB_DROP_REASON_CPU_BACKLOG;
        sd = &per_cpu(softnet_data, cpu);
 
        qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
        max_backlog = READ_ONCE(net_hotdata.max_backlog);
-       if (unlikely(qlen > max_backlog))
+       if (unlikely(qlen > max_backlog) ||
+           skb_flow_limit(skb, qlen, max_backlog))
                goto cpu_backlog_drop;
        backlog_lock_irq_save(sd, &flags);
        qlen = skb_queue_len(&sd->input_pkt_queue);
-       if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
+       if (likely(qlen <= max_backlog)) {
                if (!qlen) {
                        /* Schedule NAPI for backlog device. We can use
                         * non atomic operation as we own the queue lock.
@@ -5334,6 +5335,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
        backlog_unlock_irq_restore(sd, &flags);
 
 cpu_backlog_drop:
+       reason = SKB_DROP_REASON_CPU_BACKLOG;
        numa_drop_add(&sd->drop_counters, 1);
 bad_dev:
        dev_core_stats_rx_dropped_inc(skb->dev);