net: fully inline backlog_unlock_irq_restore()
author    Eric Dumazet <edumazet@google.com>
          Mon, 5 Jan 2026 16:30:54 +0000 (16:30 +0000)
committer Jakub Kicinski <kuba@kernel.org>
          Wed, 7 Jan 2026 01:14:35 +0000 (17:14 -0800)
Some arches (like x86) do not inline spin_unlock_irqrestore().

backlog_unlock_irq_restore() is in the RPS/RFS critical path;
we prefer using spin_unlock() + local_irq_restore() for
optimal performance.
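
For the CONFIG_RPS=y case the unlock path therefore changes roughly as
sketched below. This is illustrative only; actual codegen depends on the
configuration, but on typical x86 builds spin_unlock() is inlined while
spin_unlock_irqrestore() goes through an out-of-line helper:

	/* before: a single call that is usually not inlined on x86 */
	spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);

	/* after: spin_unlock() normally inlines, and local_irq_restore()
	 * is a cheap restore of the saved flags word on most arches
	 */
	spin_unlock(&sd->input_pkt_queue.lock);
	local_irq_restore(flags);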

Also pass backlog_unlock_irq_restore()'s second argument by value
to avoid a pointless dereference.
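
The resulting caller pattern looks roughly like the sketch below
(assuming the companion backlog_lock_irq_save() helper, not shown in
this diff, keeps taking a pointer since it must write the saved flags
back to the caller):

	unsigned long flags;

	backlog_lock_irq_save(sd, &flags);	/* saved IRQ state written via the pointer */
	__skb_queue_tail(&sd->input_pkt_queue, skb);
	backlog_unlock_irq_restore(sd, flags);	/* flags now passed by value, no dereference */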

No difference in net/core/dev.o code size.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260105163054.13698-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/dev.c

index 36dc5199037edb1506e67f6ab5e977ff41efef59..c711da3355105a4e69b13ae58d4696c3c6c80e89 100644
@@ -246,12 +246,11 @@ static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 }
 
 static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
-                                             unsigned long *flags)
+                                             unsigned long flags)
 {
        if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
-               spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
-       else
-               local_irq_restore(*flags);
+               spin_unlock(&sd->input_pkt_queue.lock);
+       local_irq_restore(flags);
 }
 
 static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
@@ -5247,7 +5246,7 @@ void kick_defer_list_purge(unsigned int cpu)
                if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
                        __napi_schedule_irqoff(&sd->backlog);
 
-               backlog_unlock_irq_restore(sd, &flags);
+               backlog_unlock_irq_restore(sd, flags);
 
        } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
                smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -5334,14 +5333,14 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
                }
                __skb_queue_tail(&sd->input_pkt_queue, skb);
                tail = rps_input_queue_tail_incr(sd);
-               backlog_unlock_irq_restore(sd, &flags);
+               backlog_unlock_irq_restore(sd, flags);
 
                /* save the tail outside of the critical section */
                rps_input_queue_tail_save(qtail, tail);
                return NET_RX_SUCCESS;
        }
 
-       backlog_unlock_irq_restore(sd, &flags);
+       backlog_unlock_irq_restore(sd, flags);
 
 cpu_backlog_drop:
        reason = SKB_DROP_REASON_CPU_BACKLOG;