From 27a01c1969a5d5ed4739e45777957445af96322d Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 5 Jan 2026 16:30:54 +0000
Subject: [PATCH] net: fully inline backlog_unlock_irq_restore()

Some arches (like x86) do not inline spin_unlock_irqrestore().

backlog_unlock_irq_restore() is in the RPS/RFS critical path, so we
prefer using spin_unlock() + local_irq_restore() for optimal
performance: the queue lock is released only when it was actually
taken, and the saved interrupt flags are restored unconditionally.

Also change backlog_unlock_irq_restore()'s second argument to a plain
unsigned long to avoid a pointless dereference.

No difference in net/core/dev.o code size.

Signed-off-by: Eric Dumazet
Link: https://patch.msgid.link/20260105163054.13698-1-edumazet@google.com
Signed-off-by: Jakub Kicinski
---
 net/core/dev.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 36dc5199037ed..c711da3355105 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -246,12 +246,11 @@ static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 }
 
 static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
-					      unsigned long *flags)
+					      unsigned long flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
-		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
-	else
-		local_irq_restore(*flags);
+		spin_unlock(&sd->input_pkt_queue.lock);
+	local_irq_restore(flags);
 }
 
 static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
@@ -5247,7 +5246,7 @@ void kick_defer_list_purge(unsigned int cpu)
 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
 			__napi_schedule_irqoff(&sd->backlog);
 
-		backlog_unlock_irq_restore(sd, &flags);
+		backlog_unlock_irq_restore(sd, flags);
 
 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
 		smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -5334,14 +5333,14 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 		}
 		__skb_queue_tail(&sd->input_pkt_queue, skb);
 		tail = rps_input_queue_tail_incr(sd);
-		backlog_unlock_irq_restore(sd, &flags);
+		backlog_unlock_irq_restore(sd, flags);
 
 		/* save the tail outside of the critical section */
 		rps_input_queue_tail_save(qtail, tail);
 		return NET_RX_SUCCESS;
 	}
-	backlog_unlock_irq_restore(sd, &flags);
+	backlog_unlock_irq_restore(sd, flags);
 
 cpu_backlog_drop:
 	reason = SKB_DROP_REASON_CPU_BACKLOG;
-- 
2.47.3
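
For context, the unconditional local_irq_restore() in the new helper is
safe because of how the lock side is structured. Below is a sketch of
the matching backlog_lock_irq_save() helper, reconstructed from the
symmetry of the unlock path in this patch rather than quoted from the
tree, so treat its exact shape as an assumption:

    static inline void backlog_lock_irq_save(struct softnet_data *sd,
                                             unsigned long *flags)
    {
            /* RPS/backlog threads: take the queue lock with irqs off
             * (spin_lock_irqsave() saves flags and disables interrupts
             * before acquiring the lock).
             */
            if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                    spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
            else
                    /* No lock needed, only disable hard interrupts. */
                    local_irq_save(*flags);
    }

Either way interrupts end up disabled and *flags holds the saved state,
so the unlock side may drop the spinlock only in the RPS/backlog-thread
case and then restore the flags unconditionally, avoiding the
out-of-line spin_unlock_irqrestore() call the changelog mentions.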