bond_start_xmit() spends some cycles in is_netpoll_tx_blocked():
if (unlikely(is_netpoll_tx_blocked(dev)))
return NETDEV_TX_BUSY;
because of the "pushf; pop reg" sequence (i.e. irqs_disabled()).
Let's swap the conditions in is_netpoll_tx_blocked() and
convert netpoll_block_tx to a static key.
Before:
1.23 │ mov %gs:0x28,%rax
1.24 │ mov %rax,0x18(%rsp)
29.45 │ pushfq
0.50 │ pop %rax
0.47 │ test $0x200,%eax
│ ↓ je 1b4
0.49 │ 32: lea 0x980(%rsi),%rbx
After:
0.72 │ mov %gs:0x28,%rax
0.81 │ mov %rax,0x18(%rsp)
0.82 │ nop
2.77 │ 2a: lea 0x980(%rsi),%rbx
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260223230749.2376145-1-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
/*----------------------------- Global variables ----------------------------*/
#ifdef CONFIG_NET_POLL_CONTROLLER
-atomic_t netpoll_block_tx = ATOMIC_INIT(0);
+DEFINE_STATIC_KEY_FALSE(netpoll_block_tx);
#endif
unsigned int bond_net_id __read_mostly;
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Make sure we don't have an imbalance on our netpoll blocking */
- WARN_ON(atomic_read(&netpoll_block_tx));
+ WARN_ON(static_branch_unlikely(&netpoll_block_tx));
#endif
}
NETIF_F_GSO_ESP)
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern atomic_t netpoll_block_tx;
+DECLARE_STATIC_KEY_FALSE(netpoll_block_tx);
static inline void block_netpoll_tx(void)
{
- atomic_inc(&netpoll_block_tx);
+ static_branch_inc(&netpoll_block_tx);
}
static inline void unblock_netpoll_tx(void)
{
- atomic_dec(&netpoll_block_tx);
+ static_branch_dec(&netpoll_block_tx);
}
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
- if (unlikely(netpoll_tx_running(dev)))
- return atomic_read(&netpoll_block_tx);
+ if (static_branch_unlikely(&netpoll_block_tx))
+ return netpoll_tx_running(dev);
return 0;
}
#else