xsk: do not enable/disable irq when grabbing/releasing xsk_tx_list_lock
author     Jason Xing <kernelxing@tencent.com>
           Thu, 30 Oct 2025 00:06:45 +0000 (08:06 +0800)
committer  Paolo Abeni <pabeni@redhat.com>
           Tue, 4 Nov 2025 15:10:52 +0000 (16:10 +0100)
Commit ac98d8aab61b ("xsk: wire upp Tx zero-copy functions"), which
originally introduced this lock, performed the deletion from
sk_destruct(), which can run in IRQ context, so the
spin_lock_irqsave()/spin_unlock_irqrestore() pair was used. Later,
commit 541d7fdd7694 ("xsk: proper AF_XDP socket teardown ordering")
moved the deletion into xsk_release(), which only runs in process
context. Since that commit, the irqsave/irqrestore variants are no
longer needed.

Now both places that take xsk_tx_list_lock run only in process
context, so there is no need to disable and re-enable interrupts
around the lock.
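
For illustration only (not part of this patch): a minimal sketch of
the difference between the two locking forms, using a hypothetical
example_lock and example_list. spin_lock_irqsave() additionally
disables local interrupts and saves the previous IRQ state, which is
only required when the lock can also be taken from IRQ context; once
every caller runs in process context, plain spin_lock()/spin_unlock()
is sufficient.

#include <linux/spinlock.h>
#include <linux/rculist.h>

/* Hypothetical lock and list, for illustration only. */
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

/* May be called from IRQ context: local IRQs must be disabled. */
static void example_add_any_context(struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add_rcu(node, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Only ever called from process context: a plain spinlock is enough. */
static void example_add_process_context(struct list_head *node)
{
	spin_lock(&example_lock);
	list_add_rcu(node, &example_list);
	spin_unlock(&example_lock);
}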

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://patch.msgid.link/20251030000646.18859-2-kerneljasonxing@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/xdp/xsk_buff_pool.c

index aa9788f20d0db5db20dfad4e7e40b4df07729786..309075050b2a0d97a42bfe48b99e0bcb6d5215f9 100644
 
 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
 {
-       unsigned long flags;
-
        if (!xs->tx)
                return;
 
-       spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
+       spin_lock(&pool->xsk_tx_list_lock);
        list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
-       spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
+       spin_unlock(&pool->xsk_tx_list_lock);
 }
 
 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
 {
-       unsigned long flags;
-
        if (!xs->tx)
                return;
 
-       spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
+       spin_lock(&pool->xsk_tx_list_lock);
        list_del_rcu(&xs->tx_list);
-       spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
+       spin_unlock(&pool->xsk_tx_list_lock);
 }
 
 void xp_destroy(struct xsk_buff_pool *pool)