xsk: Proxy pool management for leased queues
author	Daniel Borkmann <daniel@iogearbox.net>
	Thu, 15 Jan 2026 08:25:55 +0000 (09:25 +0100)
committer	Paolo Abeni <pabeni@redhat.com>
	Tue, 20 Jan 2026 10:58:50 +0000 (11:58 +0100)
Similar to the net_mp_{open,close}_rxq handling for leased queues, proxy
xsk_{reg,clear}_pool_at_qid via netif_get_rx_queue_lease_locked such that
when a virtual netdev picked a leased rxq, the request gets through to the
real rxq in the physical netdev. The proxying is only relevant for
queue_id < dev->real_num_rx_queues since right now leasing is only
supported for rxqs.
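
For illustration only, below is a minimal userspace C model of that proxy
step. All struct, field, and function names (fake_dev, get_rx_queue_lease,
peer, peer_queue, leased) are hypothetical stand-ins, not the kernel API;
the sketch only shows how a (dev, queue_id) pair naming a leased rxq on a
virtual netdev gets rewritten to point at the backing rxq of the physical
netdev before the pool is registered or cleared.

  /* Minimal sketch, not kernel code: models the redirection of a
   * (dev, queue_id) pair from a virtual netdev's leased rxq to the
   * backing rxq of the physical netdev. All names are hypothetical.
   */
  #include <stdbool.h>
  #include <stdio.h>

  struct fake_dev {
  	const char *name;
  	unsigned int real_num_rx_queues;
  	struct fake_dev *peer;      /* physical dev backing the lease, if any */
  	unsigned int peer_queue[8]; /* per-rxq mapping into the peer's rxqs   */
  	bool leased[8];             /* rxq is leased from the peer            */
  };

  /* Model of the "get lease" proxy step: on success, *dev and *id may be
   * redirected to the physical device and its queue index.
   */
  static bool get_rx_queue_lease(struct fake_dev **dev, unsigned int *id)
  {
  	struct fake_dev *d = *dev;

  	if (*id >= d->real_num_rx_queues)
  		return false;
  	if (d->leased[*id] && d->peer) {
  		*dev = d->peer;            /* proxy to the real rxq owner */
  		*id = d->peer_queue[*id];  /* ... and its queue index     */
  	}
  	return true;
  }

  int main(void)
  {
  	struct fake_dev phys = { .name = "eth0", .real_num_rx_queues = 8 };
  	struct fake_dev virt = {
  		.name = "virt0", .real_num_rx_queues = 2, .peer = &phys,
  		.peer_queue = { 4, 5 }, .leased = { true, true },
  	};
  	struct fake_dev *dev = &virt;
  	unsigned int id = 1;

  	if (get_rx_queue_lease(&dev, &id))
  		printf("pool would be registered on %s rxq %u\n", dev->name, id);
  	return 0;
  }

In the actual patch below, xsk_{reg,clear}_pool_at_qid follow the same
pattern with the real helpers: netif_get_rx_queue_lease_locked(&dev, &id)
performs the redirection and netif_put_rx_queue_lease_locked(orig_dev, dev)
drops the reference afterwards.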

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Co-developed-by: David Wei <dw@davidwei.uk>
Signed-off-by: David Wei <dw@davidwei.uk>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
Link: https://patch.msgid.link/20260115082603.219152-9-daniel@iogearbox.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/xdp/xsk.c

index 747865f9341d37e641963931a3112a8e56e06ab5..92f791433725123c8ac670792ed8685f17a4825f 100644
@@ -23,6 +23,8 @@
 #include <linux/netdevice.h>
 #include <linux/rculist.h>
 #include <linux/vmalloc.h>
+
+#include <net/netdev_queues.h>
 #include <net/xdp_sock_drv.h>
 #include <net/busy_poll.h>
 #include <net/netdev_lock.h>
@@ -117,10 +119,18 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
-       if (queue_id < dev->num_rx_queues)
-               dev->_rx[queue_id].pool = NULL;
-       if (queue_id < dev->num_tx_queues)
-               dev->_tx[queue_id].pool = NULL;
+       struct net_device *orig_dev = dev;
+       unsigned int id = queue_id;
+
+       if (id < dev->real_num_rx_queues)
+               WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &id));
+
+       if (id < dev->real_num_rx_queues)
+               dev->_rx[id].pool = NULL;
+       if (id < dev->real_num_tx_queues)
+               dev->_tx[id].pool = NULL;
+
+       netif_put_rx_queue_lease_locked(orig_dev, dev);
 }
 
 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
@@ -130,17 +140,29 @@ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
                        u16 queue_id)
 {
-       if (queue_id >= max_t(unsigned int,
-                             dev->real_num_rx_queues,
-                             dev->real_num_tx_queues))
-               return -EINVAL;
+       struct net_device *orig_dev = dev;
+       unsigned int id = queue_id;
+       int ret = 0;
 
-       if (queue_id < dev->real_num_rx_queues)
-               dev->_rx[queue_id].pool = pool;
-       if (queue_id < dev->real_num_tx_queues)
-               dev->_tx[queue_id].pool = pool;
+       if (id >= max(dev->real_num_rx_queues,
+                     dev->real_num_tx_queues))
+               return -EINVAL;
+       if (id < dev->real_num_rx_queues) {
+               if (!netif_get_rx_queue_lease_locked(&dev, &id))
+                       return -EBUSY;
+               if (xsk_get_pool_from_qid(dev, id)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+       }
 
-       return 0;
+       if (id < dev->real_num_rx_queues)
+               dev->_rx[id].pool = pool;
+       if (id < dev->real_num_tx_queues)
+               dev->_tx[id].pool = pool;
+out:
+       netif_put_rx_queue_lease_locked(orig_dev, dev);
+       return ret;
 }
 
 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,