git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net: remove the netif_get_rx_queue_lease_locked() helpers
authorJakub Kicinski <kuba@kernel.org>
Wed, 8 Apr 2026 22:12:51 +0000 (15:12 -0700)
committerJakub Kicinski <kuba@kernel.org>
Fri, 10 Apr 2026 01:26:28 +0000 (18:26 -0700)
The netif_get_rx_queue_lease_locked() API hides the locking
and the descent onto the leased queue, making the code
harder to follow (at least to me). Remove the API and open
code the descent a bit. Most of the code now looks like:

 if (!leased)
     return __helper(x);

 hw_rxq = ..
 netdev_lock(hw_rxq->dev);
 ret = __helper(x);
 netdev_unlock(hw_rxq->dev);

 return ret;

Of course, if we have more code paths that need the wrapping,
we may need to revisit. For now, IMHO, having to know what
netif_get_rx_queue_lease_locked() does is not worth the 20 LoC
it saves.

Link: https://patch.msgid.link/20260408151251.72bd2482@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/netdev_rx_queue.h
net/core/dev.h
net/core/netdev-genl.c
net/core/netdev_queues.c
net/core/netdev_rx_queue.c
net/xdp/xsk.c

index 7e98c679ea84f49aa0cc299ce9045062cc668a0d..9415a94d333dd898b2d4e538db8ceacacafc789f 100644 (file)
@@ -76,11 +76,6 @@ struct netdev_rx_queue *
 __netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq,
                           enum netif_lease_dir dir);
 
-struct netdev_rx_queue *
-netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq);
-void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
-                                    struct net_device *dev);
-
 int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
 void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
                           struct netdev_rx_queue *rxq_src);
index 95edb2d4eff8b88bd36186318723ce946015f00a..376bac4a82daf5e225dbcd679bffcf4654370508 100644 (file)
@@ -101,6 +101,7 @@ int netdev_queue_config_validate(struct net_device *dev, int rxq_idx,
 
 bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
 bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);
+bool netif_is_queue_leasee(const struct net_device *dev);
 
 void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
                              const struct pp_memory_provider_params *p);
index 056460d019408112a123d9362e8aea9e17d28072..b8f6076d80072fd1114ef4d4000647c18416a8bb 100644 (file)
@@ -395,8 +395,7 @@ netdev_nl_queue_fill_lease(struct sk_buff *rsp, struct net_device *netdev,
        struct netdev_rx_queue *rxq;
        struct net *net, *peer_net;
 
-       rxq = __netif_get_rx_queue_lease(&netdev, &q_idx,
-                                        NETIF_PHYS_TO_VIRT);
+       rxq = __netif_get_rx_queue_lease(&netdev, &q_idx, NETIF_PHYS_TO_VIRT);
        if (!rxq || orig_netdev == netdev)
                return 0;
 
@@ -436,13 +435,45 @@ nla_put_failure:
        return -ENOMEM;
 }
 
+static int
+__netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct netdev_rx_queue *rxq)
+{
+       struct pp_memory_provider_params *params = &rxq->mp_params;
+
+       if (params->mp_ops &&
+           params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
+               return -EMSGSIZE;
+
+#ifdef CONFIG_XDP_SOCKETS
+       if (rxq->pool)
+               if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
+                       return -EMSGSIZE;
+#endif
+       return 0;
+}
+
+static int
+netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct net_device *netdev,
+                       struct netdev_rx_queue *rxq)
+{
+       struct netdev_rx_queue *hw_rxq;
+       int ret;
+
+       hw_rxq = rxq->lease;
+       if (!hw_rxq || !netif_is_queue_leasee(netdev))
+               return __netdev_nl_queue_fill_mp(rsp, rxq);
+
+       netdev_lock(hw_rxq->dev);
+       ret = __netdev_nl_queue_fill_mp(rsp, hw_rxq);
+       netdev_unlock(hw_rxq->dev);
+       return ret;
+}
+
 static int
 netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
                         u32 q_idx, u32 q_type, const struct genl_info *info)
 {
-       struct pp_memory_provider_params *params;
-       struct net_device *orig_netdev = netdev;
-       struct netdev_rx_queue *rxq, *rxq_lease;
+       struct netdev_rx_queue *rxq;
        struct netdev_queue *txq;
        void *hdr;
 
@@ -462,20 +493,8 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
                        goto nla_put_failure;
                if (netdev_nl_queue_fill_lease(rsp, netdev, q_idx, q_type))
                        goto nla_put_failure;
-
-               rxq_lease = netif_get_rx_queue_lease_locked(&netdev, &q_idx);
-               if (rxq_lease)
-                       rxq = rxq_lease;
-               params = &rxq->mp_params;
-               if (params->mp_ops &&
-                   params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
-                       goto nla_put_failure_lease;
-#ifdef CONFIG_XDP_SOCKETS
-               if (rxq->pool)
-                       if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
-                               goto nla_put_failure_lease;
-#endif
-               netif_put_rx_queue_lease_locked(orig_netdev, netdev);
+               if (netdev_nl_queue_fill_mp(rsp, netdev, rxq))
+                       goto nla_put_failure;
                break;
        case NETDEV_QUEUE_TYPE_TX:
                txq = netdev_get_tx_queue(netdev, q_idx);
@@ -493,8 +512,6 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
 
        return 0;
 
-nla_put_failure_lease:
-       netif_put_rx_queue_lease_locked(orig_netdev, netdev);
 nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
index 265161e12a9c19e7469c1aa28ac42a2cb5432e9a..73fb28087a93d651084c4ccbc099b4b8e3d06838 100644 (file)
@@ -37,18 +37,24 @@ struct device *netdev_queue_get_dma_dev(struct net_device *dev,
                                        unsigned int idx,
                                        enum netdev_queue_type type)
 {
-       struct net_device *orig_dev = dev;
+       struct netdev_rx_queue *hw_rxq;
        struct device *dma_dev;
 
+       netdev_ops_assert_locked(dev);
+
        /* Only RX side supports queue leasing today. */
        if (type != NETDEV_QUEUE_TYPE_RX || !netif_rxq_is_leased(dev, idx))
                return __netdev_queue_get_dma_dev(dev, idx);
-
-       if (!netif_get_rx_queue_lease_locked(&dev, &idx))
+       if (!netif_is_queue_leasee(dev))
                return NULL;
 
-       dma_dev = __netdev_queue_get_dma_dev(dev, idx);
-       netif_put_rx_queue_lease_locked(orig_dev, dev);
+       hw_rxq = __netif_get_rx_queue(dev, idx)->lease;
+
+       netdev_lock(hw_rxq->dev);
+       idx = get_netdev_rx_queue_index(hw_rxq);
+       dma_dev = __netdev_queue_get_dma_dev(hw_rxq->dev, idx);
+       netdev_unlock(hw_rxq->dev);
+
        return dma_dev;
 }
 
index 469319451ba29e597f27bd0d0d851f42d7aeaa0b..8771e06a0afef1bf40df9397b0522f069dc1ecd8 100644 (file)
@@ -57,6 +57,11 @@ static bool netif_lease_dir_ok(const struct net_device *dev,
        return false;
 }
 
+bool netif_is_queue_leasee(const struct net_device *dev)
+{
+       return netif_lease_dir_ok(dev, NETIF_VIRT_TO_PHYS);
+}
+
 struct netdev_rx_queue *
 __netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx,
                           enum netif_lease_dir dir)
@@ -74,29 +79,6 @@ __netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx,
        return rxq;
 }
 
-struct netdev_rx_queue *
-netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq_idx)
-{
-       struct net_device *orig_dev = *dev;
-       struct netdev_rx_queue *rxq;
-
-       /* Locking order is always from the virtual to the physical device
-        * see netdev_nl_queue_create_doit().
-        */
-       netdev_ops_assert_locked(orig_dev);
-       rxq = __netif_get_rx_queue_lease(dev, rxq_idx, NETIF_VIRT_TO_PHYS);
-       if (rxq && orig_dev != *dev)
-               netdev_lock(*dev);
-       return rxq;
-}
-
-void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
-                                    struct net_device *dev)
-{
-       if (orig_dev != dev)
-               netdev_unlock(dev);
-}
-
 /* See also page_pool_is_unreadable() */
 bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx)
 {
@@ -264,7 +246,6 @@ int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
                      const struct pp_memory_provider_params *p,
                      struct netlink_ext_ack *extack)
 {
-       struct net_device *orig_dev = dev;
        int ret;
 
        if (!netdev_need_ops_lock(dev))
@@ -279,19 +260,18 @@ int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
        if (!netif_rxq_is_leased(dev, rxq_idx))
                return __netif_mp_open_rxq(dev, rxq_idx, p, extack);
 
-       if (!netif_get_rx_queue_lease_locked(&dev, &rxq_idx)) {
+       if (!__netif_get_rx_queue_lease(&dev, &rxq_idx, NETIF_VIRT_TO_PHYS)) {
                NL_SET_ERR_MSG(extack, "rx queue leased to a virtual netdev");
                return -EBUSY;
        }
        if (!dev->dev.parent) {
                NL_SET_ERR_MSG(extack, "rx queue belongs to a virtual netdev");
-               ret = -EOPNOTSUPP;
-               goto out;
+               return -EOPNOTSUPP;
        }
 
+       netdev_lock(dev);
        ret = __netif_mp_open_rxq(dev, rxq_idx, p, extack);
-out:
-       netif_put_rx_queue_lease_locked(orig_dev, dev);
+       netdev_unlock(dev);
        return ret;
 }
 
@@ -326,18 +306,18 @@ static void __netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
 void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
                        const struct pp_memory_provider_params *old_p)
 {
-       struct net_device *orig_dev = dev;
-
        if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
                return;
        if (!netif_rxq_is_leased(dev, ifq_idx))
                return __netif_mp_close_rxq(dev, ifq_idx, old_p);
 
-       if (WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &ifq_idx)))
+       if (!__netif_get_rx_queue_lease(&dev, &ifq_idx, NETIF_VIRT_TO_PHYS)) {
+               WARN_ON_ONCE(1);
                return;
-
+       }
+       netdev_lock(dev);
        __netif_mp_close_rxq(dev, ifq_idx, old_p);
-       netif_put_rx_queue_lease_locked(orig_dev, dev);
+       netdev_unlock(dev);
 }
 
 void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
index 60be6561f48657362249e875839a514b02f07215..145876089b4b718b3280178854067a1f404463ab 100644 (file)
@@ -31,6 +31,8 @@
 #include <net/netdev_rx_queue.h>
 #include <net/xdp.h>
 
+#include "../core/dev.h"
+
 #include "xsk_queue.h"
 #include "xdp_umem.h"
 #include "xsk.h"
@@ -117,20 +119,42 @@ struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
 }
 EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
+static void __xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+{
+       if (queue_id < dev->num_rx_queues)
+               dev->_rx[queue_id].pool = NULL;
+       if (queue_id < dev->num_tx_queues)
+               dev->_tx[queue_id].pool = NULL;
+}
+
 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
-       struct net_device *orig_dev = dev;
-       unsigned int id = queue_id;
+       struct netdev_rx_queue *hw_rxq;
+
+       if (!netif_rxq_is_leased(dev, queue_id))
+               return __xsk_clear_pool_at_qid(dev, queue_id);
+       WARN_ON_ONCE(!netif_is_queue_leasee(dev));
 
-       if (id < dev->real_num_rx_queues)
-               WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &id));
+       hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
 
-       if (id < dev->num_rx_queues)
-               dev->_rx[id].pool = NULL;
-       if (id < dev->num_tx_queues)
-               dev->_tx[id].pool = NULL;
+       netdev_lock(hw_rxq->dev);
+       queue_id = get_netdev_rx_queue_index(hw_rxq);
+       __xsk_clear_pool_at_qid(hw_rxq->dev, queue_id);
+       netdev_unlock(hw_rxq->dev);
+}
+
+static int __xsk_reg_pool_at_qid(struct net_device *dev,
+                                struct xsk_buff_pool *pool, u16 queue_id)
+{
+       if (xsk_get_pool_from_qid(dev, queue_id))
+               return -EBUSY;
+
+       if (queue_id < dev->real_num_rx_queues)
+               dev->_rx[queue_id].pool = pool;
+       if (queue_id < dev->real_num_tx_queues)
+               dev->_tx[queue_id].pool = pool;
 
-       netif_put_rx_queue_lease_locked(orig_dev, dev);
+       return 0;
 }
 
 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
@@ -140,29 +164,26 @@ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
                        u16 queue_id)
 {
-       struct net_device *orig_dev = dev;
-       unsigned int id = queue_id;
-       int ret = 0;
+       struct netdev_rx_queue *hw_rxq;
+       int ret;
 
-       if (id >= max(dev->real_num_rx_queues,
-                     dev->real_num_tx_queues))
+       if (queue_id >= max(dev->real_num_rx_queues,
+                           dev->real_num_tx_queues))
                return -EINVAL;
 
-       if (id < dev->real_num_rx_queues) {
-               if (!netif_get_rx_queue_lease_locked(&dev, &id))
-                       return -EBUSY;
-               if (xsk_get_pool_from_qid(dev, id)) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-       }
+       if (queue_id >= dev->real_num_rx_queues ||
+           !netif_rxq_is_leased(dev, queue_id))
+               return __xsk_reg_pool_at_qid(dev, pool, queue_id);
+       if (!netif_is_queue_leasee(dev))
+               return -EBUSY;
+
+       hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
+
+       netdev_lock(hw_rxq->dev);
+       queue_id = get_netdev_rx_queue_index(hw_rxq);
+       ret = __xsk_reg_pool_at_qid(hw_rxq->dev, pool, queue_id);
+       netdev_unlock(hw_rxq->dev);
 
-       if (id < dev->real_num_rx_queues)
-               dev->_rx[id].pool = pool;
-       if (id < dev->real_num_tx_queues)
-               dev->_tx[id].pool = pool;
-out:
-       netif_put_rx_queue_lease_locked(orig_dev, dev);
        return ret;
 }