git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: move mp dev config validation to __net_mp_open_rxq()
author: Jakub Kicinski <kuba@kernel.org>
Thu, 3 Apr 2025 01:34:04 +0000 (18:34 -0700)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 4 Apr 2025 14:35:38 +0000 (07:35 -0700)
devmem code performs a number of safety checks to avoid having
to reimplement all of them in the drivers. Move those to
__net_mp_open_rxq() and reuse that function for binding to make
sure that io_uring ZC also benefits from them.

While at it rename the queue ID variable to rxq_idx in
__net_mp_open_rxq(), we touch most of the relevant lines.

The XArray insertion is reordered after the netdev_rx_queue_restart()
call, otherwise we'd need to duplicate the queue index check
or risk inserting an invalid pointer. The XArray allocation
failures should be extremely rare.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Fixes: 6e18ed929d3b ("net: add helpers for setting a memory provider on an rx queue")
Link: https://patch.msgid.link/20250403013405.2827250-2-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/page_pool/memory_provider.h
net/core/devmem.c
net/core/netdev-genl.c
net/core/netdev_rx_queue.c

index b3e66589776758f1b32217a67aca2690366d2193..ada4f968960aec851f8a4c66180d0811dede074a 100644 (file)
@@ -6,6 +6,7 @@
 #include <net/page_pool/types.h>
 
 struct netdev_rx_queue;
+struct netlink_ext_ack;
 struct sk_buff;
 
 struct memory_provider_ops {
@@ -24,8 +25,13 @@ void net_mp_niov_clear_page_pool(struct net_iov *niov);
 
 int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
                    struct pp_memory_provider_params *p);
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+                     const struct pp_memory_provider_params *p,
+                     struct netlink_ext_ack *extack);
 void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
                      struct pp_memory_provider_params *old_p);
+void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
+                       const struct pp_memory_provider_params *old_p);
 
 /**
   * net_mp_netmem_place_in_cache() - give a netmem to a page pool
index ee145a2aa41c289d37e07b77d7f27a88d6414396..f2ce3c2ebc9744a31aaf386162ee5720ef562141 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/ethtool_netlink.h>
 #include <linux/genalloc.h>
 #include <linux/mm.h>
 #include <linux/netdevice.h>
@@ -143,57 +142,28 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
                                    struct net_devmem_dmabuf_binding *binding,
                                    struct netlink_ext_ack *extack)
 {
+       struct pp_memory_provider_params mp_params = {
+               .mp_priv        = binding,
+               .mp_ops         = &dmabuf_devmem_ops,
+       };
        struct netdev_rx_queue *rxq;
        u32 xa_idx;
        int err;
 
-       if (rxq_idx >= dev->real_num_rx_queues) {
-               NL_SET_ERR_MSG(extack, "rx queue index out of range");
-               return -ERANGE;
-       }
-
-       if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
-               NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
-               return -EINVAL;
-       }
-
-       if (dev->cfg->hds_thresh) {
-               NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
-               return -EINVAL;
-       }
+       err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
+       if (err)
+               return err;
 
        rxq = __netif_get_rx_queue(dev, rxq_idx);
-       if (rxq->mp_params.mp_ops) {
-               NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
-               return -EEXIST;
-       }
-
-#ifdef CONFIG_XDP_SOCKETS
-       if (rxq->pool) {
-               NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
-               return -EBUSY;
-       }
-#endif
-
        err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
                       GFP_KERNEL);
        if (err)
-               return err;
-
-       rxq->mp_params.mp_priv = binding;
-       rxq->mp_params.mp_ops = &dmabuf_devmem_ops;
-
-       err = netdev_rx_queue_restart(dev, rxq_idx);
-       if (err)
-               goto err_xa_erase;
+               goto err_close_rxq;
 
        return 0;
 
-err_xa_erase:
-       rxq->mp_params.mp_priv = NULL;
-       rxq->mp_params.mp_ops = NULL;
-       xa_erase(&binding->bound_rxqs, xa_idx);
-
+err_close_rxq:
+       __net_mp_close_rxq(dev, rxq_idx, &mp_params);
        return err;
 }
 
index 3afeaa8c5dc510a5e7ea904ea7f3c3d9e2773fa5..5d7af50fe7027273881f4b0e384145b9d77168be 100644 (file)
@@ -874,12 +874,6 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock;
        }
 
-       if (dev_xdp_prog_count(netdev)) {
-               NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
-               err = -EEXIST;
-               goto err_unlock;
-       }
-
        binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
index 3af716f77a13bf80c18ab71e286e507ec27f5c45..556b5393ec9f27345b20f7ce7d87e3ce00b39af2 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include <linux/ethtool_netlink.h>
 #include <linux/netdevice.h>
 #include <net/netdev_lock.h>
 #include <net/netdev_queues.h>
@@ -86,8 +87,9 @@ err_free_new_mem:
 }
 EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
 
-static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
-                            struct pp_memory_provider_params *p)
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+                     const struct pp_memory_provider_params *p,
+                     struct netlink_ext_ack *extack)
 {
        struct netdev_rx_queue *rxq;
        int ret;
@@ -95,16 +97,41 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
        if (!netdev_need_ops_lock(dev))
                return -EOPNOTSUPP;
 
-       if (ifq_idx >= dev->real_num_rx_queues)
+       if (rxq_idx >= dev->real_num_rx_queues)
                return -EINVAL;
-       ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);
+       rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
 
-       rxq = __netif_get_rx_queue(dev, ifq_idx);
-       if (rxq->mp_params.mp_ops)
+       if (rxq_idx >= dev->real_num_rx_queues) {
+               NL_SET_ERR_MSG(extack, "rx queue index out of range");
+               return -ERANGE;
+       }
+       if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
+               NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
+               return -EINVAL;
+       }
+       if (dev->cfg->hds_thresh) {
+               NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
+               return -EINVAL;
+       }
+       if (dev_xdp_prog_count(dev)) {
+               NL_SET_ERR_MSG(extack, "unable to bind custom memory provider to device with XDP program attached");
                return -EEXIST;
+       }
+
+       rxq = __netif_get_rx_queue(dev, rxq_idx);
+       if (rxq->mp_params.mp_ops) {
+               NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
+               return -EEXIST;
+       }
+#ifdef CONFIG_XDP_SOCKETS
+       if (rxq->pool) {
+               NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
+               return -EBUSY;
+       }
+#endif
 
        rxq->mp_params = *p;
-       ret = netdev_rx_queue_restart(dev, ifq_idx);
+       ret = netdev_rx_queue_restart(dev, rxq_idx);
        if (ret) {
                rxq->mp_params.mp_ops = NULL;
                rxq->mp_params.mp_priv = NULL;
@@ -112,19 +139,19 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
        return ret;
 }
 
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
                    struct pp_memory_provider_params *p)
 {
        int ret;
 
        netdev_lock(dev);
-       ret = __net_mp_open_rxq(dev, ifq_idx, p);
+       ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
        netdev_unlock(dev);
        return ret;
 }
 
-static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
-                             struct pp_memory_provider_params *old_p)
+void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+                       const struct pp_memory_provider_params *old_p)
 {
        struct netdev_rx_queue *rxq;