git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: devmem: pull out dma_dev out of net_devmem_bind_dmabuf
Author: Dragos Tatulea <dtatulea@nvidia.com>
Wed, 27 Aug 2025 14:39:59 +0000 (17:39 +0300)
Committer: Jakub Kicinski <kuba@kernel.org>
Thu, 28 Aug 2025 23:05:32 +0000 (16:05 -0700)
Fetch the DMA device before calling net_devmem_bind_dmabuf()
and pass it on as a parameter.

This is needed for an upcoming change which will read the
DMA device per queue.

This patch has no functional changes.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20250827144017.1529208-7-dtatulea@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/devmem.c
net/core/devmem.h
net/core/netdev-genl.c

index c58b241287272ec6b7e94379e0ee6a1fae4abab0..d9de31a6cc7f89da62c8d66b45f9207b6b7f4fc4 100644 (file)
@@ -176,30 +176,28 @@ err_close_rxq:
 
 struct net_devmem_dmabuf_binding *
 net_devmem_bind_dmabuf(struct net_device *dev,
+                      struct device *dma_dev,
                       enum dma_data_direction direction,
                       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
                       struct netlink_ext_ack *extack)
 {
        struct net_devmem_dmabuf_binding *binding;
        static u32 id_alloc_next;
-       struct device *dma_dev;
        struct scatterlist *sg;
        struct dma_buf *dmabuf;
        unsigned int sg_idx, i;
        unsigned long virtual;
        int err;
 
-       dmabuf = dma_buf_get(dmabuf_fd);
-       if (IS_ERR(dmabuf))
-               return ERR_CAST(dmabuf);
-
-       dma_dev = netdev_queue_get_dma_dev(dev, 0);
        if (!dma_dev) {
-               err = -EOPNOTSUPP;
                NL_SET_ERR_MSG(extack, "Device doesn't support DMA");
-               goto err_put_dmabuf;
+               return ERR_PTR(-EOPNOTSUPP);
        }
 
+       dmabuf = dma_buf_get(dmabuf_fd);
+       if (IS_ERR(dmabuf))
+               return ERR_CAST(dmabuf);
+
        binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
                               dev_to_node(&dev->dev));
        if (!binding) {
index 41cd6e1c914128b3dca73d224b7d15bb6ead54cb..101150d761af27eb0bcb2ce23c5709bb1d1ec212 100644 (file)
@@ -85,6 +85,7 @@ struct dmabuf_genpool_chunk_owner {
 void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
 struct net_devmem_dmabuf_binding *
 net_devmem_bind_dmabuf(struct net_device *dev,
+                      struct device *dma_dev,
                       enum dma_data_direction direction,
                       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
                       struct netlink_ext_ack *extack);
@@ -170,6 +171,7 @@ static inline void net_devmem_put_net_iov(struct net_iov *niov)
 
 static inline struct net_devmem_dmabuf_binding *
 net_devmem_bind_dmabuf(struct net_device *dev,
+                      struct device *dma_dev,
                       enum dma_data_direction direction,
                       unsigned int dmabuf_fd,
                       struct netdev_nl_sock *priv,
index 6314eb7bdf69bf2d37ff1c7f1a56275f2294908b..3e2d6aa6e0606fe14da394500d2c5e5bbf6ff206 100644 (file)
@@ -876,6 +876,7 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
        u32 ifindex, dmabuf_fd, rxq_idx;
        struct netdev_nl_sock *priv;
        struct net_device *netdev;
+       struct device *dma_dev;
        struct sk_buff *rsp;
        struct nlattr *attr;
        int rem, err = 0;
@@ -921,8 +922,9 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock;
        }
 
-       binding = net_devmem_bind_dmabuf(netdev, DMA_FROM_DEVICE, dmabuf_fd,
-                                        priv, info->extack);
+       dma_dev = netdev_queue_get_dma_dev(netdev, 0);
+       binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
+                                        dmabuf_fd, priv, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
                goto err_unlock;
@@ -986,6 +988,7 @@ int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
        struct net_devmem_dmabuf_binding *binding;
        struct netdev_nl_sock *priv;
        struct net_device *netdev;
+       struct device *dma_dev;
        u32 ifindex, dmabuf_fd;
        struct sk_buff *rsp;
        int err = 0;
@@ -1032,8 +1035,9 @@ int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock_netdev;
        }
 
-       binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd, priv,
-                                        info->extack);
+       dma_dev = netdev_queue_get_dma_dev(netdev, 0);
+       binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_TO_DEVICE,
+                                        dmabuf_fd, priv, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
                goto err_unlock_netdev;