git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
RDMA/umem: Add ib_umem_dmabuf_get_pinned_and_lock helper
author: Jacob Moroni <jmoroni@google.com>
Thu, 5 Mar 2026 17:08:22 +0000 (17:08 +0000)
committer: Leon Romanovsky <leon@kernel.org>
Sun, 8 Mar 2026 12:37:38 +0000 (08:37 -0400)
Move the inner logic of ib_umem_dmabuf_get_pinned_with_dma_device()
to a new static function that returns with the lock held upon success.

The intent is to allow reuse for the future get_pinned_revocable_and_lock
function.

Signed-off-by: Jacob Moroni <jmoroni@google.com>
Link: https://patch.msgid.link/20260305170826.3803155-2-jmoroni@google.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/core/umem_dmabuf.c

index d30f24b90bca9ed96745e9a37f6e816207f2c341..0c0098285c38be5f4fd036e3a89f87f411c9a014 100644 (file)
@@ -195,18 +195,19 @@ static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
        .move_notify = ib_umem_dmabuf_unsupported_move_notify,
 };
 
-struct ib_umem_dmabuf *
-ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
-                                         struct device *dma_device,
-                                         unsigned long offset, size_t size,
-                                         int fd, int access)
+static struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_and_lock(struct ib_device *device,
+                                  struct device *dma_device,
+                                  unsigned long offset,
+                                  size_t size, int fd, int access,
+                                  const struct dma_buf_attach_ops *ops)
 {
        struct ib_umem_dmabuf *umem_dmabuf;
        int err;
 
-       umem_dmabuf = ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset,
-                                                        size, fd, access,
-                                                        &ib_umem_dmabuf_attach_pinned_ops);
+       umem_dmabuf =
+               ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset,
+                                                  size, fd, access, ops);
        if (IS_ERR(umem_dmabuf))
                return umem_dmabuf;
 
@@ -219,7 +220,6 @@ ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
        err = ib_umem_dmabuf_map_pages(umem_dmabuf);
        if (err)
                goto err_release;
-       dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
 
        return umem_dmabuf;
 
@@ -228,6 +228,23 @@ err_release:
        ib_umem_release(&umem_dmabuf->umem);
        return ERR_PTR(err);
 }
+
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+                                         struct device *dma_device,
+                                         unsigned long offset, size_t size,
+                                         int fd, int access)
+{
+       struct ib_umem_dmabuf *umem_dmabuf =
+               ib_umem_dmabuf_get_pinned_and_lock(device, dma_device, offset,
+                                                  size, fd, access,
+                                                  &ib_umem_dmabuf_attach_pinned_ops);
+       if (IS_ERR(umem_dmabuf))
+               return umem_dmabuf;
+
+       dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+       return umem_dmabuf;
+}
 EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device);
 
 struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,