RDMA/umem: Separate implicit ODP initialization from explicit ODP
author     Leon Romanovsky <leonro@nvidia.com>
           Mon, 28 Apr 2025 09:22:20 +0000 (12:22 +0300)
committer  Leon Romanovsky <leon@kernel.org>
           Mon, 12 May 2025 10:06:55 +0000 (06:06 -0400)
Create separate functions for implicit ODP initialization, which
differs from explicit ODP initialization.

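For context, a minimal driver-side sketch of the two paths this patch
separates (illustrative only, not part of the commit; addr, len, access
and notifier_ops are placeholders):

	/*
	 * Implicit ODP: a parent umem with no virtual address range, so
	 * it needs no DMA map and no MMU interval notifier -- only the
	 * flags and mutex set up by ib_init_umem_implicit_odp().
	 */
	struct ib_umem_odp *parent = ib_umem_odp_alloc_implicit(device, access);
	if (IS_ERR(parent))
		return PTR_ERR(parent);

	/*
	 * Explicit ODP: covers [addr, addr + len) and takes the full
	 * ib_init_umem_odp() path (range alignment and overflow checks,
	 * hmm_dma_map_alloc(), mmu_interval_notifier_insert()).
	 */
	struct ib_umem_odp *umem_odp =
		ib_umem_odp_get(device, addr, len, access, &notifier_ops);
	if (IS_ERR(umem_odp))
		goto err_parent;

	...

	ib_umem_odp_release(umem_odp);	/* explicit: unmap, notifier remove, map free */
	ib_umem_odp_release(parent);	/* implicit: skips ib_umem_odp_free() */
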
Tested-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 30cd8f35347694bb161dbd283905abffd4c3b8b5..51d518989914e8b660a171e76ac817a0e8eaa2bc 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
 
 #include "uverbs.h"
 
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
-                                  const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
+{
+       umem_odp->is_implicit_odp = 1;
+       umem_odp->umem.is_odp = 1;
+       mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+                           const struct mmu_interval_notifier_ops *ops)
 {
        struct ib_device *dev = umem_odp->umem.ibdev;
+       size_t page_size = 1UL << umem_odp->page_shift;
+       unsigned long start;
+       unsigned long end;
        int ret;
 
        umem_odp->umem.is_odp = 1;
        mutex_init(&umem_odp->umem_mutex);
 
-       if (!umem_odp->is_implicit_odp) {
-               size_t page_size = 1UL << umem_odp->page_shift;
-               unsigned long start;
-               unsigned long end;
-
-               start = ALIGN_DOWN(umem_odp->umem.address, page_size);
-               if (check_add_overflow(umem_odp->umem.address,
-                                      (unsigned long)umem_odp->umem.length,
-                                      &end))
-                       return -EOVERFLOW;
-               end = ALIGN(end, page_size);
-               if (unlikely(end < page_size))
-                       return -EOVERFLOW;
-
-               ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
-                                       (end - start) >> PAGE_SHIFT,
-                                       1 << umem_odp->page_shift);
-               if (ret)
-                       return ret;
-
-               ret = mmu_interval_notifier_insert(&umem_odp->notifier,
-                                                  umem_odp->umem.owning_mm,
-                                                  start, end - start, ops);
-               if (ret)
-                       goto out_free_map;
-       }
+       start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+       if (check_add_overflow(umem_odp->umem.address,
+                              (unsigned long)umem_odp->umem.length, &end))
+               return -EOVERFLOW;
+       end = ALIGN(end, page_size);
+       if (unlikely(end < page_size))
+               return -EOVERFLOW;
+
+       ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
+                               (end - start) >> PAGE_SHIFT,
+                               1 << umem_odp->page_shift);
+       if (ret)
+               return ret;
+
+       ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+                                          umem_odp->umem.owning_mm, start,
+                                          end - start, ops);
+       if (ret)
+               goto out_free_map;
 
        return 0;
 
 out_free_map:
        hmm_dma_map_free(dev->dma_device, &umem_odp->map);
        return ret;
 }
@@ -106,7 +109,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
 {
        struct ib_umem *umem;
        struct ib_umem_odp *umem_odp;
-       int ret;
 
        if (access & IB_ACCESS_HUGETLB)
                return ERR_PTR(-EINVAL);
@@ -118,16 +120,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
        umem->ibdev = device;
        umem->writable = ib_access_writable(access);
        umem->owning_mm = current->mm;
-       umem_odp->is_implicit_odp = 1;
        umem_odp->page_shift = PAGE_SHIFT;
 
        umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
-       ret = ib_init_umem_odp(umem_odp, NULL);
-       if (ret) {
-               put_pid(umem_odp->tgid);
-               kfree(umem_odp);
-               return ERR_PTR(ret);
-       }
+       ib_init_umem_implicit_odp(umem_odp);
        return umem_odp;
 }
 EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -248,7 +244,7 @@ err_put_pid:
 }
 EXPORT_SYMBOL(ib_umem_odp_get);
 
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
 {
        struct ib_device *dev = umem_odp->umem.ibdev;
 
@@ -258,14 +254,19 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
-       if (!umem_odp->is_implicit_odp) {
-               mutex_lock(&umem_odp->umem_mutex);
-               ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-                                           ib_umem_end(umem_odp));
-               mutex_unlock(&umem_odp->umem_mutex);
-               mmu_interval_notifier_remove(&umem_odp->notifier);
-               hmm_dma_map_free(dev->dma_device, &umem_odp->map);
-       }
+       mutex_lock(&umem_odp->umem_mutex);
+       ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+                                   ib_umem_end(umem_odp));
+       mutex_unlock(&umem_odp->umem_mutex);
+       mmu_interval_notifier_remove(&umem_odp->notifier);
+       hmm_dma_map_free(dev->dma_device, &umem_odp->map);
+}
+
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+{
+       if (!umem_odp->is_implicit_odp)
+               ib_umem_odp_free(umem_odp);
+
        put_pid(umem_odp->tgid);
        kfree(umem_odp);
 }