]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
RDMA/mana_ib: Implement DMABUF MR support
authorKonstantin Taranov <kotaranov@microsoft.com>
Thu, 13 Feb 2025 13:54:21 +0000 (05:54 -0800)
committerLeon Romanovsky <leon@kernel.org>
Tue, 18 Feb 2025 13:26:27 +0000 (08:26 -0500)
Add support of dmabuf MRs to mana_ib.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/1739454861-4456-1-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mana/device.c
drivers/infiniband/hw/mana/mana_ib.h
drivers/infiniband/hw/mana/mr.c

index 0c7a9929168f3f68ccb11c6fc9f663394f059abd..d1a02c54a236f99ab4699246dcc8a22290c40cfc 100644 (file)
@@ -48,6 +48,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
        .query_pkey = mana_ib_query_pkey,
        .query_port = mana_ib_query_port,
        .reg_user_mr = mana_ib_reg_user_mr,
+       .reg_user_mr_dmabuf = mana_ib_reg_user_mr_dmabuf,
        .req_notify_cq = mana_ib_arm_cq,
 
        INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
index 7e342e070b7089bbd2cae6fae6def6c832fdb21e..77fc1032eda8fa668523108f44e0eca7dfd43c75 100644 (file)
@@ -682,4 +682,8 @@ int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 
 int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
+struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
+                                        u64 iova, int fd, int mr_access_flags,
+                                        struct uverbs_attr_bundle *attrs);
 #endif
index 3a047f8c9fc52c9b84e27b338d8c6fd64d830f8d..f99557ec77678132e394fc4df63b5e3d450e3160 100644 (file)
@@ -173,6 +173,75 @@ err_free:
        return ERR_PTR(err);
 }
 
+/*
+ * mana_ib_reg_user_mr_dmabuf() - register a dmabuf-backed user memory region.
+ * @ibpd:         protection domain the MR is created in
+ * @start:        userspace address of the region
+ * @length:       length of the region in bytes
+ * @iova:         virtual address the HW will use to address the region
+ * @fd:           file descriptor of the dmabuf
+ * @access_flags: IB access flags requested by the caller
+ * @attrs:        uverbs attribute bundle (unused here)
+ *
+ * Returns a valid &struct ib_mr pointer on success or an ERR_PTR() on failure.
+ * The dmabuf pages are pinned for the lifetime of the MR; the intermediate
+ * DMA region handle is handed off to the PF once the MR is created.
+ */
+struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
+                                        u64 iova, int access_flags,
+                                        struct uverbs_attr_bundle *attrs)
+{
+       struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+       struct gdma_create_mr_params mr_params = {};
+       struct ib_device *ibdev = ibpd->device;
+       struct ib_umem_dmabuf *umem_dmabuf;
+       struct mana_ib_dev *dev;
+       struct mana_ib_mr *mr;
+       u64 dma_region_handle;
+       int err;
+
+       dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+       /*
+        * Optional flags are not forwarded to the HW; anything beyond the
+        * flags this device supports is rejected rather than ignored.
+        */
+       access_flags &= ~IB_ACCESS_OPTIONAL;
+       if (access_flags & ~VALID_MR_FLAGS)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+
+       /*
+        * A pinned dmabuf mapping is used, so the pages stay resident for the
+        * MR's lifetime; presumably the device cannot handle dynamic dmabuf
+        * move notifications — confirm against HW capabilities.
+        */
+       umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
+       if (IS_ERR(umem_dmabuf)) {
+               err = PTR_ERR(umem_dmabuf);
+               ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
+               goto err_free;
+       }
+
+       /* From here on the umem is released through mr->umem on unwind. */
+       mr->umem = &umem_dmabuf->umem;
+
+       err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
+       if (err) {
+               ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
+                         err);
+               goto err_umem;
+       }
+
+       /* GVA MR: the HW translates @iova through the DMA region just built. */
+       mr_params.pd_handle = pd->pd_handle;
+       mr_params.mr_type = GDMA_MR_TYPE_GVA;
+       mr_params.gva.dma_region_handle = dma_region_handle;
+       mr_params.gva.virtual_address = iova;
+       mr_params.gva.access_flags =
+               mana_ib_verbs_to_gdma_access_flags(access_flags);
+
+       err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+       if (err)
+               goto err_dma_region;
+
+       /*
+        * There is no need to keep track of dma_region_handle after MR is
+        * successfully created. The dma_region_handle is tracked in the PF
+        * as part of the lifecycle of this MR.
+        */
+
+       return &mr->ibmr;
+
+err_dma_region:
+       /* MR creation failed: the handle was not handed off, destroy it. */
+       mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
+
+err_umem:
+       /* Unpins and releases the dmabuf umem acquired above. */
+       ib_umem_release(mr->umem);
+
+err_free:
+       kfree(mr);
+       return ERR_PTR(err);
+}
+
 struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
 {
        struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);