RDMA/mana_ib: helpers to allocate kernel queues
author:    Konstantin Taranov <kotaranov@microsoft.com>
           Mon, 20 Jan 2025 17:27:09 +0000 (09:27 -0800)
committer: Leon Romanovsky <leon@kernel.org>
           Tue, 4 Feb 2025 13:27:03 +0000 (08:27 -0500)
Introduce helpers to allocate queues for kernel-level use. Unlike user
queues, these are backed by kernel memory (kmem) rather than a user
memory region (umem), and mana_ib_destroy_queue() is extended to
release them.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/1737394039-28772-4-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Shiraz Saleem <shirazsaleem@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mana/main.c
drivers/infiniband/hw/mana/mana_ib.h
drivers/net/ethernet/microsoft/mana/gdma_main.c

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 45b251b911310e083ecc4077fe5dcd10597cdde2..f2f6bb35204836647bfe9e1087ab6aad3d9360ba 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -240,6 +240,27 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
                ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
 }
 
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+                               struct mana_ib_queue *queue)
+{
+       struct gdma_context *gc = mdev_to_gc(mdev);
+       struct gdma_queue_spec spec = {};
+       int err;
+
+       queue->id = INVALID_QUEUE_ID;
+       queue->gdma_region = GDMA_INVALID_DMA_REGION;
+       spec.type = type;
+       spec.monitor_avl_buf = false;
+       spec.queue_size = size;
+       err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+       if (err)
+               return err;
+       /* take ownership of the DMA region from mana into mana_ib */
+       queue->gdma_region = queue->kmem->mem_info.dma_region_handle;
+       queue->kmem->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+       return 0;
+}
+
 int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
                         struct mana_ib_queue *queue)
 {
@@ -279,6 +300,8 @@ void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue
         */
        mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
        ib_umem_release(queue->umem);
+       if (queue->kmem)
+               mana_gd_destroy_queue(mdev_to_gc(mdev), queue->kmem);
 }
 
 static int
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index b53a5b4de908d05a5ceadc95fe4ac40cb3e3b7de..79ebd95980061883d4d2b5e9e82732f7d764146f 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -52,6 +52,7 @@ struct mana_ib_adapter_caps {
 
 struct mana_ib_queue {
        struct ib_umem *umem;
+       struct gdma_queue *kmem;
        u64 gdma_region;
        u64 id;
 };
@@ -388,6 +389,8 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
                                  mana_handle_t gdma_region);
 
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+                               struct mana_ib_queue *queue);
 int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
                         struct mana_ib_queue *queue);
 void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index be95336ce089aaa657df55bd4094b81d386e49fe..4e71987bbf2cd856cc774197beb6719858998c49 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -867,6 +867,7 @@ free_q:
        kfree(queue);
        return err;
 }
+EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");
 
 void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
 {
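
For context, a minimal usage sketch of the new helper follows; it is not
part of the commit. The wrapper function name and the PAGE_SIZE/GDMA_CQ
choices are illustrative assumptions (GDMA_CQ is a pre-existing
gdma_queue_type value in the mana driver), and the mana_ib_queue is
zero-initialized so that queue.umem stays NULL on the kernel path.

/*
 * Illustrative sketch only -- not part of the commit.  Pairs the new
 * mana_ib_create_kernel_queue() with the extended mana_ib_destroy_queue().
 * The function name, queue type, and size are assumptions for the example.
 */
static int example_create_kernel_cq(struct mana_ib_dev *mdev)
{
	struct mana_ib_queue queue = {};	/* zeroed: umem stays NULL */
	int err;

	/* Allocate a completion queue backed by kernel memory (kmem). */
	err = mana_ib_create_kernel_queue(mdev, PAGE_SIZE, GDMA_CQ, &queue);
	if (err)
		return err;

	/* ... use queue.id and queue.kmem here ... */

	/*
	 * queue.kmem is set, so the destroy path also calls
	 * mana_gd_destroy_queue(); ib_umem_release(NULL) is a no-op.
	 */
	mana_ib_destroy_queue(mdev, &queue);
	return 0;
}

Note the handoff in mana_ib_create_kernel_queue(): the dma_region_handle
is moved into queue->gdma_region and invalidated in kmem, so
mana_ib_gd_destroy_dma_region() and mana_gd_destroy_queue() each release
only their own half and the DMA region is not destroyed twice.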