git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/bnxt_re: Support application specific CQs
author Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Mon, 2 Mar 2026 11:00:36 +0000 (16:30 +0530)
committer Jason Gunthorpe <jgg@nvidia.com>
Sun, 8 Mar 2026 10:20:25 +0000 (06:20 -0400)
This patch supports application allocated memory for CQs.

The application allocates and manages the CQs directly. To support
this, the driver exports a new comp_mask to indicate direct control
of the CQ. When this comp_mask bit is set in the ureq, the driver
maps this application allocated CQ memory into hardware. As the
application manages this memory, the CQ depth ('cqe') passed by it
must be used as is and the driver shouldn't update it.

For CQs, ib_core supports pinning dmabuf-based application memory,
specified through provider attributes. This umem is managed by the
ib_core and is available in ib_cq. Register 'create_cq_user' devop
to process this umem. The driver also supports the legacy interface
that allocates umem internally.

Link: https://patch.msgid.link/r/20260302110036.36387-7-sriharsha.basavapatna@broadcom.com
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.h
drivers/infiniband/hw/bnxt_re/main.c
include/uapi/rdma/bnxt_re-abi.h

index 3952251697423883ad6e67f058696f7af19a38ad..182128ee4f242b369eae395965ecc69806b06ff7 100644 (file)
@@ -3342,7 +3342,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
        bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
 
        bnxt_re_put_nq(rdev, nq);
-       ib_umem_release(cq->umem);
 
        atomic_dec(&rdev->stats.res.cq_count);
        kfree(cq->cql);
@@ -3369,8 +3368,8 @@ static int bnxt_re_setup_sginfo(struct bnxt_re_dev *rdev,
        return 0;
 }
 
-static int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
-                                 struct uverbs_attr_bundle *attrs)
+int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+                          struct uverbs_attr_bundle *attrs)
 {
        struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
@@ -3402,19 +3401,25 @@ static int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_at
        if (entries > dev_attr->max_cq_wqes + 1)
                entries = dev_attr->max_cq_wqes + 1;
 
-       rc = ib_copy_validate_udata_in(udata, req, cq_handle);
+       rc = ib_copy_validate_udata_in_cm(udata, req, cq_handle,
+                                         BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE);
        if (rc)
                return rc;
 
-       cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
-                              entries * sizeof(struct cq_base),
-                              IB_ACCESS_LOCAL_WRITE);
-       if (IS_ERR(cq->umem)) {
-               rc = PTR_ERR(cq->umem);
-               return rc;
+       if (req.comp_mask & BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE)
+               entries = cqe;
+
+       if (!ibcq->umem) {
+               ibcq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+                                        entries * sizeof(struct cq_base),
+                                        IB_ACCESS_LOCAL_WRITE);
+               if (IS_ERR(ibcq->umem)) {
+                       rc = PTR_ERR(ibcq->umem);
+                       goto fail;
+               }
        }
 
-       rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info);
+       rc = bnxt_re_setup_sginfo(rdev, ibcq->umem, &cq->qplib_cq.sg_info);
        if (rc)
                goto fail;
 
@@ -3462,7 +3467,6 @@ static int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_at
 free_mem:
        free_page((unsigned long)cq->uctx_cq_page);
 fail:
-       ib_umem_release(cq->umem);
        return rc;
 }
 
@@ -3475,7 +3479,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        struct bnxt_re_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
        struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
-       struct bnxt_qplib_chip_ctx *cctx;
        int cqe = attr->cqe;
        int rc, entries;
        u32 active_cqs;
@@ -3493,7 +3496,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        }
 
        cq->rdev = rdev;
-       cctx = rdev->chip_ctx;
        cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
 
        entries = bnxt_re_init_depth(cqe + 1, uctx);
@@ -3542,8 +3544,8 @@ static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
 
        cq->qplib_cq.max_wqe = cq->resize_cqe;
        if (cq->resize_umem) {
-               ib_umem_release(cq->umem);
-               cq->umem = cq->resize_umem;
+               ib_umem_release(cq->ib_cq.umem);
+               cq->ib_cq.umem = cq->resize_umem;
                cq->resize_umem = NULL;
                cq->resize_cqe = 0;
        }
@@ -4142,7 +4144,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
        /* User CQ; the only processing we do is to
         * complete any pending CQ resize operation.
         */
-       if (cq->umem) {
+       if (cq->ib_cq.umem) {
                if (cq->resize_umem)
                        bnxt_re_resize_cq_complete(cq);
                return 0;
index 33e0f66b39eb6d923b9791472fd2d976781ecc7e..3d02c16f54b61dc839708d3d540efadcc5ac4e77 100644 (file)
@@ -108,7 +108,6 @@ struct bnxt_re_cq {
        struct bnxt_qplib_cqe   *cql;
 #define MAX_CQL_PER_POLL       1024
        u32                     max_cql;
-       struct ib_umem          *umem;
        struct ib_umem          *resize_umem;
        int                     resize_cqe;
        void                    *uctx_cq_page;
@@ -254,6 +253,8 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
                      const struct ib_recv_wr **bad_recv_wr);
 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct uverbs_attr_bundle *attrs);
+int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+                          struct uverbs_attr_bundle *attrs);
 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
index 7af514524632e2accf05e559c45327d3577526a5..13ad63b9b1de70112a0b32de9500119654a02921 100644 (file)
@@ -1335,6 +1335,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
        .alloc_ucontext = bnxt_re_alloc_ucontext,
        .create_ah = bnxt_re_create_ah,
        .create_cq = bnxt_re_create_cq,
+       .create_user_cq = bnxt_re_create_user_cq,
        .create_qp = bnxt_re_create_qp,
        .create_srq = bnxt_re_create_srq,
        .create_user_ah = bnxt_re_create_ah,
index ef14e24836b12e23eb59668d6a5532c6c700143f..40955eaba32e60ee0598452065ef0d5ea69af05e 100644 (file)
@@ -102,12 +102,17 @@ struct bnxt_re_pd_resp {
 struct bnxt_re_cq_req {
        __aligned_u64 cq_va;
        __aligned_u64 cq_handle;
+       __aligned_u64 comp_mask;
 };
 
-enum bnxt_re_cq_mask {
+enum bnxt_re_resp_cq_mask {
        BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT = 0x1,
 };
 
+enum bnxt_re_req_cq_mask {
+       BNXT_RE_CQ_FIXED_NUM_CQE_ENABLE = 0x1,
+};
+
 struct bnxt_re_cq_resp {
        __u32 cqid;
        __u32 tail;