git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/bnxt_re: Separate kernel and user CQ creation paths
author: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Mon, 2 Mar 2026 11:00:35 +0000 (16:30 +0530)
committer: Jason Gunthorpe <jgg@nvidia.com>
Sun, 8 Mar 2026 10:20:25 +0000 (06:20 -0400)
This patch refactors kernel and user CQ creation logic into
two separate code paths. This will be used to support dmabuf
based user CQ memory in the next patch. There is no functional
change in this patch.

Link: https://patch.msgid.link/r/20260302110036.36387-6-sriharsha.basavapatna@broadcom.com
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/bnxt_re/ib_verbs.c

index 99200c7da660bff17fec07092ddc04d64858078c..3952251697423883ad6e67f058696f7af19a38ad 100644 (file)
@@ -3369,8 +3369,8 @@ static int bnxt_re_setup_sginfo(struct bnxt_re_dev *rdev,
        return 0;
 }
 
-int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
-                     struct uverbs_attr_bundle *attrs)
+static int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+                                 struct uverbs_attr_bundle *attrs)
 {
        struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
@@ -3379,6 +3379,8 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
        struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
        struct bnxt_qplib_chip_ctx *cctx;
+       struct bnxt_re_cq_resp resp = {};
+       struct bnxt_re_cq_req req;
        int cqe = attr->cqe;
        int rc, entries;
        u32 active_cqs;
@@ -3400,37 +3402,23 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        if (entries > dev_attr->max_cq_wqes + 1)
                entries = dev_attr->max_cq_wqes + 1;
 
-       if (udata) {
-               struct bnxt_re_cq_req req;
-
-               rc = ib_copy_validate_udata_in(udata, req, cq_handle);
-               if (rc)
-                       goto fail;
+       rc = ib_copy_validate_udata_in(udata, req, cq_handle);
+       if (rc)
+               return rc;
 
-               cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
-                                      entries * sizeof(struct cq_base),
-                                      IB_ACCESS_LOCAL_WRITE);
-               if (IS_ERR(cq->umem)) {
-                       rc = PTR_ERR(cq->umem);
-                       goto fail;
-               }
-               rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info);
-               if (rc)
-                       goto fail;
+       cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
+                              entries * sizeof(struct cq_base),
+                              IB_ACCESS_LOCAL_WRITE);
+       if (IS_ERR(cq->umem)) {
+               rc = PTR_ERR(cq->umem);
+               return rc;
+       }
 
-               cq->qplib_cq.dpi = &uctx->dpi;
-       } else {
-               cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
-               cq->cql = kzalloc_objs(struct bnxt_qplib_cqe, cq->max_cql);
-               if (!cq->cql) {
-                       rc = -ENOMEM;
-                       goto fail;
-               }
+       rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info);
+       if (rc)
+               goto fail;
 
-               cq->qplib_cq.sg_info.pgsize = SZ_4K;
-               cq->qplib_cq.sg_info.pgshft = __builtin_ctz(SZ_4K);
-               cq->qplib_cq.dpi = &rdev->dpi_privileged;
-       }
+       cq->qplib_cq.dpi = &uctx->dpi;
        cq->qplib_cq.max_wqe = entries;
        cq->qplib_cq.coalescing = &rdev->cq_coalescing;
        cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
@@ -3444,42 +3432,103 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
        cq->ib_cq.cqe = entries;
        cq->cq_period = cq->qplib_cq.period;
-
        active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
        if (active_cqs > rdev->stats.res.cq_watermark)
                rdev->stats.res.cq_watermark = active_cqs;
        spin_lock_init(&cq->cq_lock);
 
-       if (udata) {
-               struct bnxt_re_cq_resp resp = {};
-
-               if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
-                       hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
-                       /* Allocate a page */
-                       cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
-                       if (!cq->uctx_cq_page) {
-                               rc = -ENOMEM;
-                               goto c2fail;
-                       }
-                       resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
-               }
-               resp.cqid = cq->qplib_cq.id;
-               resp.tail = cq->qplib_cq.hwq.cons;
-               resp.phase = cq->qplib_cq.period;
-               resp.rsvd = 0;
-               rc = ib_respond_udata(udata, resp);
-               if (rc) {
-                       bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
-                       goto free_mem;
+       if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+               hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
+               /* Allocate a page */
+               cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
+               if (!cq->uctx_cq_page) {
+                       rc = -ENOMEM;
+                       goto fail;
                }
+               resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+       }
+       resp.cqid = cq->qplib_cq.id;
+       resp.tail = cq->qplib_cq.hwq.cons;
+       resp.phase = cq->qplib_cq.period;
+       resp.rsvd = 0;
+       rc = ib_respond_udata(udata, resp);
+       if (rc) {
+               bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+               goto free_mem;
        }
 
        return 0;
 
 free_mem:
        free_page((unsigned long)cq->uctx_cq_page);
-c2fail:
+fail:
        ib_umem_release(cq->umem);
+       return rc;
+}
+
+int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+                     struct uverbs_attr_bundle *attrs)
+{
+       struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+       struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
+       struct ib_udata *udata = &attrs->driver_udata;
+       struct bnxt_re_ucontext *uctx =
+               rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+       struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+       struct bnxt_qplib_chip_ctx *cctx;
+       int cqe = attr->cqe;
+       int rc, entries;
+       u32 active_cqs;
+
+       if (udata)
+               return bnxt_re_create_user_cq(ibcq, attr, attrs);
+
+       if (attr->flags)
+               return -EOPNOTSUPP;
+
+       /* Validate CQ fields */
+       if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+               ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
+               return -EINVAL;
+       }
+
+       cq->rdev = rdev;
+       cctx = rdev->chip_ctx;
+       cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
+
+       entries = bnxt_re_init_depth(cqe + 1, uctx);
+       if (entries > dev_attr->max_cq_wqes + 1)
+               entries = dev_attr->max_cq_wqes + 1;
+
+       cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
+       cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
+                         GFP_KERNEL);
+       if (!cq->cql)
+               return -ENOMEM;
+
+       cq->qplib_cq.sg_info.pgsize = SZ_4K;
+       cq->qplib_cq.sg_info.pgshft = __builtin_ctz(SZ_4K);
+       cq->qplib_cq.dpi = &rdev->dpi_privileged;
+       cq->qplib_cq.max_wqe = entries;
+       cq->qplib_cq.coalescing = &rdev->cq_coalescing;
+       cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
+       cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id;
+
+       rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
+       if (rc) {
+               ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
+               goto fail;
+       }
+
+       cq->ib_cq.cqe = entries;
+       cq->cq_period = cq->qplib_cq.period;
+       active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
+       if (active_cqs > rdev->stats.res.cq_watermark)
+               rdev->stats.res.cq_watermark = active_cqs;
+       spin_lock_init(&cq->cq_lock);
+
+       return 0;
+
 fail:
        kfree(cq->cql);
        return rc;