git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/bnxt_re: Refactor bnxt_re_create_cq()
author: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Mon, 2 Mar 2026 11:00:34 +0000 (16:30 +0530)
committer: Jason Gunthorpe <jgg@nvidia.com>
Sun, 8 Mar 2026 10:20:25 +0000 (06:20 -0400)
Some applications may allocate dmabuf based memory for CQs. To support
this, update the existing code to use SZ_4K to specify supported HW
page size for CQs, as we support only 4K pages for now.
Call ib_umem_find_best_pgsz() to ensure that the umem supports the
requested page size. Add a helper function that encapsulates these changes.

Link: https://patch.msgid.link/r/20260302110036.36387-5-sriharsha.basavapatna@broadcom.com
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/bnxt_re/ib_verbs.c

index cc082e8125c82bc25c71d57e71905b621bc62c45..99200c7da660bff17fec07092ddc04d64858078c 100644 (file)
@@ -3349,6 +3349,26 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
        return ib_respond_empty_udata(udata);
 }
 
+/*
+ * bnxt_re_setup_sginfo() - validate a CQ umem and populate its sg_info.
+ *
+ * The HW currently supports only 4K pages for CQs, so the umem is probed
+ * with ib_umem_find_best_pgsz(SZ_4K); a umem that cannot be mapped with
+ * 4K pages (or a NULL umem) is rejected with -EINVAL.
+ *
+ * NOTE(review): @rdev is currently unused by this helper — presumably kept
+ * for future per-device page-size capabilities; confirm with the author.
+ * Returns 0 on success, -EINVAL on an unusable umem.
+ */
+static int bnxt_re_setup_sginfo(struct bnxt_re_dev *rdev,
+                               struct ib_umem *umem,
+                               struct bnxt_qplib_sg_info *sginfo)
+{
+       unsigned long page_size;
+
+       if (!umem)
+               return -EINVAL;
+
+       /* Only a 4K HW page size is supported for CQs for now. */
+       page_size = ib_umem_find_best_pgsz(umem, SZ_4K, 0);
+       if (!page_size || page_size != SZ_4K)
+               return -EINVAL;
+
+       sginfo->umem = umem;
+       sginfo->npages = ib_umem_num_dma_blocks(umem, page_size);
+       sginfo->pgsize = page_size;
+       sginfo->pgshft = __builtin_ctz(page_size);
+       return 0;
+}
+
 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct uverbs_attr_bundle *attrs)
 {
@@ -3380,8 +3400,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        if (entries > dev_attr->max_cq_wqes + 1)
                entries = dev_attr->max_cq_wqes + 1;
 
-       cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
-       cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
        if (udata) {
                struct bnxt_re_cq_req req;
 
@@ -3396,7 +3414,10 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                        rc = PTR_ERR(cq->umem);
                        goto fail;
                }
-               cq->qplib_cq.sg_info.umem = cq->umem;
+               rc = bnxt_re_setup_sginfo(rdev, cq->umem, &cq->qplib_cq.sg_info);
+               if (rc)
+                       goto fail;
+
                cq->qplib_cq.dpi = &uctx->dpi;
        } else {
                cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
@@ -3406,6 +3427,8 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                        goto fail;
                }
 
+               cq->qplib_cq.sg_info.pgsize = SZ_4K;
+               cq->qplib_cq.sg_info.pgshft = __builtin_ctz(SZ_4K);
                cq->qplib_cq.dpi = &rdev->dpi_privileged;
        }
        cq->qplib_cq.max_wqe = entries;