RDMA/bnxt_re: Avoid initializing the software queue for user queues
author	Selvin Xavier <selvin.xavier@broadcom.com>
	Wed, 4 Dec 2024 07:54:13 +0000 (13:24 +0530)
committer	Leon Romanovsky <leon@kernel.org>
	Thu, 5 Dec 2024 08:57:52 +0000 (03:57 -0500)
Software queues to hold the WRs need to be created
only for kernel queues. Avoid allocating this unnecessary
memory for user queues.
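
For illustration, a minimal user-space sketch of the pattern this patch
applies, using simplified stand-in types rather than the real bnxt_re
structures: the software queue that shadows the hardware queue is only
allocated (and its free list initialised) when the queue is kernel-owned,
and the allocation is skipped entirely for user queues.

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for the driver structures. */
struct swq_elem {
	int next_idx;
};

struct queue {
	bool is_user;			/* queue created on behalf of user space */
	unsigned int max_elements;	/* assumed > 0, sized with the hardware queue */
	struct swq_elem *swq;		/* kernel-only shadow of posted WRs */
};

/*
 * Allocate the software queue and link its entries into a free list,
 * but only for kernel-owned queues; user queues track their WRs in
 * user space, so no swq memory is needed for them.
 */
static int queue_alloc_swq(struct queue *q)
{
	unsigned int idx;

	if (q->is_user)
		return 0;

	if (!q->max_elements)
		return -EINVAL;

	q->swq = calloc(q->max_elements, sizeof(*q->swq));
	if (!q->swq)
		return -ENOMEM;

	/* Chain the entries; the last one terminates the list. */
	for (idx = 0; idx < q->max_elements; idx++)
		q->swq[idx].next_idx = idx + 1;
	q->swq[q->max_elements - 1].next_idx = -1;

	return 0;
}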

Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver")
Fixes: 159fb4ceacd7 ("RDMA/bnxt_re: introduce a function to allocate swq")
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Link: https://patch.msgid.link/20241204075416.478431-3-kalesh-anakkur.purayil@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/bnxt_re/qplib_fp.c

index 9af8aaadc99a86e2eee4dd1d27fef72321f807ec..72f35070f671c04b478509bb0e15384c7bc1d54c 100644 (file)
@@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
        rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
        if (rc)
                return rc;
-
-       srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
-                          GFP_KERNEL);
-       if (!srq->swq) {
-               rc = -ENOMEM;
-               goto fail;
-       }
        srq->dbinfo.flags = 0;
        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
        spin_lock_init(&srq->lock);
        srq->start_idx = 0;
        srq->last_idx = srq->hwq.max_elements - 1;
-       for (idx = 0; idx < srq->hwq.max_elements; idx++)
-               srq->swq[idx].next_idx = idx + 1;
-       srq->swq[srq->last_idx].next_idx = -1;
+       if (!srq->hwq.is_user) {
+               srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+                                  GFP_KERNEL);
+               if (!srq->swq) {
+                       rc = -ENOMEM;
+                       goto fail;
+               }
+               for (idx = 0; idx < srq->hwq.max_elements; idx++)
+                       srq->swq[idx].next_idx = idx + 1;
+               srq->swq[srq->last_idx].next_idx = -1;
+       }
 
        srq->id = le32_to_cpu(resp.xid);
        srq->dbinfo.hwq = &srq->hwq;
@@ -1042,13 +1043,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
        if (rc)
                return rc;
 
-       rc = bnxt_qplib_alloc_init_swq(sq);
-       if (rc)
-               goto fail_sq;
-
-       if (psn_sz)
-               bnxt_qplib_init_psn_ptr(qp, psn_sz);
+       if (!sq->hwq.is_user) {
+               rc = bnxt_qplib_alloc_init_swq(sq);
+               if (rc)
+                       goto fail_sq;
 
+               if (psn_sz)
+                       bnxt_qplib_init_psn_ptr(qp, psn_sz);
+       }
        req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
        pbl = &sq->hwq.pbl[PBL_LVL_0];
        req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1074,9 +1076,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
                rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
                if (rc)
                        goto sq_swq;
-               rc = bnxt_qplib_alloc_init_swq(rq);
-               if (rc)
-                       goto fail_rq;
+               if (!rq->hwq.is_user) {
+                       rc = bnxt_qplib_alloc_init_swq(rq);
+                       if (rc)
+                               goto fail_rq;
+               }
 
                req.rq_size = cpu_to_le32(rq->max_wqe);
                pbl = &rq->hwq.pbl[PBL_LVL_0];