git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/bnxt_re: Add support for MR Relaxed Ordering
authorKalesh AP <kalesh-anakkur.purayil@broadcom.com>
Mon, 2 Sep 2024 05:52:31 +0000 (22:52 -0700)
committerLeon Romanovsky <leon@kernel.org>
Mon, 2 Sep 2024 09:33:47 +0000 (12:33 +0300)
Some of the adapters support Relaxed Ordering for the MRs.
The driver queries the firmware for Memory Region relaxed-ordering support
and sets the relaxed-ordering bit in the REGISTER_MR request if the user
requests it. This is supported only if the PCIe device has the relaxed
ordering attribute enabled.

Reviewed-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
Reviewed-by: Vijay Kumar Mandadapu <vijaykumar.mandadapu@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Link: https://patch.msgid.link/1725256351-12751-5-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_res.h

index 13e3d71d256aa750d0d9b04c8fd2405d4fced329..82c1f3b2f8250db082099be9a7fc35f509a27fab 100644 (file)
@@ -115,6 +115,14 @@ static enum ib_access_flags __to_ib_access_flags(int qflags)
        return iflags;
 };
 
+static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
+                                                  struct bnxt_qplib_mrw *qplib_mr)
+{
+       if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
+           pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
+               qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
+}
+
 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
                             struct bnxt_qplib_sge *sg_list, int num)
 {
@@ -3888,6 +3896,9 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
        mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
        mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
 
+       if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+               bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
        /* Allocate and register 0 as the address */
        rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
        if (rc)
@@ -4121,6 +4132,9 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
        mr->qplib_mr.va = virt_addr;
        mr->qplib_mr.total_size = length;
 
+       if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+               bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
        umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
                               umem_pgs, page_size);
index b452b2f46ceb6e67f11d255472b334ce7b9b4ced..049805ac95cfc576b2d4c0c4d35bfaaed9b44a73 100644 (file)
@@ -570,4 +570,9 @@ static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
        return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
 }
 
+static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2)
+{
+       return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED;
+}
+
 #endif /* __BNXT_QPLIB_RES_H__ */