RDMA/hns: Fix mis-modifying default congestion control algorithm
author Luoyouming <luoyouming@huawei.com>
Mon, 19 Feb 2024 06:18:05 +0000 (14:18 +0800)
committer Sasha Levin <sashal@kernel.org>
Tue, 26 Mar 2024 22:20:01 +0000 (18:20 -0400)
[ Upstream commit d20a7cf9f714f0763efb56f0f2eeca1cb91315ed ]

Commit 27c5fd271d8b ("RDMA/hns: The UD mode can only be configured
with DCQCN") adds a check of the congestion control algorithm for UD.
But that patch causes a problem: hr_dev->caps.cong_type is global and
shared by all QPs, so overwriting this field with DCQCN for UD QPs
leaves all other QPs unable to use any algorithm except DCQCN.

Revert the modification in commit 27c5fd271d8b ("RDMA/hns: The UD
mode can only be configured with DCQCN"). Add a new field cong_type
to struct hns_roce_qp and configure DCQCN for UD QPs.
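
In effect, check_cong_type() now resolves the algorithm per QP instead
of writing it back into the shared caps; roughly (mirroring the
hns_roce_hw_v2.c hunk below):

    if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI)
            hr_qp->cong_type = CONG_TYPE_DCQCN;        /* UD/GSI always use DCQCN */
    else
            hr_qp->cong_type = hr_dev->caps.cong_type; /* other QPs keep the FW default */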

Fixes: 27c5fd271d8b ("RDMA/hns: The UD mode can only be configured with DCQCN")
Fixes: f91696f2f053 ("RDMA/hns: Support congestion control type selection according to the FW")
Signed-off-by: Luoyouming <luoyouming@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240219061805.668170-1-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 7f0d0288beb1e0ee02c49ca3b692885c85d79bb8..82066859cc113e74a34eefbf63e0b12083d4f880 100644
@@ -581,6 +581,13 @@ struct hns_roce_work {
        u32 queue_num;
 };
 
+enum hns_roce_cong_type {
+       CONG_TYPE_DCQCN,
+       CONG_TYPE_LDCP,
+       CONG_TYPE_HC3,
+       CONG_TYPE_DIP,
+};
+
 struct hns_roce_qp {
        struct ib_qp            ibqp;
        struct hns_roce_wq      rq;
@@ -624,6 +631,7 @@ struct hns_roce_qp {
        struct list_head        sq_node; /* all send qps are on a list */
        struct hns_user_mmap_entry *dwqe_mmap_entry;
        u32                     config;
+       enum hns_roce_cong_type cong_type;
 };
 
 struct hns_roce_ib_iboe {
@@ -695,13 +703,6 @@ struct hns_roce_eq_table {
        struct hns_roce_eq      *eq;
 };
 
-enum cong_type {
-       CONG_TYPE_DCQCN,
-       CONG_TYPE_LDCP,
-       CONG_TYPE_HC3,
-       CONG_TYPE_DIP,
-};
-
 struct hns_roce_caps {
        u64             fw_ver;
        u8              num_ports;
@@ -831,7 +832,7 @@ struct hns_roce_caps {
        u16             default_aeq_period;
        u16             default_aeq_arm_st;
        u16             default_ceq_arm_st;
-       enum cong_type  cong_type;
+       enum hns_roce_cong_type cong_type;
 };
 
 enum hns_roce_device_state {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 3c62a0042da48d6f58347ccf3369d8cf620dd6fc..b4799c83282e262fbfde68c289ae167e1875cad7 100644
@@ -4728,12 +4728,15 @@ static int check_cong_type(struct ib_qp *ibqp,
                           struct hns_roce_congestion_algorithm *cong_alg)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 
-       if (ibqp->qp_type == IB_QPT_UD)
-               hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+       if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI)
+               hr_qp->cong_type = CONG_TYPE_DCQCN;
+       else
+               hr_qp->cong_type = hr_dev->caps.cong_type;
 
        /* different congestion types match different configurations */
-       switch (hr_dev->caps.cong_type) {
+       switch (hr_qp->cong_type) {
        case CONG_TYPE_DCQCN:
                cong_alg->alg_sel = CONG_DCQCN;
                cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
@@ -4761,8 +4764,8 @@ static int check_cong_type(struct ib_qp *ibqp,
        default:
                ibdev_warn(&hr_dev->ib_dev,
                           "invalid type(%u) for congestion selection.\n",
-                          hr_dev->caps.cong_type);
-               hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+                          hr_qp->cong_type);
+               hr_qp->cong_type = CONG_TYPE_DCQCN;
                cong_alg->alg_sel = CONG_DCQCN;
                cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
                cong_alg->dip_vld = DIP_INVALID;
@@ -4781,6 +4784,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
        struct hns_roce_congestion_algorithm cong_field;
        struct ib_device *ibdev = ibqp->device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+       struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        u32 dip_idx = 0;
        int ret;
 
@@ -4793,7 +4797,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
                return ret;
 
        hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
-                    hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
+                    hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
        hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
        hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
        hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);