RDMA/hns: Force rewrite inline flag of WQE
author Lang Cheng <chenglang@huawei.com>
Fri, 18 Jun 2021 10:10:11 +0000 (18:10 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Jul 2021 15:00:00 +0000 (17:00 +0200)
[ Upstream commit e13026578b727becf2614f34a4f35e7f0ed21be1 ]

When a non-inline WR reuses a WQE that was last used for an inline WR, the
stale inline flag must be cleared. Because post_send no longer zeroes the
WQE before reuse, the flag has to be rewritten explicitly for every WR.

Fixes: 62490fd5a865 ("RDMA/hns: Avoid unnecessary memset on WQEs in post_send")
Link: https://lore.kernel.org/r/1624011020-16992-2-git-send-email-liweihang@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/infiniband/hw/hns/hns_roce_hw_v2.c

index 3344b80ecf047b7bb1f8a74f1b2583b81f6de1ef..851acc9d050f76b1935fe56e05b8f3e1ea559d81 100644
@@ -268,8 +268,6 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
 
        dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
 
-       roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
-
        if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
                roce_set_bit(rc_sq_wqe->byte_20,
                             V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
@@ -314,6 +312,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                       (*sge_ind) & (qp->sge.sge_cnt - 1));
 
+       roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+                    !!(wr->send_flags & IB_SEND_INLINE));
        if (wr->send_flags & IB_SEND_INLINE)
                return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
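
To illustrate the reuse hazard this hunk closes, here is a minimal standalone
sketch. The struct, bit position, and helper names below are hypothetical
stand-ins, not the hns driver's definitions: WQEs live in a ring, and once the
memset was dropped, any field a new WR does not explicitly write keeps the
value left by the previous WR in that slot. Writing the inline bit
unconditionally from IB_SEND_INLINE, as the hunk above does with
roce_set_bit(), makes the flag independent of the slot's history.

/* wqe_reuse_demo.c - hypothetical model of the stale-flag bug; not hns code */
#include <stdio.h>
#include <stdbool.h>

#define INLINE_BIT (1u << 12)  /* stand-in for V2_RC_SEND_WQE_BYTE_4_INLINE_S */

struct fake_wqe {
	unsigned int byte_4;   /* flags word, NOT zeroed between uses */
};

/* Buggy variant: sets the bit for inline WRs but never clears it,
 * so a non-inline WR inherits whatever the previous occupant wrote. */
static void fill_wqe_buggy(struct fake_wqe *wqe, bool send_inline)
{
	if (send_inline)
		wqe->byte_4 |= INLINE_BIT;
}

/* Fixed variant: force-rewrites the bit for every WR, inline or not. */
static void fill_wqe_fixed(struct fake_wqe *wqe, bool send_inline)
{
	if (send_inline)
		wqe->byte_4 |= INLINE_BIT;
	else
		wqe->byte_4 &= ~INLINE_BIT;
}

int main(void)
{
	struct fake_wqe slot = { 0 };

	fill_wqe_buggy(&slot, true);   /* inline WR uses the slot first */
	fill_wqe_buggy(&slot, false);  /* non-inline WR reuses it ...   */
	printf("buggy: inline bit = %u\n", !!(slot.byte_4 & INLINE_BIT)); /* 1: stale */

	slot.byte_4 = INLINE_BIT;      /* dirty the slot again */
	fill_wqe_fixed(&slot, false);
	printf("fixed: inline bit = %u\n", !!(slot.byte_4 & INLINE_BIT)); /* 0 */

	return 0;
}

Moving the write from set_rc_inl() (which only runs for inline WRs) up into
set_rwqe_data_seg() (which runs for every RC send WQE) is what turns the
set-only update into the unconditional rewrite shown in the fixed variant.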