From: Jack Wang
Date: Wed, 21 Aug 2024 11:22:08 +0000 (+0200)
Subject: RDMA/rtrs-clt: Fix need_inv setting in error case
X-Git-Tag: v6.12-rc1~72^2~67
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8c8dd4e13bd5a716a0a2fa58e7e2f08b4af073f7;p=thirdparty%2Flinux.git

RDMA/rtrs-clt: Fix need_inv setting in error case

In some cases need_inv can be missed for write requests; additionally,
the driver has to handle missing invalidates for write requests.

While at it, remove the else case from the write invalidate path, as it
is possible to reach it.

Signed-off-by: Jack Wang
Signed-off-by: Md Haris Iqbal
Signed-off-by: Grzegorz Prajsner
Link: https://patch.msgid.link/20240821112217.41827-3-haris.iqbal@ionos.com
Signed-off-by: Leon Romanovsky
---

diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 66ac4dba990f9..d09018c11ecee 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -391,11 +391,12 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
 	clt_path = to_clt_path(con->c.path);
 
 	if (req->sg_cnt) {
-		if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
+		if (req->need_inv) {
 			/*
-			 * We are here to invalidate read requests
+			 * We are here to invalidate read/write requests
 			 * ourselves. In normal scenario server should
-			 * send INV for all read requests, but
+			 * send INV for all read requests, we do chained local
+			 * invalidate for write requests, but
 			 * we are here, thus two things could happen:
 			 *
 			 *      1. this is failover, when errno != 0
@@ -422,14 +423,6 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
 				  req->mr->rkey, err);
 		} else if (can_wait) {
 			wait_for_completion(&req->inv_comp);
-		} else {
-			/*
-			 * Something went wrong, so request will be
-			 * completed from INV callback.
-			 */
-			WARN_ON_ONCE(1);
-
-			return;
 		}
 		if (!refcount_dec_and_test(&req->ref))
 			return;
@@ -1146,6 +1139,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
 		};
 		wr = &rwr.wr;
 		fr_en = true;
+		req->need_inv = true;
 		refcount_inc(&req->ref);
 	}
 	/*
@@ -1164,6 +1158,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
 			    clt_path->hca_port);
 		if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
 			atomic_dec(&clt_path->stats->inflight);
+		if (req->need_inv) {
+			req->need_inv = false;
+			refcount_dec(&req->ref);
+		}
 		if (req->sg_cnt)
 			ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
 					req->sg_cnt, req->dir);
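
Below is a minimal, self-contained userspace sketch of the bookkeeping the
patch establishes; it is not kernel code. Every name in it (fake_req,
req_get, req_put, post_write, complete_req) is invented for illustration and
is not part of the rtrs or RDMA core API. It only models the pairing the
patch relies on: setting need_inv and taking the extra request reference
happen together when a write is posted with fast registration, and a posting
error rolls both back, so the completion path can treat read and write
requests the same way.

/*
 * Minimal userspace model of the need_inv / reference pairing above.
 * Everything here is a stand-in invented for illustration; it is not
 * the rtrs or RDMA core API.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_req {
	int  ref;       /* stands in for the refcount_t req->ref   */
	bool need_inv;  /* MR still needs a local invalidate        */
};

static void req_get(struct fake_req *r) { r->ref++; }

/* Drop one reference; return true when the last one is gone. */
static bool req_put(struct fake_req *r) { return --r->ref == 0; }

/*
 * Post a write with fast registration enabled; 'fail' stands in for an
 * ib_post_send() error. Mirrors the two rtrs_clt_write_req() hunks:
 * need_inv and the extra reference are taken together, and both are
 * rolled back on a posting error.
 */
static int post_write(struct fake_req *r, bool fail)
{
	r->need_inv = true;
	req_get(r);

	if (fail) {
		r->need_inv = false;
		req_put(r);
		return -1;
	}
	return 0;
}

/*
 * Completion side, loosely modelling complete_rdma_req(): if the request
 * still needs an invalidate, perform it (elided here) and drop the
 * reference taken at post time, for reads and writes alike.
 */
static void complete_req(struct fake_req *r)
{
	if (r->need_inv) {
		/* a local MR invalidate would be issued/awaited here */
		r->need_inv = false;
		req_put(r);
	}
	/* DMA unmap and upper-layer notification would follow here */
}

int main(void)
{
	struct fake_req r = { .ref = 1, .need_inv = false };

	if (post_write(&r, false) == 0)
		complete_req(&r);
	printf("success path: ref=%d need_inv=%d\n", r.ref, r.need_inv);

	post_write(&r, true);
	printf("error path:   ref=%d need_inv=%d\n", r.ref, r.need_inv);
	return 0;
}

On both paths the model ends with a single reference and need_inv cleared,
which is the invariant the error hunk in rtrs_clt_write_req() restores.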