svcrdma: Fix page leak in svc_rdma_recv_read_chunk()
author      Chuck Lever <chuck.lever@oracle.com>
            Thu, 11 Jun 2020 16:44:56 +0000 (12:44 -0400)
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>
            Wed, 19 Aug 2020 06:24:09 +0000 (08:24 +0200)
[ Upstream commit e814eecbe3bbeaa8b004d25a4b8974d232b765a9 ]

Commit 07d0ff3b0cd2 ("svcrdma: Clean up Read chunk path") moved the
page saver logic so that it gets executed even when an error occurs.
In that case, the I/O is never posted, and those pages are then
leaked. Errors in this path, however, are quite rare.

Fixes: 07d0ff3b0cd2 ("svcrdma: Clean up Read chunk path")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 23c2d3ce0dc9a01d162a84f4aba6e607f34a05cf..e0a0ae39848c4da77d28c82134d3a7059fa959ed 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -678,7 +678,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                     struct svc_rdma_read_info *info,
                                     __be32 *p)
 {
-       unsigned int i;
        int ret;
 
        ret = -EINVAL;
@@ -701,12 +700,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                info->ri_chunklen += rs_length;
        }
 
-       /* Pages under I/O have been copied to head->rc_pages.
-        * Prevent their premature release by svc_xprt_release() .
-        */
-       for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
-               rqstp->rq_pages[i] = NULL;
-
        return ret;
 }
 
@@ -801,6 +794,26 @@ out:
        return ret;
 }
 
+/* Pages under I/O have been copied to head->rc_pages. Ensure they
+ * are not released by svc_xprt_release() until the I/O is complete.
+ *
+ * This has to be done after all Read WRs are constructed to properly
+ * handle a page that is part of I/O on behalf of two different RDMA
+ * segments.
+ *
+ * Do this only if I/O has been posted. Otherwise, we do indeed want
+ * svc_xprt_release() to clean things up properly.
+ */
+static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
+                                  const unsigned int start,
+                                  const unsigned int num_pages)
+{
+       unsigned int i;
+
+       for (i = start; i < num_pages + start; i++)
+               rqstp->rq_pages[i] = NULL;
+}
+
 /**
  * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
  * @rdma: controlling RDMA transport
@@ -854,6 +867,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
        ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
        if (ret < 0)
                goto out_err;
+       svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
        return 0;
 
 out_err:
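
For illustration only (not part of the upstream patch): the standalone userspace
model below sketches the ownership problem the commit message describes. Heap
buffers stand in for the entries of rqstp->rq_pages, release_pages() plays the
role of svc_xprt_release(), and read_chunk_before(), read_chunk_after(),
NR_PAGES, and the post_ok flag are hypothetical names invented for this sketch;
only svc_rdma_save_io_pages() and svc_xprt_release() refer to real kernel
symbols. Clearing the page slots before knowing whether the I/O was posted
leaks the pages on error; clearing them only after a successful post, as the
patch does, leaves them for svc_xprt_release() to reclaim.

/* Illustrative userspace model of the leak and the fix; this is NOT kernel
 * code.  malloc()ed buffers stand in for the pages in rqstp->rq_pages, and
 * release_pages() plays the role of svc_xprt_release(), which frees every
 * slot that is still non-NULL.
 */
#include <stdlib.h>
#include <stdbool.h>

#define NR_PAGES 4

static void release_pages(void **pages, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		free(pages[i]);			/* free(NULL) is a no-op */
		pages[i] = NULL;
	}
}

/* Broken ordering (pre-fix): the slots are cleared while the chunk is being
 * built, before we know whether the I/O will ever be posted.  When post_ok
 * is false the buffers are reachable only through io_pages[] and are never
 * freed -> leak.
 */
static int read_chunk_before(void **pages, void **io_pages,
			     unsigned int n, bool post_ok)
{
	for (unsigned int i = 0; i < n; i++) {
		io_pages[i] = pages[i];
		pages[i] = NULL;		/* too early */
	}
	return post_ok ? 0 : -1;		/* stand-in for posting the Read WRs */
}

/* Fixed ordering: clear the slots only after the post has succeeded, so a
 * failed post leaves the pages in place for release_pages() to free.
 */
static int read_chunk_after(void **pages, void **io_pages,
			    unsigned int n, bool post_ok)
{
	for (unsigned int i = 0; i < n; i++)
		io_pages[i] = pages[i];
	if (!post_ok)
		return -1;
	for (unsigned int i = 0; i < n; i++)	/* models svc_rdma_save_io_pages() */
		pages[i] = NULL;
	return 0;
}

int main(void)
{
	void *pages[NR_PAGES], *io_pages[NR_PAGES];

	/* Posting failure with the old ordering: release_pages() sees only
	 * NULL slots, so all four buffers are leaked.
	 */
	for (unsigned int i = 0; i < NR_PAGES; i++)
		pages[i] = malloc(4096);
	read_chunk_before(pages, io_pages, NR_PAGES, false);
	release_pages(pages, NR_PAGES);

	/* Same failure with the fixed ordering: the slots are untouched, so
	 * release_pages() reclaims everything.
	 */
	for (unsigned int i = 0; i < NR_PAGES; i++)
		pages[i] = malloc(4096);
	read_chunk_after(pages, io_pages, NR_PAGES, false);
	release_pages(pages, NR_PAGES);

	return 0;
}

Running this model under valgrind or LeakSanitizer reports the four allocations
leaked by the pre-fix ordering and nothing for the fixed one, mirroring the
behaviour the commit message describes for the rare error path.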