]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - queue-4.9/ib-rdmavt-fix-frwr-memory-registration.patch
23c69da31dc2cd3cac62b72b37dc2c4f8d51a199
[thirdparty/kernel/stable-queue.git] / queue-4.9 / ib-rdmavt-fix-frwr-memory-registration.patch
1 From 7c39f7f671d2acc0a1f39ebbbee4303ad499bbfa Mon Sep 17 00:00:00 2001
2 From: Josh Collier <josh.d.collier@intel.com>
3 Date: Mon, 15 Apr 2019 11:34:22 -0700
4 Subject: IB/rdmavt: Fix frwr memory registration
5
6 From: Josh Collier <josh.d.collier@intel.com>
7
8 commit 7c39f7f671d2acc0a1f39ebbbee4303ad499bbfa upstream.
9
10 Current implementation was not properly handling frwr memory
11 registrations. This was uncovered by commit 27f26cec761da ("xprtrdma:
12 Plant XID in on-the-wire RDMA offset (FRWR)") in which xprtrdma, which is
13 used for NFS over RDMA, started failing as it was the first ULP to modify
14 the ib_mr iova resulting in the NFS server getting REMOTE ACCESS ERROR
15 when attempting to perform RDMA Writes to the client.
16
17 The fix is to properly capture the true iova, offset, and length in the
18 call to ib_map_mr_sg, and then update the iova when processing the
19 IB_WR_REG_MR on the send queue.
20
21 Fixes: a41081aa5936 ("IB/rdmavt: Add support for ib_map_mr_sg")
22 Cc: stable@vger.kernel.org
23 Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
24 Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
25 Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
26 Signed-off-by: Josh Collier <josh.d.collier@intel.com>
27 Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
28 Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
29 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
30
31 ---
32 drivers/infiniband/sw/rdmavt/mr.c | 17 ++++++++++-------
33 1 file changed, 10 insertions(+), 7 deletions(-)
34
35 --- a/drivers/infiniband/sw/rdmavt/mr.c
36 +++ b/drivers/infiniband/sw/rdmavt/mr.c
37 @@ -497,11 +497,6 @@ static int rvt_set_page(struct ib_mr *ib
38 if (unlikely(mapped_segs == mr->mr.max_segs))
39 return -ENOMEM;
40
41 - if (mr->mr.length == 0) {
42 - mr->mr.user_base = addr;
43 - mr->mr.iova = addr;
44 - }
45 -
46 m = mapped_segs / RVT_SEGSZ;
47 n = mapped_segs % RVT_SEGSZ;
48 mr->mr.map[m]->segs[n].vaddr = (void *)addr;
49 @@ -518,17 +513,24 @@ static int rvt_set_page(struct ib_mr *ib
50 * @sg_nents: number of entries in sg
51 * @sg_offset: offset in bytes into sg
52 *
53 + * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
54 + *
55 * Return: number of sg elements mapped to the memory region
56 */
57 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
58 int sg_nents, unsigned int *sg_offset)
59 {
60 struct rvt_mr *mr = to_imr(ibmr);
61 + int ret;
62
63 mr->mr.length = 0;
64 mr->mr.page_shift = PAGE_SHIFT;
65 - return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
66 - rvt_set_page);
67 + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
68 + mr->mr.user_base = ibmr->iova;
69 + mr->mr.iova = ibmr->iova;
70 + mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
71 + mr->mr.length = (size_t)ibmr->length;
72 + return ret;
73 }
74
75 /**
76 @@ -559,6 +561,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, s
77 ibmr->rkey = key;
78 mr->mr.lkey = key;
79 mr->mr.access_flags = access;
80 + mr->mr.iova = ibmr->iova;
81 atomic_set(&mr->mr.lkey_invalid, 0);
82
83 return 0;