]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
RDMA/rtrs-srv: fix SG mapping
authorRoman Penyaev <r.peniaev@gmail.com>
Wed, 7 Jan 2026 16:15:08 +0000 (17:15 +0100)
committerLeon Romanovsky <leon@kernel.org>
Tue, 13 Jan 2026 13:01:13 +0000 (08:01 -0500)
This fixes the following error on the server side:

   RTRS server session allocation failed: -EINVAL

caused by the caller of `ib_dma_map_sg()`, which does not expect to get
fewer mapped entries than it requested. Getting fewer entries back is a
normal outcome (entries may be merged by the IOMMU) and can be easily
reproduced on a machine with the IOMMU enabled.

The fix is to treat any positive number of mapped sg entries as a
successful mapping, and to cache the DMA addresses by traversing the
modified SG table.

Fixes: 9cb837480424 ("RDMA/rtrs: server: main functionality")
Signed-off-by: Roman Penyaev <r.peniaev@gmail.com>
Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
Link: https://patch.msgid.link/20260107161517.56357-2-haris.iqbal@ionos.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/ulp/rtrs/rtrs-srv.c

index 7a402eb8e0bf0c653a845562a2ea38754091a185..adb798e2a54ae1afe5b730f0d2e49aedd6c9dc6a 100644 (file)
@@ -595,7 +595,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
             srv_path->mrs_num++) {
                struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
                struct scatterlist *s;
-               int nr, nr_sgt, chunks;
+               int nr, nr_sgt, chunks, ind;
 
                sgt = &srv_mr->sgt;
                chunks = chunks_per_mr * srv_path->mrs_num;
@@ -625,7 +625,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                }
                nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
                                  NULL, max_chunk_size);
-               if (nr != nr_sgt) {
+               if (nr < nr_sgt) {
                        err = nr < 0 ? nr : -EINVAL;
                        goto dereg_mr;
                }
@@ -641,9 +641,24 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                                goto dereg_mr;
                        }
                }
-               /* Eventually dma addr for each chunk can be cached */
-               for_each_sg(sgt->sgl, s, nr_sgt, i)
-                       srv_path->dma_addr[chunks + i] = sg_dma_address(s);
+
+               /*
+                * Cache DMA addresses by traversing sg entries.  If
+                * regions were merged, an inner loop is required to
+                * populate the DMA address array by traversing larger
+                * regions.
+                */
+               ind = chunks;
+               for_each_sg(sgt->sgl, s, nr_sgt, i) {
+                       unsigned int dma_len = sg_dma_len(s);
+                       u64 dma_addr = sg_dma_address(s);
+                       u64 dma_addr_end = dma_addr + dma_len;
+
+                       do {
+                               srv_path->dma_addr[ind++] = dma_addr;
+                               dma_addr += max_chunk_size;
+                       } while (dma_addr < dma_addr_end);
+               }
 
                ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
                srv_mr->mr = mr;