git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
RDMA/rtrs: Add optional support for IB_MR_TYPE_SG_GAPS
authorMd Haris Iqbal <haris.iqbal@ionos.com>
Wed, 7 Jan 2026 16:15:10 +0000 (17:15 +0100)
committerLeon Romanovsky <leon@kernel.org>
Tue, 13 Jan 2026 13:01:13 +0000 (08:01 -0500)
Support IB_MR_TYPE_SG_GAPS, which has fewer limitations
than the standard IB_MR_TYPE_MEM_REG; a few ULPs already support it.

Signed-off-by: Md Haris Iqbal <haris.iqbal@ionos.com>
Signed-off-by: Kim Zhu <zhu.yanjun@ionos.com>
Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
Link: https://patch.msgid.link/20260107161517.56357-4-haris.iqbal@ionos.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/infiniband/ulp/rtrs/rtrs-srv.c

index ee7d505ff016ce06f5848f527d982a68cae6fbd5..58042d835045fb9593be3c6a49d857bde86168cd 100644 (file)
@@ -1359,7 +1359,9 @@ static void free_path_reqs(struct rtrs_clt_path *clt_path)
 
 static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
 {
+       struct ib_device *ib_dev = clt_path->s.dev->ib_dev;
        struct rtrs_clt_io_req *req;
+       enum ib_mr_type mr_type;
        int i, err = -ENOMEM;
 
        clt_path->reqs = kcalloc(clt_path->queue_depth,
@@ -1368,6 +1370,11 @@ static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
        if (!clt_path->reqs)
                return -ENOMEM;
 
+       if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+               mr_type = IB_MR_TYPE_SG_GAPS;
+       else
+               mr_type = IB_MR_TYPE_MEM_REG;
+
        for (i = 0; i < clt_path->queue_depth; ++i) {
                req = &clt_path->reqs[i];
                req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
@@ -1381,8 +1388,7 @@ static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
                if (!req->sge)
                        goto out;
 
-               req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
-                                     IB_MR_TYPE_MEM_REG,
+               req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, mr_type,
                                      clt_path->max_pages_per_mr);
                if (IS_ERR(req->mr)) {
                        err = PTR_ERR(req->mr);
index be44fd1b9944c53ede408b7fb73034908058684a..7ed8910ef7f5af11d2e97ef39c99ac05a8185f6c 100644 (file)
@@ -562,13 +562,15 @@ static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
 
 static int map_cont_bufs(struct rtrs_srv_path *srv_path)
 {
+       struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
        struct rtrs_srv_sess *srv = srv_path->srv;
        struct rtrs_path *ss = &srv_path->s;
        int i, err, mrs_num;
        unsigned int chunk_bits;
+       enum ib_mr_type mr_type;
        int chunks_per_mr = 1;
-       struct ib_mr *mr;
        struct sg_table *sgt;
+       struct ib_mr *mr;
 
        /*
         * Here we map queue_depth chunks to MR.  Firstly we have to
@@ -617,8 +619,13 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                        err = -EINVAL;
                        goto free_sg;
                }
-               mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
-                                nr_sgt);
+
+               if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+                       mr_type = IB_MR_TYPE_SG_GAPS;
+               else
+                       mr_type = IB_MR_TYPE_MEM_REG;
+
+               mr = ib_alloc_mr(srv_path->s.dev->ib_pd, mr_type, nr_sgt);
                if (IS_ERR(mr)) {
                        err = PTR_ERR(mr);
                        goto unmap_sg;