ksmbd: smbd: create MR pool
author     Hyunchul Lee <hyc.lee@gmail.com>
           Mon, 18 Dec 2023 15:32:39 +0000 (00:32 +0900)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 23 Dec 2023 09:41:50 +0000 (10:41 +0100)
[ Upstream commit c9f189271cff85d5d735e25dfa4bc95952ec12d8 ]

Create a memory region (MR) pool, because rdma_rw_ctx_init() uses
memory registration when it yields better performance than posting
multiple SGE entries.

Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/ksmbd/transport_rdma.c
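
The hunks below wire ksmbd's smb_direct transport to the RDMA core's MR pool
helpers from drivers/infiniband/core/mr_pool.c: the pool is filled when the
queue pair is created and drained in free_transport() before ib_destroy_qp(),
and the RDMA R/W API later draws from it with ib_mr_pool_get()/ib_mr_pool_put().
A minimal sketch of that lifecycle, assuming the smbd_* wrapper names (only the
ib_mr_pool_* calls and qp->rdma_mrs are the real API):

#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

static int smbd_setup_mr_pool(struct ib_qp *qp, int mr_count, int pages_per_mr)
{
        /* Pre-allocate mr_count fast-registration MRs, each able to map up
         * to pages_per_mr pages, and park them on the QP's rdma_mrs list. */
        return ib_mr_pool_init(qp, &qp->rdma_mrs, mr_count,
                               IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
}

static void smbd_teardown_mr_pool(struct ib_qp *qp)
{
        /* Must run before ib_destroy_qp(), as free_transport() now does. */
        ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}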

index ba2d3d28601715281cd84ab4afd0cef764147bc1..5901c4a2ece1d7f3fbd7103bab3cf1488a437a8a 100644
@@ -434,6 +434,7 @@ static void free_transport(struct smb_direct_transport *t)
 
        if (t->qp) {
                ib_drain_qp(t->qp);
+               ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
                ib_destroy_qp(t->qp);
        }
 
@@ -1714,7 +1715,9 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
        cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
        cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
        cap->max_inline_data = 0;
-       cap->max_rdma_ctxs = 0;
+       cap->max_rdma_ctxs =
+               rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
+               smb_direct_max_outstanding_rw_ops;
        return 0;
 }
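
For sizing, rdma_rw_mr_factor() (declared in include/rdma/rw.h) reports how
many MRs a single RDMA R/W context spanning max_pages pages may need on the
given device and port. A minimal sketch of the computation above, with a
hypothetical helper name and illustrative numbers in the comment:

#include <rdma/rw.h>

/* Mirrors the cap->max_rdma_ctxs sizing in the hunk above; the helper
 * name and the example numbers are illustrative only. */
static u32 smbd_size_rdma_ctxs(struct ib_device *dev, u32 port_num,
                               unsigned int max_pages,
                               unsigned int max_outstanding_rw_ops)
{
        /* e.g. a factor of 2 with 8 outstanding R/W ops yields 16 contexts */
        return rdma_rw_mr_factor(dev, port_num, max_pages) *
                max_outstanding_rw_ops;
}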
 
@@ -1796,6 +1799,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
 {
        int ret;
        struct ib_qp_init_attr qp_attr;
+       int pages_per_rw;
 
        t->pd = ib_alloc_pd(t->cm_id->device, 0);
        if (IS_ERR(t->pd)) {
@@ -1843,6 +1847,23 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
        t->qp = t->cm_id->qp;
        t->cm_id->event_handler = smb_direct_cm_handler;
 
+       pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
+       if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
+               int pages_per_mr, mr_count;
+
+               pages_per_mr = min_t(int, pages_per_rw,
+                                    t->cm_id->device->attrs.max_fast_reg_page_list_len);
+               mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
+                       atomic_read(&t->rw_avail_ops);
+               ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
+                                     IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
+               if (ret) {
+                       pr_err("failed to init mr pool count %d pages %d\n",
+                              mr_count, pages_per_mr);
+                       goto err;
+               }
+       }
+
        return 0;
 err:
        if (t->qp) {
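
Working through the last hunk with illustrative numbers: with
max_rdma_rw_size = 1 MiB and PAGE_SIZE = 4 KiB, pages_per_rw = 256 + 1 = 257;
if the device reports max_fast_reg_page_list_len = 256, then pages_per_mr = 256
and, with 8 outstanding R/W ops, mr_count = DIV_ROUND_UP(257, 256) * 8 = 16 MRs
are pre-allocated into the pool.

On the data path nothing in ksmbd touches the pool directly: rdma_rw_ctx_init()
decides whether to register memory and, when it does, takes MRs from
qp->rdma_mrs; rdma_rw_ctx_destroy() puts them back. A hedged sketch of such a
caller, assuming the smbd_rdma_rw() name and argument list (only the
rdma_rw_ctx_* calls are the real API; completion handling is elided):

#include <rdma/rw.h>

/* Hypothetical I/O path, to show where the pooled MRs end up being used.
 * ctx must be caller-owned storage that outlives the posted work requests;
 * the completion handler later calls rdma_rw_ctx_destroy(), which returns
 * any registered MR to the pool. */
static int smbd_rdma_rw(struct smb_direct_transport *t, struct rdma_rw_ctx *ctx,
                        struct scatterlist *sgl, u32 sg_cnt,
                        u64 remote_addr, u32 rkey, bool is_read,
                        struct ib_cqe *cqe)
{
        enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        int ret;

        /* May use memory registration; MRs come from the pool set up above. */
        ret = rdma_rw_ctx_init(ctx, t->qp, t->cm_id->port_num, sgl, sg_cnt,
                               0, remote_addr, rkey, dir);
        if (ret < 0)
                return ret;

        /* Posts the (optional) MR registration WR plus the RDMA READ/WRITE WRs. */
        ret = rdma_rw_ctx_post(ctx, t->qp, t->cm_id->port_num, cqe, NULL);
        if (ret) {
                /* On success the completion handler would do this instead. */
                rdma_rw_ctx_destroy(ctx, t->qp, t->cm_id->port_num,
                                    sgl, sg_cnt, dir);
        }
        return ret;
}

The context is taken as a caller-owned argument rather than a stack local
because it has to stay alive until the work requests complete and the
teardown call can hand the MR back to the pool.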