svcrdma: Adjust the number of entries in svc_rdma_send_ctxt::sc_pages
author Chuck Lever <chuck.lever@oracle.com>
Mon, 28 Apr 2025 19:36:57 +0000 (15:36 -0400)
committer Chuck Lever <chuck.lever@oracle.com>
Thu, 15 May 2025 20:16:26 +0000 (16:16 -0400)
Allow allocation of more entries in the sc_pages[] array when the
maximum size of an RPC message is increased.

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: NeilBrown <neil@brown.name>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_sendto.c

index 1016f2feddc4b12c7e5a0f41bc277d50aa9e4aaa..22704c2e5b9b23fe84d2834ac659d698d38040e0 100644
@@ -245,7 +245,8 @@ struct svc_rdma_send_ctxt {
        void                    *sc_xprt_buf;
        int                     sc_page_count;
        int                     sc_cur_sge_no;
-       struct page             *sc_pages[RPCSVC_MAXPAGES];
+       unsigned long           sc_maxpages;
+       struct page             **sc_pages;
        struct ib_sge           sc_sges[];
 };
 
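The hunk above swaps a fixed-size embedded array for a pointer plus a capacity field, so the number of page slots can track a per-server runtime limit instead of the compile-time RPCSVC_MAXPAGES constant. A minimal sketch of the same pattern in plain C, with illustrative names (send_ctxt, maxpages, pages) rather than the kernel's:

struct page;                            /* opaque stand-in for the kernel's struct page */

struct send_ctxt {
        unsigned long   maxpages;       /* capacity chosen when the ctxt is allocated  */
        struct page     **pages;        /* was: struct page *pages[RPCSVC_MAXPAGES]    */
};

Recording the capacity in the context itself lets later code bounds-check against the size that was actually allocated rather than against a global constant.
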
index 96154a2367a1106f749dd42663375df3b4126bb8..914cd263c2f179c05e65cd572022a87fcc56b3f2 100644
@@ -118,6 +118,7 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
        int node = ibdev_to_node(rdma->sc_cm_id->device);
        struct svc_rdma_send_ctxt *ctxt;
+       unsigned long pages;
        dma_addr_t addr;
        void *buffer;
        int i;
@@ -126,13 +127,19 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
                            GFP_KERNEL, node);
        if (!ctxt)
                goto fail0;
+       pages = svc_serv_maxpages(rdma->sc_xprt.xpt_server);
+       ctxt->sc_pages = kcalloc_node(pages, sizeof(struct page *),
+                                     GFP_KERNEL, node);
+       if (!ctxt->sc_pages)
+               goto fail1;
+       ctxt->sc_maxpages = pages;
        buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
        if (!buffer)
-               goto fail1;
+               goto fail2;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
-               goto fail2;
+               goto fail3;
 
        svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
 
@@ -151,8 +158,10 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
                ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
        return ctxt;
 
-fail2:
+fail3:
        kfree(buffer);
+fail2:
+       kfree(ctxt->sc_pages);
 fail1:
        kfree(ctxt);
 fail0:
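
The allocation changes above follow the usual goto-label ladder: each additional allocation gets its own unwind label, and failures release resources in reverse order of acquisition. A hedged userspace analogue of that flow, using calloc/malloc/free in place of kcalloc_node/kmalloc_node/kfree and a made-up server_max_pages() standing in for svc_serv_maxpages():

#include <stdlib.h>

struct page;                            /* opaque stand-in for struct page */

struct send_ctxt {
        unsigned long   maxpages;
        struct page     **pages;
        void            *xprt_buf;
};

/* Hypothetical stand-in for svc_serv_maxpages(serv); the value is illustrative. */
static unsigned long server_max_pages(void)
{
        return 260;
}

static struct send_ctxt *send_ctxt_alloc(size_t max_req_size)
{
        unsigned long pages = server_max_pages();
        struct send_ctxt *ctxt;

        ctxt = calloc(1, sizeof(*ctxt));
        if (!ctxt)
                goto fail0;

        /* Size the page array from the runtime limit, not a constant. */
        ctxt->pages = calloc(pages, sizeof(struct page *));
        if (!ctxt->pages)
                goto fail1;
        ctxt->maxpages = pages;

        ctxt->xprt_buf = malloc(max_req_size);
        if (!ctxt->xprt_buf)
                goto fail2;

        return ctxt;

fail2:
        free(ctxt->pages);
fail1:
        free(ctxt);
fail0:
        return NULL;
}

In the kernel version the DMA mapping of the transmit buffer adds one more label (fail3), but the rule is the same: unwind in the reverse order of the setup steps.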
@@ -176,6 +185,7 @@ void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
                                    rdma->sc_max_req_size,
                                    DMA_TO_DEVICE);
                kfree(ctxt->sc_xprt_buf);
+               kfree(ctxt->sc_pages);
                kfree(ctxt);
        }
 }
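
Because each send ctxt now owns a separately allocated page array, the destroy path above gains a matching kfree(ctxt->sc_pages) before kfree(ctxt), keeping teardown a mirror image of svc_rdma_send_ctxt_alloc(). The paired teardown for the userspace sketch above might look like this (the DMA unmapping has no userspace analogue, so it is omitted):

static void send_ctxt_free(struct send_ctxt *ctxt)
{
        if (!ctxt)
                return;
        free(ctxt->xprt_buf);   /* kernel code also unmaps and frees sc_xprt_buf */
        free(ctxt->pages);      /* mirrors the new kfree(ctxt->sc_pages)         */
        free(ctxt);
}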