git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
svcrdma: Clean up use of rdma->sc_pd->device
author: Chuck Lever <chuck.lever@oracle.com>
Fri, 27 Feb 2026 14:03:30 +0000 (09:03 -0500)
committer: Chuck Lever <chuck.lever@oracle.com>
Mon, 30 Mar 2026 01:25:09 +0000 (21:25 -0400)
I can't think of a reason why svcrdma is using the PD's device. Most
other consumers of the IB DMA API use the ib_device pointer from the
connection's rdma_cm_id.

I don't think there's any functional difference between the two, but
it is a little confusing to see some uses of rdma_cm_id and some of
ib_pd.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
net/sunrpc/xprtrdma/svc_rdma_sendto.c

index 02559947272aba88c4bbc15ce392d8b5c48c1b37..bef68efa7034d320c3119b92f5838c0c1823d104 100644 (file)
@@ -116,7 +116,8 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
 static struct svc_rdma_send_ctxt *
 svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
-       int node = ibdev_to_node(rdma->sc_cm_id->device);
+       struct ib_device *device = rdma->sc_cm_id->device;
+       int node = ibdev_to_node(device);
        struct svc_rdma_send_ctxt *ctxt;
        unsigned long pages;
        dma_addr_t addr;
@@ -136,9 +137,9 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
        buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
        if (!buffer)
                goto fail2;
-       addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
-                                rdma->sc_max_req_size, DMA_TO_DEVICE);
-       if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
+       addr = ib_dma_map_single(device, buffer, rdma->sc_max_req_size,
+                                DMA_TO_DEVICE);
+       if (ib_dma_mapping_error(device, addr))
                goto fail3;
 
        svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
@@ -175,15 +176,14 @@ fail0:
  */
 void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
 {
+       struct ib_device *device = rdma->sc_cm_id->device;
        struct svc_rdma_send_ctxt *ctxt;
        struct llist_node *node;
 
        while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
                ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
-               ib_dma_unmap_single(rdma->sc_pd->device,
-                                   ctxt->sc_sges[0].addr,
-                                   rdma->sc_max_req_size,
-                                   DMA_TO_DEVICE);
+               ib_dma_unmap_single(device, ctxt->sc_sges[0].addr,
+                                   rdma->sc_max_req_size, DMA_TO_DEVICE);
                kfree(ctxt->sc_xprt_buf);
                kfree(ctxt->sc_pages);
                kfree(ctxt);
@@ -463,7 +463,7 @@ int svc_rdma_post_send(struct svcxprt_rdma *rdma,
        might_sleep();
 
        /* Sync the transport header buffer */
-       ib_dma_sync_single_for_device(rdma->sc_pd->device,
+       ib_dma_sync_single_for_device(rdma->sc_cm_id->device,
                                      send_wr->sg_list[0].addr,
                                      send_wr->sg_list[0].length,
                                      DMA_TO_DEVICE);