git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
io_uring/zcrx: introduce io_populate_area_dma
Author: Pavel Begunkov <asml.silence@gmail.com>
Wed, 2 Jul 2025 14:29:06 +0000 (15:29 +0100)
Committer: Jens Axboe <axboe@kernel.dk>
Tue, 8 Jul 2025 17:59:56 +0000 (11:59 -0600)
Add a helper that initialises page-pool dma addresses from a sg table.
It'll be reused in following patches.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: David Wei <dw@davidwei.uk>
Link: https://lore.kernel.org/r/a8972a77be9b5675abc585d6e2e6e30f9c7dbd85.1751466461.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/zcrx.c

index 6fb7c9bedfcb16f358055e950dadc708fc7c7815..172eb67ddc62590b58e48cd6120428bb01f51db0 100644 (file)
@@ -47,6 +47,35 @@ static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
        return area->mem.pages[net_iov_idx(niov)];
 }
 
+/*
+ * io_populate_area_dma - seed an area's net_iovs with DMA addresses
+ * @ifq:  owning interface queue (not referenced in this body)
+ * @area: zcrx area whose niovs are to be populated
+ * @sgt:  DMA-mapped sg table to take addresses from
+ * @off:  number of bytes to skip from the start of @sgt
+ *
+ * Walks the DMA segments of @sgt and hands out one PAGE_SIZE-sized DMA
+ * address per net_iov until either the table or the niov array is
+ * exhausted.  Returns 0 on success, -EFAULT if a niov's DMA address
+ * cannot be set.
+ */
+static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
+                               struct io_zcrx_area *area,
+                               struct sg_table *sgt, unsigned long off)
+{
+       struct scatterlist *sg;
+       unsigned i, niov_idx = 0;
+
+       for_each_sgtable_dma_sg(sgt, sg, i) {
+               dma_addr_t dma = sg_dma_address(sg);
+               unsigned long sg_len = sg_dma_len(sg);
+               /* the part of the remaining skip that falls in this segment */
+               unsigned long sg_off = min(sg_len, off);
+
+               off -= sg_off;
+               sg_len -= sg_off;
+               dma += sg_off;
+
+               /*
+                * Hand out page-sized chunks of this segment.
+                * NOTE(review): assumes the post-offset segment length is a
+                * multiple of PAGE_SIZE; a short tail would wrap sg_len
+                * (unsigned), though niov_idx still bounds the loop — confirm
+                * segments are page-aligned at the mapping site.
+                */
+               while (sg_len && niov_idx < area->nia.num_niovs) {
+                       struct net_iov *niov = &area->nia.niovs[niov_idx];
+
+                       if (net_mp_niov_set_dma_addr(niov, dma))
+                               return -EFAULT;
+                       sg_len -= PAGE_SIZE;
+                       dma += PAGE_SIZE;
+                       niov_idx++;
+               }
+       }
+       return 0;
+}
+
 static void io_release_dmabuf(struct io_zcrx_mem *mem)
 {
        if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
@@ -121,33 +150,10 @@ err:
 
 static int io_zcrx_map_area_dmabuf(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
 {
+       /*
+        * dma-buf backed area: the sg table was already DMA-mapped at
+        * import time, so niov population reduces to a call to the new
+        * io_populate_area_dma() helper, skipping dmabuf_offset bytes.
+        */
-       unsigned long off = area->mem.dmabuf_offset;
-       struct scatterlist *sg;
-       unsigned i, niov_idx = 0;
-
        if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
                return -EINVAL;
-
-       for_each_sgtable_dma_sg(area->mem.sgt, sg, i) {
-               dma_addr_t dma = sg_dma_address(sg);
-               unsigned long sg_len = sg_dma_len(sg);
-               unsigned long sg_off = min(sg_len, off);
-
-               off -= sg_off;
-               sg_len -= sg_off;
-               dma += sg_off;
-
-               while (sg_len && niov_idx < area->nia.num_niovs) {
-                       struct net_iov *niov = &area->nia.niovs[niov_idx];
-
-                       if (net_mp_niov_set_dma_addr(niov, dma))
-                               return -EFAULT;
-                       sg_len -= PAGE_SIZE;
-                       dma += PAGE_SIZE;
-                       niov_idx++;
-               }
-       }
-       return 0;
+       return io_populate_area_dma(ifq, area, area->mem.sgt,
+                                   area->mem.dmabuf_offset);
 }
 
 static int io_import_umem(struct io_zcrx_ifq *ifq,