io_uring/zcrx: allocate sgtable for umem areas
author    Pavel Begunkov <asml.silence@gmail.com>
          Wed, 2 Jul 2025 14:29:07 +0000 (15:29 +0100)
committer Jens Axboe <axboe@kernel.dk>
          Tue, 8 Jul 2025 17:59:56 +0000 (11:59 -0600)
Currently, DMA addresses for umem areas are stored directly in niovs.
It's memory efficient but inconvenient. I need a better format 1) to
share code with dmabuf areas, and 2) to disentangle page, folio and
niov sizes. dmabuf already provides an sg_table, so create one for user
memory as well.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: David Wei <dw@davidwei.uk>
Link: https://lore.kernel.org/r/f3c15081827c1bf5427d3a2e693bc526476b87ee.1751466461.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
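
Outside of the zcrx specifics, the pattern the patch moves to is: pin the
user pages, describe them with a single sg_table via
sg_alloc_table_from_pages(), and let dma_map_sgtable() map the whole table
in one call. The following is only a minimal, self-contained sketch of that
pattern, not the zcrx code itself; example_map_user_buf() and its parameters
are made up for illustration, while pin_user_pages_fast(),
sg_alloc_table_from_pages() and dma_map_sgtable() are the same kernel APIs
the patch relies on.

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>

    /*
     * Hypothetical helper, not part of the patch: pin a user buffer,
     * describe the pinned pages with one sg_table, and DMA-map the table.
     */
    static int example_map_user_buf(struct device *dev, unsigned long uaddr,
                                    int nr_pages, struct sg_table *sgt,
                                    struct page ***pages_out)
    {
            struct page **pages;
            int pinned, ret;

            pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(uaddr, nr_pages,
                                         FOLL_WRITE | FOLL_LONGTERM, pages);
            if (pinned < 0) {
                    ret = pinned;
                    goto err_free;
            }
            if (pinned != nr_pages) {
                    ret = -EFAULT;
                    goto err_unpin;
            }

            /* One table over all pinned pages; adjacent pages may be merged. */
            ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                            (unsigned long)nr_pages << PAGE_SHIFT,
                                            GFP_KERNEL_ACCOUNT);
            if (ret)
                    goto err_unpin;

            /* Map every entry for device writes into memory (RX direction). */
            ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
            if (ret)
                    goto err_sg;

            *pages_out = pages;
            return 0;

    err_sg:
            sg_free_table(sgt);
    err_unpin:
            unpin_user_pages(pages, pinned);
    err_free:
            kvfree(pages);
            return ret;
    }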
io_uring/zcrx.c
io_uring/zcrx.h

diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 172eb67ddc62590b58e48cd6120428bb01f51db0..3f3c8cbde61ed5f1da2c431f6c6521a107d464ee 100644
@@ -161,7 +161,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
                          struct io_uring_zcrx_area_reg *area_reg)
 {
        struct page **pages;
-       int nr_pages;
+       int nr_pages, ret;
 
        if (area_reg->dmabuf_fd)
                return -EINVAL;
@@ -172,6 +172,12 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
+       ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
+                                       0, nr_pages << PAGE_SHIFT,
+                                       GFP_KERNEL_ACCOUNT);
+       if (ret)
+               return ret;
+
        mem->pages = pages;
        mem->nr_folios = nr_pages;
        mem->size = area_reg->len;
@@ -186,6 +192,7 @@ static void io_release_area_mem(struct io_zcrx_mem *mem)
        }
        if (mem->pages) {
                unpin_user_pages(mem->pages, mem->nr_folios);
+               sg_free_table(&mem->page_sg_table);
                kvfree(mem->pages);
        }
 }
@@ -207,67 +214,36 @@ static int io_import_area(struct io_zcrx_ifq *ifq,
        return io_import_umem(ifq, mem, area_reg);
 }
 
-static void io_zcrx_unmap_umem(struct io_zcrx_ifq *ifq,
-                               struct io_zcrx_area *area, int nr_mapped)
-{
-       int i;
-
-       for (i = 0; i < nr_mapped; i++) {
-               netmem_ref netmem = net_iov_to_netmem(&area->nia.niovs[i]);
-               dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
-
-               dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
-                                    DMA_FROM_DEVICE, IO_DMA_ATTR);
-       }
-}
-
-static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
-                                struct io_zcrx_area *area, int nr_mapped)
+static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
+                               struct io_zcrx_area *area)
 {
        int i;
 
-       if (area->mem.is_dmabuf)
-               io_release_dmabuf(&area->mem);
-       else
-               io_zcrx_unmap_umem(ifq, area, nr_mapped);
+       guard(mutex)(&ifq->dma_lock);
+       if (!area->is_mapped)
+               return;
+       area->is_mapped = false;
 
        for (i = 0; i < area->nia.num_niovs; i++)
                net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
-}
-
-static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
-{
-       guard(mutex)(&ifq->dma_lock);
 
-       if (area->is_mapped)
-               __io_zcrx_unmap_area(ifq, area, area->nia.num_niovs);
-       area->is_mapped = false;
+       if (area->mem.is_dmabuf) {
+               io_release_dmabuf(&area->mem);
+       } else {
+               dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
+                                 DMA_FROM_DEVICE, IO_DMA_ATTR);
+       }
 }
 
-static int io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
+static unsigned io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
 {
-       int i;
-
-       for (i = 0; i < area->nia.num_niovs; i++) {
-               struct net_iov *niov = &area->nia.niovs[i];
-               dma_addr_t dma;
-
-               dma = dma_map_page_attrs(ifq->dev, area->mem.pages[i], 0,
-                                        PAGE_SIZE, DMA_FROM_DEVICE, IO_DMA_ATTR);
-               if (dma_mapping_error(ifq->dev, dma))
-                       break;
-               if (net_mp_niov_set_dma_addr(niov, dma)) {
-                       dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
-                                            DMA_FROM_DEVICE, IO_DMA_ATTR);
-                       break;
-               }
-       }
+       int ret;
 
-       if (i != area->nia.num_niovs) {
-               __io_zcrx_unmap_area(ifq, area, i);
-               return -EINVAL;
-       }
-       return 0;
+       ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
+                               DMA_FROM_DEVICE, IO_DMA_ATTR);
+       if (ret < 0)
+               return ret;
+       return io_populate_area_dma(ifq, area, &area->mem.page_sg_table, 0);
 }
 
 static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
index 2f5e26389f2218b15469a3c78bfc03affdbbea8d..89015b92391132729a3611f617ec9e770451fa06 100644
@@ -14,6 +14,7 @@ struct io_zcrx_mem {
 
        struct page                     **pages;
        unsigned long                   nr_folios;
+       struct sg_table                 page_sg_table;
 
        struct dma_buf_attachment       *attach;
        struct dma_buf                  *dmabuf;
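
Note that the new umem mapping path ends in io_populate_area_dma(), which is
defined elsewhere in io_uring/zcrx.c and does not appear in this diff. A
rough, hypothetical sketch of what such a step amounts to (not the actual
helper): walk the DMA-mapped sg_table entries and assign PAGE_SIZE-sized
chunks to successive niovs.

    #include <linux/scatterlist.h>
    #include <net/netmem.h>
    #include <net/page_pool/memory_provider.h>

    /*
     * Illustrative sketch only, not the real io_populate_area_dma(): slice
     * each DMA-mapped entry into PAGE_SIZE chunks and hand one to each niov.
     */
    static int example_populate_dma(struct net_iov *niovs, unsigned int nr_niovs,
                                    struct sg_table *sgt)
    {
            unsigned int niov_idx = 0, i;
            struct scatterlist *sg;

            for_each_sgtable_dma_sg(sgt, sg, i) {
                    dma_addr_t dma = sg_dma_address(sg);
                    unsigned int len = sg_dma_len(sg);

                    while (len >= PAGE_SIZE && niov_idx < nr_niovs) {
                            if (net_mp_niov_set_dma_addr(&niovs[niov_idx], dma))
                                    return -EINVAL;
                            dma += PAGE_SIZE;
                            len -= PAGE_SIZE;
                            niov_idx++;
                    }
            }
            /* Every niov must have received an address. */
            return niov_idx == nr_niovs ? 0 : -EINVAL;
    }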