From: Pavel Begunkov Date: Mon, 23 Mar 2026 12:43:52 +0000 (+0000) Subject: io_uring/zcrx: always dma map in advance X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b8d6eb6c1c80852dfcad8642f346c26aabf34833;p=thirdparty%2Fkernel%2Flinux.git io_uring/zcrx: always dma map in advance zcrx was originally establishing dma mappings at a late stage when it was being bound to a page pool. Dma-buf couldn't work this way, so it's initialised during area creation. It's messy having them do it at different spots; just move everything to the area creation time. Signed-off-by: Pavel Begunkov Link: https://patch.msgid.link/334092a2cbdd4aabd7c025050aa99f05ace89bb5.1774261953.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index 2f60193365ce8..a6a08ee48b34f 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -194,6 +194,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq, { struct page **pages; int nr_pages, ret; + bool mapped = false; if (area_reg->dmabuf_fd) return -EINVAL; @@ -210,6 +211,12 @@ static int io_import_umem(struct io_zcrx_ifq *ifq, if (ret) goto out_err; + ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table, + DMA_FROM_DEVICE, IO_DMA_ATTR); + if (ret < 0) + goto out_err; + mapped = true; + mem->account_pages = io_count_account_pages(pages, nr_pages); ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages); if (ret < 0) { @@ -223,6 +230,9 @@ static int io_import_umem(struct io_zcrx_ifq *ifq, mem->size = area_reg->len; return ret; out_err: + if (mapped) + dma_unmap_sgtable(ifq->dev, &mem->page_sg_table, + DMA_FROM_DEVICE, IO_DMA_ATTR); sg_free_table(&mem->page_sg_table); unpin_user_pages(pages, nr_pages); kvfree(pages); @@ -288,30 +298,6 @@ static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, } } -static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) -{ - int ret; - - guard(mutex)(&ifq->pp_lock); - if (area->is_mapped) - return 0; - - if 
(!area->mem.is_dmabuf) { - ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table, - DMA_FROM_DEVICE, IO_DMA_ATTR); - if (ret < 0) - return ret; - } - - ret = io_populate_area_dma(ifq, area); - if (ret && !area->mem.is_dmabuf) - dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table, - DMA_FROM_DEVICE, IO_DMA_ATTR); - if (ret == 0) - area->is_mapped = true; - return ret; -} - static void io_zcrx_sync_for_device(struct page_pool *pool, struct net_iov *niov) { @@ -464,6 +450,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq, ret = io_import_area(ifq, &area->mem, area_reg); if (ret) goto err; + area->is_mapped = true; if (buf_size_shift > io_area_max_shift(&area->mem)) { ret = -ERANGE; @@ -499,6 +486,10 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq, niov->type = NET_IOV_IOURING; } + ret = io_populate_area_dma(ifq, area); + if (ret) + goto err; + area->free_count = nr_iovs; /* we're only supporting one area per ifq for now */ area->area_id = 0; @@ -1082,7 +1073,6 @@ static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem) static int io_pp_zc_init(struct page_pool *pp) { struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp); - int ret; if (WARN_ON_ONCE(!ifq)) return -EINVAL; @@ -1095,10 +1085,6 @@ static int io_pp_zc_init(struct page_pool *pp) if (pp->p.dma_dir != DMA_FROM_DEVICE) return -EOPNOTSUPP; - ret = io_zcrx_map_area(ifq, ifq->area); - if (ret) - return ret; - refcount_inc(&ifq->refs); return 0; }