From: Pavel Begunkov Date: Mon, 23 Mar 2026 12:43:58 +0000 (+0000) Subject: io_uring/zcrx: move count check into zcrx_get_free_niov X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7df542a6657534694779948195cc4d36ace575b5;p=thirdparty%2Fkernel%2Flinux.git io_uring/zcrx: move count check into zcrx_get_free_niov Instead of relying on the caller of __io_zcrx_get_free_niov() to check that there are free niovs available (i.e. free_count > 0), move the check into the function and return NULL if it can't allocate. It consolidates the free count checks, and it'll be easier to extend the niov free list allocator in the future. Signed-off-by: Pavel Begunkov Link: https://patch.msgid.link/6df04a6b3a6170f86d4345da9864f238311163f9.1774261953.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index 6457690e1af41..a7790d609f40d 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -590,6 +590,19 @@ static void io_zcrx_return_niov_freelist(struct net_iov *niov) area->freelist[area->free_count++] = net_iov_idx(niov); } +static struct net_iov *zcrx_get_free_niov(struct io_zcrx_area *area) +{ + unsigned niov_idx; + + lockdep_assert_held(&area->freelist_lock); + + if (unlikely(!area->free_count)) + return NULL; + + niov_idx = area->freelist[--area->free_count]; + return &area->nia.niovs[niov_idx]; +} + static void io_zcrx_return_niov(struct net_iov *niov) { netmem_ref netmem = net_iov_to_netmem(niov); @@ -903,16 +916,6 @@ ifq_free: return ret; } -static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area) -{ - unsigned niov_idx; - - lockdep_assert_held(&area->freelist_lock); - - niov_idx = area->freelist[--area->free_count]; - return &area->nia.niovs[niov_idx]; -} - static inline bool is_zcrx_entry_marked(struct io_ring_ctx *ctx, unsigned long id) { return xa_get_mark(&ctx->zcrx_ctxs, id, XA_MARK_0); } @@ -1054,12 +1057,15 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq
*ifq) guard(spinlock_bh)(&area->freelist_lock); - while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) { - struct net_iov *niov = __io_zcrx_get_free_niov(area); - netmem_ref netmem = net_iov_to_netmem(niov); + while (pp->alloc.count < PP_ALLOC_CACHE_REFILL) { + struct net_iov *niov = zcrx_get_free_niov(area); + netmem_ref netmem; + if (!niov) + break; net_mp_niov_set_page_pool(pp, niov); io_zcrx_sync_for_device(pp, niov); + netmem = net_iov_to_netmem(niov); net_mp_netmem_place_in_cache(pp, netmem); } } @@ -1284,10 +1290,8 @@ static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq) if (area->mem.is_dmabuf) return NULL; - scoped_guard(spinlock_bh, &area->freelist_lock) { - if (area->free_count) - niov = __io_zcrx_get_free_niov(area); - } + scoped_guard(spinlock_bh, &area->freelist_lock) + niov = zcrx_get_free_niov(area); if (niov) page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);