From: Pavel Begunkov
Date: Mon, 23 Mar 2026 12:43:57 +0000 (+0000)
Subject: io_uring/zcrx: use guards for locking
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=898ad80d1207cbdb22b21bafb6de4adfd7627bd0;p=thirdparty%2Fkernel%2Flinux.git

io_uring/zcrx: use guards for locking

Convert last several places using manual locking to guards to simplify
the code.

Signed-off-by: Pavel Begunkov
Link: https://patch.msgid.link/eb4667cfaf88c559700f6399da9e434889f5b04a.1774261953.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---

diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 2112b652a6992..6457690e1af41 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -586,9 +586,8 @@ static void io_zcrx_return_niov_freelist(struct net_iov *niov)
 {
 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
 
-	spin_lock_bh(&area->freelist_lock);
+	guard(spinlock_bh)(&area->freelist_lock);
 	area->freelist[area->free_count++] = net_iov_idx(niov);
-	spin_unlock_bh(&area->freelist_lock);
 }
 
 static void io_zcrx_return_niov(struct net_iov *niov)
@@ -1053,7 +1052,8 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
 {
 	struct io_zcrx_area *area = ifq->area;
 
-	spin_lock_bh(&area->freelist_lock);
+	guard(spinlock_bh)(&area->freelist_lock);
+
 	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
 		struct net_iov *niov = __io_zcrx_get_free_niov(area);
 		netmem_ref netmem = net_iov_to_netmem(niov);
@@ -1062,7 +1062,6 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
 		io_zcrx_sync_for_device(pp, niov);
 		net_mp_netmem_place_in_cache(pp, netmem);
 	}
-	spin_unlock_bh(&area->freelist_lock);
 }
 
 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
@@ -1285,10 +1284,10 @@ static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
 	if (area->mem.is_dmabuf)
 		return NULL;
 
-	spin_lock_bh(&area->freelist_lock);
-	if (area->free_count)
-		niov = __io_zcrx_get_free_niov(area);
-	spin_unlock_bh(&area->freelist_lock);
+	scoped_guard(spinlock_bh, &area->freelist_lock) {
+		if (area->free_count)
+			niov = __io_zcrx_get_free_niov(area);
+	}
 
 	if (niov)
 		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);