]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
io_uring/zcrx: use guards for locking
author Pavel Begunkov <asml.silence@gmail.com>
Mon, 23 Mar 2026 12:43:57 +0000 (12:43 +0000)
committer Jens Axboe <axboe@kernel.dk>
Wed, 1 Apr 2026 16:21:12 +0000 (10:21 -0600)
Convert last several places using manual locking to guards to simplify
the code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://patch.msgid.link/eb4667cfaf88c559700f6399da9e434889f5b04a.1774261953.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/zcrx.c

index 2112b652a6992c5df4047541af6d22f5b7b9494c..6457690e1af4102514d495f179978a790bf1ef0c 100644 (file)
@@ -586,9 +586,8 @@ static void io_zcrx_return_niov_freelist(struct net_iov *niov)
 {
        struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
 
-       spin_lock_bh(&area->freelist_lock);
+       guard(spinlock_bh)(&area->freelist_lock);
        area->freelist[area->free_count++] = net_iov_idx(niov);
-       spin_unlock_bh(&area->freelist_lock);
 }
 
 static void io_zcrx_return_niov(struct net_iov *niov)
@@ -1053,7 +1052,8 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
 {
        struct io_zcrx_area *area = ifq->area;
 
-       spin_lock_bh(&area->freelist_lock);
+       guard(spinlock_bh)(&area->freelist_lock);
+
        while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
                struct net_iov *niov = __io_zcrx_get_free_niov(area);
                netmem_ref netmem = net_iov_to_netmem(niov);
@@ -1062,7 +1062,6 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
                io_zcrx_sync_for_device(pp, niov);
                net_mp_netmem_place_in_cache(pp, netmem);
        }
-       spin_unlock_bh(&area->freelist_lock);
 }
 
 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
@@ -1285,10 +1284,10 @@ static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
        if (area->mem.is_dmabuf)
                return NULL;
 
-       spin_lock_bh(&area->freelist_lock);
-       if (area->free_count)
-               niov = __io_zcrx_get_free_niov(area);
-       spin_unlock_bh(&area->freelist_lock);
+       scoped_guard(spinlock_bh, &area->freelist_lock) {
+               if (area->free_count)
+                       niov = __io_zcrx_get_free_niov(area);
+       }
 
        if (niov)
                page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);