static void io_zcrx_return_niov_freelist(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

-	spin_lock_bh(&area->freelist_lock);
+	guard(spinlock_bh)(&area->freelist_lock);
	area->freelist[area->free_count++] = net_iov_idx(niov);
-	spin_unlock_bh(&area->freelist_lock);
}
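
The conversion uses the scope-based cleanup helpers from <linux/cleanup.h>: guard(spinlock_bh)(&area->freelist_lock) takes the lock immediately and releases it automatically when the enclosing scope ends, so the explicit spin_unlock_bh() call (and any unlock-before-return path) goes away. As a rough illustration of the underlying mechanism only, here is a minimal userspace sketch built on GCC/Clang's __attribute__((cleanup)); guard_mutex and unlock_cleanup are made-up names for this example, not the kernel macros.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count;

/* Runs automatically when the guarded variable goes out of scope. */
static void unlock_cleanup(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

/* Made-up stand-in for guard(): lock now, unlock at end of scope. */
#define guard_mutex(lock)						\
	pthread_mutex_t *__guard __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(lock), (lock))

static void push_index(int idx)
{
	guard_mutex(&freelist_lock);
	free_count = idx;		/* critical section */
}					/* unlock_cleanup() fires here */

int main(void)
{
	push_index(42);
	printf("free_count=%d\n", free_count);
	return 0;
}
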
static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;

-	spin_lock_bh(&area->freelist_lock);
+	guard(spinlock_bh)(&area->freelist_lock);
+
	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
		struct net_iov *niov = __io_zcrx_get_free_niov(area);
		netmem_ref netmem = net_iov_to_netmem(niov);

		io_zcrx_sync_for_device(pp, niov);
		net_mp_netmem_place_in_cache(pp, netmem);
	}
-	spin_unlock_bh(&area->freelist_lock);
}
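
For reference, the spinlock_bh guard class used by these hunks comes from DEFINE_LOCK_GUARD_1 in include/linux/spinlock.h, which pairs the lock and unlock operations for the guard machinery (quoted approximately):

/* Defines the guard class so that guard(spinlock_bh)(&lock) acquires with
 * spin_lock_bh() and releases with spin_unlock_bh() at end of scope. */
DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
		    spin_lock_bh(_T->lock),
		    spin_unlock_bh(_T->lock))
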
static struct net_iov *io_zcrx_alloc_fallback(struct io_zcrx_area *area)
{
	struct net_iov *niov = NULL;

	if (area->mem.is_dmabuf)
		return NULL;

-	spin_lock_bh(&area->freelist_lock);
-	if (area->free_count)
-		niov = __io_zcrx_get_free_niov(area);
-	spin_unlock_bh(&area->freelist_lock);
+	scoped_guard(spinlock_bh, &area->freelist_lock) {
+		if (area->free_count)
+			niov = __io_zcrx_get_free_niov(area);
+	}

	if (niov)
		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
	return niov;
}
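
Unlike guard(), scoped_guard() limits the critical section to an explicit block, which is why it fits this hunk: the freelist pop must be locked, but page_pool_fragment_netmem() runs after the lock is dropped. A loose userspace analogy of the mechanism is the one-iteration for loop sketched below (scoped_guard_mutex is a made-up name); note that the real kernel implementation is cleanup-based and also unlocks on break or goto, which this toy version does not handle.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count = 1;

/* Made-up stand-in for scoped_guard(): the attached block runs exactly once
 * with the lock held; the for-loop "increment" unlocks on normal exit. */
#define scoped_guard_mutex(lock)					\
	for (pthread_mutex_t *__g = (pthread_mutex_lock(lock), (lock));	\
	     __g; pthread_mutex_unlock(__g), __g = NULL)

int main(void)
{
	int popped = 0;

	scoped_guard_mutex(&freelist_lock) {
		if (free_count)
			popped = free_count--;
	}	/* lock released here */

	printf("popped=%d free_count=%d\n", popped, free_count);
	return 0;
}
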