area->freelist[area->free_count++] = net_iov_idx(niov);
}
+/*
+ * Pop one net_iov off the area's freelist.
+ *
+ * Returns the niov for the most recently freed index, or NULL if the
+ * freelist is empty.  Caller must hold area->freelist_lock (enforced
+ * via lockdep below); the emptiness check folded in here lets callers
+ * drop their own open-coded free_count tests.
+ */
+static struct net_iov *zcrx_get_free_niov(struct io_zcrx_area *area)
+{
+ unsigned niov_idx;
+
+ lockdep_assert_held(&area->freelist_lock);
+
+ if (unlikely(!area->free_count))
+ return NULL;
+
+ niov_idx = area->freelist[--area->free_count];
+ return &area->nia.niovs[niov_idx];
+}
+
static void io_zcrx_return_niov(struct net_iov *niov)
{
netmem_ref netmem = net_iov_to_netmem(niov);
return ret;
}
-static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
-{
- unsigned niov_idx;
-
- lockdep_assert_held(&area->freelist_lock);
-
- niov_idx = area->freelist[--area->free_count];
- return &area->nia.niovs[niov_idx];
-}
-
static inline bool is_zcrx_entry_marked(struct io_ring_ctx *ctx, unsigned long id)
{
return xa_get_mark(&ctx->zcrx_ctxs, id, XA_MARK_0);
guard(spinlock_bh)(&area->freelist_lock);
- while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
- struct net_iov *niov = __io_zcrx_get_free_niov(area);
- netmem_ref netmem = net_iov_to_netmem(niov);
+ while (pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
+ struct net_iov *niov = zcrx_get_free_niov(area);
+ netmem_ref netmem;
+ if (!niov)
+ break;
net_mp_niov_set_page_pool(pp, niov);
io_zcrx_sync_for_device(pp, niov);
+ netmem = net_iov_to_netmem(niov);
net_mp_netmem_place_in_cache(pp, netmem);
}
}
if (area->mem.is_dmabuf)
return NULL;
- scoped_guard(spinlock_bh, &area->freelist_lock) {
- if (area->free_count)
- niov = __io_zcrx_get_free_niov(area);
- }
+ scoped_guard(spinlock_bh, &area->freelist_lock)
+ niov = zcrx_get_free_niov(area);
if (niov)
page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);