In preparation for reusing the lock for other purposes, rename it to
"pp_lock". As before, it can be taken deeper inside the networking stack
by page pool, and so the syscall io_uring must avoid holding it while
doing queue reconfiguration or anything that can result in immediate pp
init/destruction.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
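---

A minimal illustration of the locking rule described above (sketch only;
the helpers below are hypothetical and not part of this patch). The page
pool can take pp_lock from deeper in the net stack, so the io_uring
syscall side holds it only around DMA map/unmap and drops it before any
queue reconfiguration that can synchronously create or destroy a page
pool:

	static void zcrx_reconfig_example(struct io_zcrx_ifq *ifq)
	{
		scoped_guard(mutex, &ifq->pp_lock) {
			/* area DMA map/unmap is done under pp_lock */
		}
		/*
		 * Called with pp_lock dropped: restarting the queue can
		 * trigger immediate pp init/destruction, which takes
		 * pp_lock again deeper in the net stack.
		 */
		zcrx_restart_rx_queue(ifq);	/* hypothetical helper */
	}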
 {
 	int i;
-	guard(mutex)(&ifq->dma_lock);
+	guard(mutex)(&ifq->pp_lock);
 	if (!area->is_mapped)
 		return;
 	area->is_mapped = false;
 {
 	int ret;
-	guard(mutex)(&ifq->dma_lock);
+	guard(mutex)(&ifq->pp_lock);
 	if (area->is_mapped)
 		return 0;
 	ifq->ctx = ctx;
 	spin_lock_init(&ifq->lock);
 	spin_lock_init(&ifq->rq_lock);
-	mutex_init(&ifq->dma_lock);
+	mutex_init(&ifq->pp_lock);
 	return ifq;
 }
 	put_device(ifq->dev);
 	io_free_rbuf_ring(ifq);
-	mutex_destroy(&ifq->dma_lock);
+	mutex_destroy(&ifq->pp_lock);
 	kfree(ifq);
 }
 	struct net_device		*netdev;
 	netdevice_tracker		netdev_tracker;
 	spinlock_t			lock;
-	struct mutex			dma_lock;
+
+	/*
+	 * Page pool and net configuration lock, can be taken deeper in the
+	 * net stack.
+	 */
+	struct mutex			pp_lock;
 	struct io_mapped_region		region;
 };
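Background sketch (not part of the patch): guard(mutex)(...) used in the
hunks above comes from <linux/cleanup.h> via DEFINE_GUARD in
<linux/mutex.h>. It locks the mutex and automatically unlocks it when
the enclosing scope is left, so every return path drops pp_lock without
an explicit mutex_unlock():

	static int guard_example(struct mutex *m, bool cond)
	{
		guard(mutex)(m);	/* mutex_lock(m) */
		if (cond)
			return 0;	/* auto mutex_unlock(m) */
		return 1;		/* auto mutex_unlock(m) */
	}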