io_uring/zcrx: add sync refill queue flushing
author Pavel Begunkov <asml.silence@gmail.com>
Thu, 13 Nov 2025 10:46:13 +0000 (10:46 +0000)
committer Jens Axboe <axboe@kernel.dk>
Thu, 13 Nov 2025 18:19:37 +0000 (11:19 -0700)
Add a zcrx interface via IORING_REGISTER_ZCRX_CTRL that forces the
kernel to flush / consume entries from the refill queue. Just as with
the IORING_REGISTER_ZCRX_REFILL attempt, the motivation is to address
cases where the refill queue becomes full, and the user can't return
buffers and needs to stash them. It's still a slow path, and the user
should size the refill queue appropriately, but it should be helpful for
handling temporary traffic spikes and other unpredictable conditions.

The interface is simpler compared to ZCRX_REFILL as it doesn't need
temporary refill entry arrays and gives natural batching, whereas
ZCRX_REFILL requires even more user logic to be somewhat efficient.

Also, add a structure for the operation. It's not currently used but
leaves room for future extensions such as limiting the number of
buffers to process.
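
For illustration, a minimal userspace sketch of invoking the new
operation. It assumes the raw io_uring_register() syscall takes a
struct zcrx_ctrl pointer with nr_args set to 1; verify against the
merged uapi before relying on it:

#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Ask the kernel to synchronously drain the refill queue of the zcrx
 * instance identified by zcrx_id. Reserved fields must be zero, so
 * clear the whole struct first. */
static int zcrx_flush_refill(int ring_fd, __u32 zcrx_id)
{
	struct zcrx_ctrl ctrl;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.zcrx_id = zcrx_id;
	ctrl.op = ZCRX_CTRL_FLUSH_RQ;

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ZCRX_CTRL, &ctrl, 1);
}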

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/uapi/linux/io_uring.h
io_uring/zcrx.c

index 0e1d353fab1d2f29f641b533cd0e8ce3c7364e67..db47fced2cc671efd2bf8df295addacc9153b5fd 100644
@@ -1082,13 +1082,21 @@ struct io_uring_zcrx_ifq_reg {
 };
 
 enum zcrx_ctrl_op {
+       ZCRX_CTRL_FLUSH_RQ,
+
        __ZCRX_CTRL_LAST,
 };
 
+struct zcrx_ctrl_flush_rq {
+       __u64           __resv[6];
+};
+
 struct zcrx_ctrl {
        __u32   zcrx_id;
        __u32   op; /* see enum zcrx_ctrl_op */
-       __u64   __resv[8];
+       __u64   __resv[2];
+
+       struct zcrx_ctrl_flush_rq       zc_flush;
 };
 
 #ifdef __cplusplus
index 0b5f4320c7a98b623a8402c91fc2cff0c4a39ae0..08c103af69bcddf89000b0a2b19aefe349c5556b 100644
@@ -941,6 +941,71 @@ static const struct memory_provider_ops io_uring_pp_zc_ops = {
        .uninstall              = io_pp_uninstall,
 };
 
+static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
+                             struct io_zcrx_ifq *zcrx)
+{
+       unsigned int mask = zcrx->rq_entries - 1;
+       unsigned int i;
+
+       guard(spinlock_bh)(&zcrx->rq_lock);
+
+       nr = min(nr, io_zcrx_rqring_entries(zcrx));
+       for (i = 0; i < nr; i++) {
+               struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask);
+               struct net_iov *niov;
+
+               if (!io_parse_rqe(rqe, zcrx, &niov))
+                       break;
+               netmem_array[i] = net_iov_to_netmem(niov);
+       }
+
+       smp_store_release(&zcrx->rq_ring->head, zcrx->cached_rq_head);
+       return i;
+}
+
+#define ZCRX_FLUSH_BATCH 32
+
+static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
+{
+       unsigned i;
+
+       for (i = 0; i < nr; i++) {
+               netmem_ref netmem = netmems[i];
+               struct net_iov *niov = netmem_to_net_iov(netmem);
+
+               if (!io_zcrx_put_niov_uref(niov))
+                       continue;
+               if (!page_pool_unref_and_test(netmem))
+                       continue;
+               io_zcrx_return_niov(niov);
+       }
+}
+
+static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
+                        struct zcrx_ctrl *ctrl)
+{
+       struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
+       netmem_ref netmems[ZCRX_FLUSH_BATCH];
+       unsigned total = 0;
+       unsigned nr;
+
+       if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
+               return -EINVAL;
+
+       do {
+               nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx);
+
+               zcrx_return_buffers(netmems, nr);
+               total += nr;
+
+               if (fatal_signal_pending(current))
+                       break;
+               cond_resched();
+       } while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq_entries);
+
+       return 0;
+}
+
 int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
 {
        struct zcrx_ctrl ctrl;
@@ -956,10 +1021,13 @@ int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
        zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
        if (!zcrx)
                return -ENXIO;
-       if (ctrl.op >= __ZCRX_CTRL_LAST)
-               return -EOPNOTSUPP;
 
-       return -EINVAL;
+       switch (ctrl.op) {
+       case ZCRX_CTRL_FLUSH_RQ:
+               return zcrx_flush_rq(ctx, zcrx, &ctrl);
+       }
+
+       return -EOPNOTSUPP;
 }
 
 static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
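
As a usage note, the stash-and-flush fallback the commit message
alludes to could look roughly like the sketch below on the application
side; refill_enqueue() and stash_for_later() are hypothetical helpers
standing in for the application's normal refill-queue producer path,
and zcrx_flush_refill() is the sketch shown under the commit message
above:

/* Hypothetical application helpers: the regular refill-queue producer
 * and an overflow stash for buffers that could not be returned. */
extern bool refill_enqueue(const struct io_uring_zcrx_rqe *rqe);
extern void stash_for_later(const struct io_uring_zcrx_rqe *rqe);

/* Return one buffer: try the ring first; the sync flush is a slow
 * path and is only attempted once the ring is found full. */
static void zcrx_recycle(int ring_fd, __u32 zcrx_id,
			 const struct io_uring_zcrx_rqe *rqe)
{
	if (refill_enqueue(rqe))
		return;
	zcrx_flush_refill(ring_fd, zcrx_id);	/* drain, then retry */
	if (!refill_enqueue(rqe))
		stash_for_later(rqe);
}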