io_uring/zcrx: throttle receive requests
author    Pavel Begunkov <asml.silence@gmail.com>
          Sat, 15 Feb 2025 00:09:43 +0000 (16:09 -0800)
committer Jens Axboe <axboe@kernel.dk>
          Mon, 17 Feb 2025 12:41:09 +0000 (05:41 -0700)
io_zc_rx_tcp_recvmsg() continues until it fails or there is nothing to
receive. If the other side sends fast enough, we might get stuck in
io_zc_rx_tcp_recvmsg() producing more and more CQEs but never letting
the user handle them, leading to unbounded latencies.

Break out of it once an arbitrarily chosen limit is hit; the upper
layer will then either return to userspace or requeue the request.
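
The fix is a classic per-call work budget. Below is a minimal user-space
sketch of the pattern, assuming only libc; handle_one_skb(),
recv_budgeted(), RECV_EAGAIN and RECV_REQUEUE are illustrative
stand-ins, while the counter and limit mirror the patch's nr_skbs and
IO_SKBS_PER_CALL_LIMIT:

    #include <stdio.h>

    #define SKBS_PER_CALL_LIMIT 20  /* mirrors IO_SKBS_PER_CALL_LIMIT */
    #define RECV_EAGAIN  (-1)       /* budget exhausted, stop for now */
    #define RECV_REQUEUE (-2)       /* ask the caller to requeue us */

    struct recv_args {
            unsigned nr_skbs;       /* per-call work counter */
            int multishot;          /* analogue of IO_URING_F_MULTISHOT */
    };

    /* Per-skb callback: refuse further work once the budget is spent. */
    static int handle_one_skb(struct recv_args *args)
    {
            if (args->nr_skbs++ > SKBS_PER_CALL_LIMIT)
                    return RECV_EAGAIN;
            /* ... consume the skb, post a CQE ... */
            return 0;
    }

    /*
     * Outer loop: translate "budget exhausted" into a requeue when
     * running in multishot mode, so the submitter regains control
     * instead of the loop spinning while a fast peer keeps the
     * socket full.
     */
    static int recv_budgeted(struct recv_args *args, int pending)
    {
            int ret = 0;

            while (pending-- && !ret)
                    ret = handle_one_skb(args);

            if (ret == RECV_EAGAIN &&
                args->nr_skbs > SKBS_PER_CALL_LIMIT && args->multishot)
                    return RECV_REQUEUE;
            return ret;
    }

    int main(void)
    {
            struct recv_args args = { .nr_skbs = 0, .multishot = 1 };

            /* A burst of 1000 skbs is cut off after ~20, not drained
             * in one go. */
            printf("ret = %d (RECV_REQUEUE = %d)\n",
                   recv_budgeted(&args, 1000), RECV_REQUEUE);
            return 0;
    }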

Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Link: https://lore.kernel.org/r/20250215000947.789731-9-dw@davidwei.uk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/net.c
io_uring/zcrx.c

diff --git a/io_uring/net.c b/io_uring/net.c
index 260eb73a585490de9d5fcc5a417c3dd358c07c9b..000dc70d08d0d1fe2d10ddcdebbf1a8abf900399 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1285,6 +1285,8 @@ int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
        if (unlikely(ret <= 0) && ret != -EAGAIN) {
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
+               if (ret == IOU_REQUEUE)
+                       return IOU_REQUEUE;
 
                req_set_fail(req);
                io_req_set_res(req, ret, 0);
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 7d24fc98b30662a48135a32eb5330929f252d826..7e0cba1e0f3965d7cf5a020ff51f7de82117ab3d 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -94,10 +94,13 @@ static void io_zcrx_sync_for_device(const struct page_pool *pool,
 
 #define IO_RQ_MAX_ENTRIES              32768
 
+#define IO_SKBS_PER_CALL_LIMIT 20
+
 struct io_zcrx_args {
        struct io_kiocb         *req;
        struct io_zcrx_ifq      *ifq;
        struct socket           *sock;
+       unsigned                nr_skbs;
 };
 
 static const struct memory_provider_ops io_uring_pp_zc_ops;
@@ -720,6 +723,9 @@ io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
        int i, copy, end, off;
        int ret = 0;
 
+       if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
+               return -EAGAIN;
+
        start = skb_headlen(skb);
        start_off = offset;
 
@@ -810,6 +816,9 @@ static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
                        ret = -ENOTCONN;
                else
                        ret = -EAGAIN;
+       } else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
+                  (issue_flags & IO_URING_F_MULTISHOT)) {
+               ret = IOU_REQUEUE;
        } else if (sock_flag(sk, SOCK_DONE)) {
                /* Make it to retry until it finally gets 0. */
                if (issue_flags & IO_URING_F_MULTISHOT)
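
Note where the new branch lands: after the hard-failure handling but
before the SOCK_DONE check, so a connection that merely exhausted its
per-call budget takes the requeue path rather than the retry logic
below. A compressed, illustrative restatement of that decision chain
(classify_stop(), LIMIT, MULTISHOT and REQUEUE are stand-ins for the
patch's IO_SKBS_PER_CALL_LIMIT, IO_URING_F_MULTISHOT and IOU_REQUEUE;
the SOCK_DONE handling, truncated in the hunk, is left out):

    #include <errno.h>

    #define LIMIT     20    /* stand-in for IO_SKBS_PER_CALL_LIMIT */
    #define MULTISHOT 0x1   /* stand-in for IO_URING_F_MULTISHOT */
    #define REQUEUE   (-3)  /* stand-in for IOU_REQUEUE */

    static int classify_stop(int sock_error, unsigned nr_skbs,
                             unsigned issue_flags)
    {
            if (sock_error)                 /* e.g. -ENOTCONN */
                    return sock_error;
            if (nr_skbs > LIMIT && (issue_flags & MULTISHOT))
                    return REQUEUE;         /* throttled: hand back */
            return -EAGAIN;                 /* nothing more for now */
    }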