From c299ba660bed768e3d1ecafe67c144a44ed21f2f Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Mon, 29 Jan 2024 12:00:58 -0700
Subject: io_uring/net: limit inline multishot retries

From: Jens Axboe <axboe@kernel.dk>

Commit 76b367a2d83163cf19173d5cb0b562acbabc8eac upstream.

If we have multiple clients and some/all are flooding the receives to
such an extent that we can retry a LOT when handling multishot receives,
then we can end up starving some clients and hence serving traffic in an
imbalanced fashion.

Limit multishot retry attempts to some arbitrary value, whose only
purpose is to ensure that we don't keep serving a single connection for
way too long. We default to 32 retries, which should be more than enough
to provide fairness, yet not so small that we'll spend too much time
requeuing rather than handling traffic.

Cc: stable@vger.kernel.org
Depends-on: 704ea888d646 ("io_uring/poll: add requeue return code from poll multishot handling")
Depends-on: 1e5d765a82f ("io_uring/net: un-indent mshot retry path in io_recv_finish()")
Depends-on: e84b01a880f6 ("io_uring/poll: move poll execution helpers higher up")
Fixes: b3fdea6ecb55 ("io_uring: multishot recv")
Fixes: 9bb66906f23e ("io_uring: support multishot in recvmsg")
Link: https://github.com/axboe/liburing/issues/1043
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
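
As a user-space sketch of the scheme (not part of the patch; the names
below are illustrative rather than io_uring's): serve a connection inline
while it has more data and the budget holds, and once the budget is
spent, reset the counter and hand the work back to the queue.

#include <stdbool.h>
#include <stdio.h>

#define MULTISHOT_MAX_RETRY	32

struct conn {
	int id;
	unsigned nr_loops;	/* mirrors sr->nr_multishot_loops */
};

/* stand-in for "the socket still has data queued" */
static bool sock_nonempty(const struct conn *c)
{
	(void)c;
	return true;
}

/* returns true to retry inline, false to requeue at the back */
static bool serve_once(struct conn *c)
{
	/* ... one receive would happen here ... */
	if (sock_nonempty(c) && c->nr_loops++ < MULTISHOT_MAX_RETRY)
		return true;
	c->nr_loops = 0;	/* budget spent: let other connections run */
	return false;
}

int main(void)
{
	struct conn a = { .id = 1, .nr_loops = 0 };
	int inline_serves = 0;

	while (serve_once(&a))
		inline_serves++;
	printf("conn %d: %d inline serves before requeue\n", a.id, inline_serves);
	return 0;
}

With the cap at 32, the loop above serves the connection 32 times before
forcing a requeue, which is the behaviour the hunks below implement
inside io_recv_finish().
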
 io_uring/net.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -60,6 +60,7 @@ struct io_sr_msg {
 unsigned len;
 unsigned done_io;
 unsigned msg_flags;
+ unsigned nr_multishot_loops;
 u16 flags;
 /* initialised and used only by !msg send variants */
 u16 addr_len;
@@ -70,6 +71,13 @@ struct io_sr_msg {
 struct io_kiocb *notif;
 };

+/*
+ * Number of times we'll try and do receives if there's more data. If we
+ * exceed this limit, then add us to the back of the queue and retry from
+ * there. This helps fairness between flooding clients.
+ */
+#define MULTISHOT_MAX_RETRY 32
+
 static inline bool io_check_multishot(struct io_kiocb *req,
 unsigned int issue_flags)
 {
@@ -611,6 +619,7 @@ int io_recvmsg_prep(struct io_kiocb *req
 sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
 sr->done_io = 0;
+ sr->nr_multishot_loops = 0;
 return 0;
 }

@@ -654,12 +663,20 @@ static inline bool io_recv_finish(struct
 */
 if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
 *ret, cflags | IORING_CQE_F_MORE)) {
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
+
 io_recv_prep_retry(req);
 /* Known not-empty or unknown state, retry */
- if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
- return false;
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
+ if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
+ return false;
+ /* mshot retries exceeded, force a requeue */
+ sr->nr_multishot_loops = 0;
+ mshot_retry_ret = IOU_REQUEUE;
+ }
 if (issue_flags & IO_URING_F_MULTISHOT)
- *ret = IOU_ISSUE_SKIP_COMPLETE;
+ *ret = mshot_retry_ret;
 else
 *ret = -EAGAIN;
 return true;
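
For reference, the retries being capped are generated by multishot
receives like the ones in the liburing issue linked above. A minimal
arming sketch against the public liburing API (assuming liburing 2.3+
and a connected 'sockfd'; error handling is trimmed, and GROUP_ID and
the buffer sizes are arbitrary):

#include <liburing.h>

#define GROUP_ID	7
#define BUF_SZ		4096
#define NBUFS		8

static char bufs[NBUFS][BUF_SZ];

/* arm one multishot receive; each completion then carries
 * IORING_CQE_F_MORE while the request stays active */
static int arm_multishot_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe;

	/* give the kernel a pool of buffers to pick from; a real
	 * program would reap the CQE for this before proceeding */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUF_SZ, NBUFS, GROUP_ID, 0);

	/* one SQE, many CQEs: buffer selection is mandatory here */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = GROUP_ID;

	return io_uring_submit(ring);
}

A single armed request like this can complete many times; before this
patch, a socket that always had data pending could keep the inline
retry loop spinning on the same request indefinitely.
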
From 1a45c2b50f7f10e84a4ee57b0b0556d76f995866 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Mon, 29 Jan 2024 11:57:11 -0700
Subject: io_uring/poll: add requeue return code from poll multishot handling

From: Jens Axboe <axboe@kernel.dk>

Commit 704ea888d646cb9d715662944cf389c823252ee0 upstream.

Since our poll handling is edge triggered, multishot handlers retry
internally until they know that no more data is available. In
preparation for limiting these retries, add an internal return code,
IOU_REQUEUE, which a handler can use to tell the poll backend that it
wants to retry, but that the retry should happen through a normal
task_work requeue rather than by hammering on the issue side for this
one request.

No functional changes in this patch; nobody is using this return code
just yet.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
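
A compact user-space model of the return-code plumbing this patch adds
(a sketch, not kernel code: the IOU_STOP_MULTISHOT value stands in for
-ECANCELED, and poll_issue() fakes a handler that wants a requeue):

#include <stdio.h>

enum {
	IOU_REQUEUE = -3072,		/* this patch: retry via task_work */
	IOU_STOP_MULTISHOT = -125,	/* stands in for -ECANCELED */
};

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
	IOU_POLL_REQUEUE = 4,		/* this patch */
};

/* stand-in for io_poll_issue(): pretend the multishot handler
 * wants a requeue rather than another inline pass */
static int poll_issue(void)
{
	return IOU_REQUEUE;
}

static int poll_check_events(void)
{
	int ret = poll_issue();

	if (ret == IOU_STOP_MULTISHOT)
		return IOU_POLL_REMOVE_POLL_USE_RES;
	if (ret == IOU_REQUEUE)		/* the new mapping */
		return IOU_POLL_REQUEUE;
	return ret < 0 ? ret : IOU_POLL_DONE;
}

int main(void)
{
	switch (poll_check_events()) {
	case IOU_POLL_NO_ACTION:
		break;			/* nothing to do yet */
	case IOU_POLL_REQUEUE:
		/* kernel: __io_poll_execute(req, 0) queues task_work */
		printf("requeue via task_work\n");
		break;
	default:
		printf("complete, or remove poll and use the result\n");
	}
	return 0;
}

Routing through task_work sends the request to the back of the normal
execution queue instead of re-issuing it immediately, which is what
lets the MULTISHOT_MAX_RETRY cap in io_uring/net translate into
fairness between busy connections.
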
 io_uring/io_uring.h | 7 +++++++
 io_uring/poll.c | 9 ++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)

--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -31,6 +31,13 @@ enum {
 IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

 /*
+ * Requeue the task_work to restart operations on this request. The
+ * actual value isn't important, should just be not an otherwise
+ * valid error code, yet less than -MAX_ERRNO and valid internally.
+ */
+ IOU_REQUEUE = -3072,
+
+ /*
 * Intended only when both IO_URING_F_MULTISHOT is passed
 * to indicate to the poll runner that multishot should be
 * removed and the result is set on req->cqe.res.
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -226,6 +226,7 @@ enum {
 IOU_POLL_NO_ACTION = 1,
 IOU_POLL_REMOVE_POLL_USE_RES = 2,
 IOU_POLL_REISSUE = 3,
+ IOU_POLL_REQUEUE = 4,
 };

 static void __io_poll_execute(struct io_kiocb *req, int mask)
@@ -329,6 +330,8 @@ static int io_poll_check_events(struct i
 int ret = io_poll_issue(req, ts);
 if (ret == IOU_STOP_MULTISHOT)
 return IOU_POLL_REMOVE_POLL_USE_RES;
+ else if (ret == IOU_REQUEUE)
+ return IOU_POLL_REQUEUE;
 if (ret < 0)
 return ret;
 }
@@ -351,8 +354,12 @@ void io_poll_task_func(struct io_kiocb *
 int ret;

 ret = io_poll_check_events(req, ts);
- if (ret == IOU_POLL_NO_ACTION)
+ if (ret == IOU_POLL_NO_ACTION) {
 return;
+ } else if (ret == IOU_POLL_REQUEUE) {
+ __io_poll_execute(req, 0);
+ return;
+ }
 io_poll_remove_entries(req);
 io_poll_tw_hash_eject(req, ts);