6.6-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 13 Feb 2024 16:26:20 +0000 (17:26 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 13 Feb 2024 16:26:20 +0000 (17:26 +0100)
added patches:
io_uring-net-limit-inline-multishot-retries.patch
io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch
io_uring-poll-add-requeue-return-code-from-poll-multishot-handling.patch
io_uring-poll-move-poll-execution-helpers-higher-up.patch

queue-6.6/io_uring-net-limit-inline-multishot-retries.patch [new file with mode: 0644]
queue-6.6/io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch [new file with mode: 0644]
queue-6.6/io_uring-poll-add-requeue-return-code-from-poll-multishot-handling.patch [new file with mode: 0644]
queue-6.6/io_uring-poll-move-poll-execution-helpers-higher-up.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/io_uring-net-limit-inline-multishot-retries.patch b/queue-6.6/io_uring-net-limit-inline-multishot-retries.patch
new file mode 100644
index 0000000..2e79f2d
--- /dev/null
@@ -0,0 +1,89 @@
+From aeec8884c1607a73b379e610cc8de27e3f6244ce Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 29 Jan 2024 12:00:58 -0700
+Subject: io_uring/net: limit inline multishot retries
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 76b367a2d83163cf19173d5cb0b562acbabc8eac upstream.
+
+If we have multiple clients and some/all are flooding the receives to
+such an extent that we can retry a LOT handling multishot receives, then
+we can be starving some clients and hence serving traffic in an
+imbalanced fashion.
+
+Limit multishot retry attempts to some arbitrary value, whose only
+purpose is to ensure that we don't keep serving a single connection
+for way too long. We default to 32 retries, which should be more than
+enough to provide fairness, yet not so small that we'll spend too much
+time requeuing rather than handling traffic.
+
+Cc: stable@vger.kernel.org
+Depends-on: 704ea888d646 ("io_uring/poll: add requeue return code from poll multishot handling")
+Depends-on: 91e5d765a82f ("io_uring/net: un-indent mshot retry path in io_recv_finish()")
+Depends-on: e84b01a880f6 ("io_uring/poll: move poll execution helpers higher up")
+Fixes: b3fdea6ecb55 ("io_uring: multishot recv")
+Fixes: 9bb66906f23e ("io_uring: support multishot in recvmsg")
+Link: https://github.com/axboe/liburing/issues/1043
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c |   23 ++++++++++++++++++++---
+ 1 file changed, 20 insertions(+), 3 deletions(-)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -60,6 +60,7 @@ struct io_sr_msg {
+       unsigned                        len;
+       unsigned                        done_io;
+       unsigned                        msg_flags;
++      unsigned                        nr_multishot_loops;
+       u16                             flags;
+       /* initialised and used only by !msg send variants */
+       u16                             addr_len;
+@@ -70,6 +71,13 @@ struct io_sr_msg {
+       struct io_kiocb                 *notif;
+ };
+
++/*
++ * Number of times we'll try and do receives if there's more data. If we
++ * exceed this limit, then add us to the back of the queue and retry from
++ * there. This helps fairness between flooding clients.
++ */
++#define MULTISHOT_MAX_RETRY   32
++
+ static inline bool io_check_multishot(struct io_kiocb *req,
+                                     unsigned int issue_flags)
+ {
+@@ -611,6 +619,7 @@ int io_recvmsg_prep(struct io_kiocb *req
+               sr->msg_flags |= MSG_CMSG_COMPAT;
+ #endif
+       sr->done_io = 0;
++      sr->nr_multishot_loops = 0;
+       return 0;
+ }
+
+@@ -654,12 +663,20 @@ static inline bool io_recv_finish(struct
+        */
+       if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+                               *ret, cflags | IORING_CQE_F_MORE)) {
++              struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++              int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
++
+               io_recv_prep_retry(req);
+               /* Known not-empty or unknown state, retry */
+-              if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
+-                      return false;
++              if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
++                      if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
++                              return false;
++                      /* mshot retries exceeded, force a requeue */
++                      sr->nr_multishot_loops = 0;
++                      mshot_retry_ret = IOU_REQUEUE;
++              }
+               if (issue_flags & IO_URING_F_MULTISHOT)
+-                      *ret = IOU_ISSUE_SKIP_COMPLETE;
++                      *ret = mshot_retry_ret;
+               else
+                       *ret = -EAGAIN;
+               return true;
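
The retry cap above is a general bounded-drain pattern for edge-triggered event loops: keep re-reading inline while data is available, but after a fixed budget push the source to the back of the ready queue so its siblings get served. A minimal user-space sketch of the same idea follows; it is not io_uring code, DRAIN_BUDGET merely mirrors MULTISHOT_MAX_RETRY, and the deliver/requeue callbacks are assumed hooks.

/*
 * Sketch only: bounded inline drain of an edge-triggered socket.
 * After DRAIN_BUDGET reads, hand the fd back to the caller's ready
 * queue instead of starving other connections.
 */
#include <errno.h>
#include <stdbool.h>
#include <sys/socket.h>
#include <sys/types.h>

#define DRAIN_BUDGET 32 /* mirrors MULTISHOT_MAX_RETRY above */

/* Returns true when the fd is drained (or dead), false if requeued. */
bool drain_socket(int fd,
                  void (*deliver)(const char *buf, ssize_t len),
                  void (*requeue)(int fd))
{
        char buf[4096];
        int loops = 0;

        for (;;) {
                ssize_t n = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);

                if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
                        return true;    /* no more data: fully drained */
                if (n <= 0)
                        return true;    /* error or EOF: stop serving */
                deliver(buf, n);
                if (++loops >= DRAIN_BUDGET) {
                        /* Still readable, but yield to other fds. */
                        requeue(fd);
                        return false;
                }
        }
}
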
diff --git a/queue-6.6/io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch b/queue-6.6/io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch
new file mode 100644
index 0000000..77fa0e4
--- /dev/null
@@ -0,0 +1,67 @@
+From 37dbf0056a58703f815ae076b08dfb9da86c610d Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 29 Jan 2024 11:54:18 -0700
+Subject: io_uring/net: un-indent mshot retry path in io_recv_finish()
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 91e5d765a82fb2c9d0b7ad930d8953208081ddf1 upstream.
+
+In preparation for putting some retry logic in there, have the done
+path just skip straight to the end rather than have too much nesting
+in here.
+
+No functional changes in this patch.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c |   36 ++++++++++++++++++++----------------
+ 1 file changed, 20 insertions(+), 16 deletions(-)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -645,23 +645,27 @@ static inline bool io_recv_finish(struct
+               return true;
+       }
+
+-      if (!mshot_finished) {
+-              if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+-                                      *ret, cflags | IORING_CQE_F_MORE)) {
+-                      io_recv_prep_retry(req);
+-                      /* Known not-empty or unknown state, retry */
+-                      if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
+-                          msg->msg_inq == -1)
+-                              return false;
+-                      if (issue_flags & IO_URING_F_MULTISHOT)
+-                              *ret = IOU_ISSUE_SKIP_COMPLETE;
+-                      else
+-                              *ret = -EAGAIN;
+-                      return true;
+-              }
+-              /* Otherwise stop multishot but use the current result. */
+-      }
++      if (mshot_finished)
++              goto finish;
++      /*
++       * Fill CQE for this receive and see if we should keep trying to
++       * receive from this socket.
++       */
++      if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
++                              *ret, cflags | IORING_CQE_F_MORE)) {
++              io_recv_prep_retry(req);
++              /* Known not-empty or unknown state, retry */
++              if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
++                      return false;
++              if (issue_flags & IO_URING_F_MULTISHOT)
++                      *ret = IOU_ISSUE_SKIP_COMPLETE;
++              else
++                      *ret = -EAGAIN;
++              return true;
++      }
++      /* Otherwise stop multishot but use the current result. */
++finish:
+       io_req_set_res(req, *ret, cflags);
+       if (issue_flags & IO_URING_F_MULTISHOT)
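
The restructuring follows the usual kernel single-exit idiom: the already-finished case jumps over the retry logic to a completion label rather than wrapping it in another nesting level. A generic sketch of that shape, with made-up names rather than the real io_uring helpers:

#include <stdbool.h>

struct request;                                     /* opaque for the sketch */
bool post_more_work(struct request *req, int res);  /* assumed helper */
void set_result(struct request *req, int res);      /* assumed helper */

static bool finish_request(struct request *req, int *ret, bool done)
{
        if (done)
                goto finish;

        if (post_more_work(req, *ret)) {
                /* retry decisions live here, now un-indented */
                return false;
        }
        /* otherwise stop and complete with the current result */
finish:
        set_result(req, *ret);
        return true;
}
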
diff --git a/queue-6.6/io_uring-poll-add-requeue-return-code-from-poll-multishot-handling.patch b/queue-6.6/io_uring-poll-add-requeue-return-code-from-poll-multishot-handling.patch
new file mode 100644
index 0000000..929fbc7
--- /dev/null
@@ -0,0 +1,76 @@
+From 330e3bbfe6e21b715b02362d2a6e427907efb6c8 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 29 Jan 2024 11:57:11 -0700
+Subject: io_uring/poll: add requeue return code from poll multishot handling
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 704ea888d646cb9d715662944cf389c823252ee0 upstream.
+
+Since our poll handling is edge triggered, multishot handlers retry
+internally until they know that no more data is available. In
+preparation for limiting these retries, add an internal return code,
+IOU_REQUEUE, which can be used to inform the poll backend about the
+handler wanting to retry, but that this should happen through a normal
+task_work requeue rather than keep hammering on the issue side for this
+one request.
+
+No functional changes in this patch; nobody is using this return code
+just yet.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.h |    7 +++++++
+ io_uring/poll.c     |    9 ++++++++-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -31,6 +31,13 @@ enum {
+       IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
+
++      /*
++       * Requeue the task_work to restart operations on this request. The
++       * actual value isn't important, should just be not an otherwise
++       * valid error code, yet less than -MAX_ERRNO and valid internally.
++       */
++      IOU_REQUEUE             = -3072,
++
+       /*
+        * Intended only when both IO_URING_F_MULTISHOT is passed
+        * to indicate to the poll runner that multishot should be
+        * removed and the result is set on req->cqe.res.
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -226,6 +226,7 @@ enum {
+       IOU_POLL_NO_ACTION = 1,
+       IOU_POLL_REMOVE_POLL_USE_RES = 2,
+       IOU_POLL_REISSUE = 3,
++      IOU_POLL_REQUEUE = 4,
+ };
+
+ static void __io_poll_execute(struct io_kiocb *req, int mask)
+@@ -324,6 +325,8 @@ static int io_poll_check_events(struct i
+                       int ret = io_poll_issue(req, ts);
+                       if (ret == IOU_STOP_MULTISHOT)
+                               return IOU_POLL_REMOVE_POLL_USE_RES;
++                      else if (ret == IOU_REQUEUE)
++                              return IOU_POLL_REQUEUE;
+                       if (ret < 0)
+                               return ret;
+               }
+@@ -346,8 +349,12 @@ void io_poll_task_func(struct io_kiocb *
+       int ret;
+
+       ret = io_poll_check_events(req, ts);
+-      if (ret == IOU_POLL_NO_ACTION)
++      if (ret == IOU_POLL_NO_ACTION) {
+               return;
++      } else if (ret == IOU_POLL_REQUEUE) {
++              __io_poll_execute(req, 0);
++              return;
++      }
+
+       io_poll_remove_entries(req);
+       io_poll_tw_hash_eject(req, ts);
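
IOU_REQUEUE is an instance of a common convention: internal handlers return either a negative errno or one of a few private sentinel codes that cannot be confused with an errno, and the caller dispatches on the sentinels first. A self-contained sketch of the convention; names and values are arbitrary, and only the requeue role is taken from the patch above.

#include <stdio.h>

/* Private action codes, picked so no handler errno can collide. */
enum {
        ACT_STOP    = -4096,
        ACT_REQUEUE = -4097,
};

/* Toy handler: asks to be requeued once its inline budget is gone. */
static int handle_event(int budget_left)
{
        if (budget_left <= 0)
                return ACT_REQUEUE;
        return 0;               /* real failures would return -errno */
}

int main(void)
{
        switch (handle_event(0)) {
        case ACT_REQUEUE:
                puts("requeue: restart from task context later");
                break;
        case ACT_STOP:
                puts("stop handling this source");
                break;
        default:
                puts("completed (or a -errno)");
        }
        return 0;
}
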
diff --git a/queue-6.6/io_uring-poll-move-poll-execution-helpers-higher-up.patch b/queue-6.6/io_uring-poll-move-poll-execution-helpers-higher-up.patch
new file mode 100644
index 0000000..cb0fbe6
--- /dev/null
@@ -0,0 +1,66 @@
+From 582cc8795c22337041abc7ee06f9de34f1592922 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 29 Jan 2024 11:52:54 -0700
+Subject: io_uring/poll: move poll execution helpers higher up
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit e84b01a880f635e3084a361afba41f95ff500d12 upstream.
+
+In preparation for calling __io_poll_execute() higher up, move the
+functions to avoid forward declarations.
+
+No functional changes in this patch.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/poll.c |   30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -228,6 +228,21 @@ enum {
+       IOU_POLL_REISSUE = 3,
+ };
+
++static void __io_poll_execute(struct io_kiocb *req, int mask)
++{
++      io_req_set_res(req, mask, 0);
++      req->io_task_work.func = io_poll_task_func;
++
++      trace_io_uring_task_add(req, mask);
++      io_req_task_work_add(req);
++}
++
++static inline void io_poll_execute(struct io_kiocb *req, int res)
++{
++      if (io_poll_get_ownership(req))
++              __io_poll_execute(req, res);
++}
++
+ /*
+  * All poll tw should go through this. Checks for poll events, manages
+  * references, does rewait, etc.
+@@ -364,21 +379,6 @@ void io_poll_task_func(struct io_kiocb *
+       }
+ }
+
+-static void __io_poll_execute(struct io_kiocb *req, int mask)
+-{
+-      io_req_set_res(req, mask, 0);
+-      req->io_task_work.func = io_poll_task_func;
+-
+-      trace_io_uring_task_add(req, mask);
+-      io_req_task_work_add(req);
+-}
+-
+-static inline void io_poll_execute(struct io_kiocb *req, int res)
+-{
+-      if (io_poll_get_ownership(req))
+-              __io_poll_execute(req, res);
+-}
+-
+ static void io_poll_cancel_req(struct io_kiocb *req)
+ {
+       io_poll_mark_cancelled(req);
diff --git a/queue-6.6/series b/queue-6.6/series
index a364fa0282f94e398bada9a5b706af8cb7ae211d..ee0b4b4af897f2126d0f5b5aec39aaca00d241e6 100644
@@ -115,3 +115,7 @@ input-atkbd-skip-atkbd_cmd_setleds-when-skipping-atkbd_cmd_getid.patch
 revert-asoc-amd-add-new-dmi-entries-for-acp5x-platform.patch
 media-solo6x10-replace-max-a-min-b-c-by-clamp-b-a-c.patch
 io_uring-net-fix-sr-len-for-ioring_op_recv-with-msg_waitall-and-buffers.patch
+io_uring-poll-move-poll-execution-helpers-higher-up.patch
+io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch
+io_uring-poll-add-requeue-return-code-from-poll-multishot-handling.patch
+io_uring-net-limit-inline-multishot-retries.patch