6.8-stable patches
Author:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
AuthorDate: Mon, 15 Apr 2024 10:51:28 +0000 (12:51 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Mon, 15 Apr 2024 10:51:28 +0000 (12:51 +0200)
added patches:
io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch
io_uring-refactor-defer_taskrun-multishot-checks.patch

queue-6.8/io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch [new file with mode: 0644]
queue-6.8/io_uring-refactor-defer_taskrun-multishot-checks.patch [new file with mode: 0644]
queue-6.8/series

diff --git a/queue-6.8/io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch b/queue-6.8/io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch
new file mode 100644
index 0000000..cca2feb
--- /dev/null
+++ b/queue-6.8/io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch
@@ -0,0 +1,44 @@
+From 5984bb9e2b72455e8e5a1925edde3079c9f949be Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 1 Apr 2024 11:30:06 -0600
+Subject: io_uring: disable io-wq execution of multishot NOWAIT requests
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit bee1d5becdf5bf23d4ca0cd9c6b60bdf3c61d72b upstream.
+
+Do the same check for direct io-wq execution of multishot requests that
+commit 2a975d426c82 did for the inline execution, and disable multishot
+mode (reverting to single shot) if the file type doesn't support NOWAIT
+and isn't opened in O_NONBLOCK mode. For multishot to work properly,
+nonblocking read attempts must be possible.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |   13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1964,10 +1964,15 @@ fail:
+               err = -EBADFD;
+               if (!file_can_poll(req->file))
+                       goto fail;
+-              err = -ECANCELED;
+-              if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
+-                      goto fail;
+-              return;
++              if (req->file->f_flags & O_NONBLOCK ||
++                  req->file->f_mode & FMODE_NOWAIT) {
++                      err = -ECANCELED;
++                      if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
++                              goto fail;
++                      return;
++              } else {
++                      req->flags &= ~REQ_F_APOLL_MULTISHOT;
++              }
+       }
+       if (req->flags & REQ_F_FORCE_ASYNC) {
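
For readers who want the gist of the check added above without walking the
hunk: multishot mode may only stay armed when the file can be polled and a
nonblocking attempt is possible; otherwise the request falls back to single
shot. The standalone C sketch below mirrors that decision. The SKETCH_*
constants and the keep_multishot() helper are illustrative stand-ins
invented here, not kernel or io_uring APIs.

/* Hedged sketch: mirrors the fallback decision from the hunk above with
 * plain userspace flags. FMODE_NOWAIT is kernel-internal, so a stand-in
 * constant is used purely for illustration. */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_O_NONBLOCK   0x1   /* stand-in for O_NONBLOCK in f_flags  */
#define SKETCH_FMODE_NOWAIT 0x2   /* stand-in for FMODE_NOWAIT in f_mode */

/* True when multishot may stay armed: the file is pollable and nonblocking
 * attempts can be made, matching the condition the patch introduces. */
static bool keep_multishot(unsigned int f_flags, unsigned int f_mode,
                           bool can_poll)
{
        if (!can_poll)
                return false;                   /* the -EBADFD path above */
        return (f_flags & SKETCH_O_NONBLOCK) ||
               (f_mode & SKETCH_FMODE_NOWAIT); /* else clear multishot */
}

int main(void)
{
        printf("NOWAIT-capable file: %d\n",
               keep_multishot(0, SKETCH_FMODE_NOWAIT, true));
        printf("plain blocking file: %d\n",
               keep_multishot(0, 0, true));
        return 0;
}
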
diff --git a/queue-6.8/io_uring-refactor-defer_taskrun-multishot-checks.patch b/queue-6.8/io_uring-refactor-defer_taskrun-multishot-checks.patch
new file mode 100644
index 0000000..8a80683
--- /dev/null
+++ b/queue-6.8/io_uring-refactor-defer_taskrun-multishot-checks.patch
@@ -0,0 +1,123 @@
+From 6fa8da25be5ad285fae51584ce1d461fec42bd94 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Fri, 8 Mar 2024 13:55:57 +0000
+Subject: io_uring: refactor DEFER_TASKRUN multishot checks
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+Commit e0e4ab52d17096d96c21a6805ccd424b283c3c6d upstream.
+
+We disallow DEFER_TASKRUN multishots from running by io-wq, which is
+checked by individual opcodes in the issue path. We can consolidate all
+of it in io_wq_submit_work(), moving the checks out of the hot path at
+the same time.
+
+Suggested-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/e492f0f11588bb5aa11d7d24e6f53b7c7628afdb.1709905727.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |   20 ++++++++++++++++++++
+ io_uring/net.c      |   21 ---------------------
+ io_uring/rw.c       |    2 --
+ 3 files changed, 20 insertions(+), 23 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -949,6 +949,8 @@ bool io_fill_cqe_req_aux(struct io_kiocb
+       u64 user_data = req->cqe.user_data;
+       struct io_uring_cqe *cqe;
++      lockdep_assert(!io_wq_current_is_worker());
++
+       if (!defer)
+               return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
+@@ -1950,6 +1952,24 @@ fail:
+               goto fail;
+       }
++      /*
++       * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
++       * submitter task context. Final request completions are handed to the
++       * right context, however this is not the case of auxiliary CQEs,
++       * which are the main means of operation for multishot requests.
++       * Don't allow any multishot execution from io-wq. It's more restrictive
++       * than necessary and also cleaner.
++       */
++      if (req->flags & REQ_F_APOLL_MULTISHOT) {
++              err = -EBADFD;
++              if (!file_can_poll(req->file))
++                      goto fail;
++              err = -ECANCELED;
++              if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
++                      goto fail;
++              return;
++      }
++
+       if (req->flags & REQ_F_FORCE_ASYNC) {
+               bool opcode_poll = def->pollin || def->pollout;
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -78,19 +78,6 @@ struct io_sr_msg {
+  */
+ #define MULTISHOT_MAX_RETRY   32
+-static inline bool io_check_multishot(struct io_kiocb *req,
+-                                    unsigned int issue_flags)
+-{
+-      /*
+-       * When ->locked_cq is set we only allow to post CQEs from the original
+-       * task context. Usual request completions will be handled in other
+-       * generic paths but multipoll may decide to post extra cqes.
+-       */
+-      return !(issue_flags & IO_URING_F_IOWQ) ||
+-              !(req->flags & REQ_F_APOLL_MULTISHOT) ||
+-              !req->ctx->task_complete;
+-}
+-
+ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+       struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
+@@ -837,9 +824,6 @@ int io_recvmsg(struct io_kiocb *req, uns
+           (sr->flags & IORING_RECVSEND_POLL_FIRST))
+               return io_setup_async_msg(req, kmsg, issue_flags);
+-      if (!io_check_multishot(req, issue_flags))
+-              return io_setup_async_msg(req, kmsg, issue_flags);
+-
+ retry_multishot:
+       if (io_do_buffer_select(req)) {
+               void __user *buf;
+@@ -935,9 +919,6 @@ int io_recv(struct io_kiocb *req, unsign
+           (sr->flags & IORING_RECVSEND_POLL_FIRST))
+               return -EAGAIN;
+-      if (!io_check_multishot(req, issue_flags))
+-              return -EAGAIN;
+-
+       sock = sock_from_file(req->file);
+       if (unlikely(!sock))
+               return -ENOTSOCK;
+@@ -1386,8 +1367,6 @@ int io_accept(struct io_kiocb *req, unsi
+       struct file *file;
+       int ret, fd;
+-      if (!io_check_multishot(req, issue_flags))
+-              return -EAGAIN;
+ retry:
+       if (!fixed) {
+               fd = __get_unused_fd_flags(accept->flags, accept->nofile);
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -932,8 +932,6 @@ int io_read_mshot(struct io_kiocb *req,
+        */
+       if (!file_can_poll(req->file))
+               return -EBADFD;
+-      if (issue_flags & IO_URING_F_IOWQ)
+-              return -EAGAIN;
+       ret = __io_read(req, issue_flags);
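
The rule the new comment in io_wq_submit_work() spells out (with
DEFER_TASKRUN, auxiliary CQEs may only be posted from the submitter task)
is easiest to picture from userspace. The sketch below is not part of these
patches: it is a minimal liburing example, assuming liburing 2.3+ and a
kernel with IORING_SETUP_DEFER_TASKRUN support (6.1+), that sets up such a
ring and arms a multishot accept; the port and backlog values are
arbitrary.

/* Minimal DEFER_TASKRUN + multishot accept sketch (illustrative only). */
#include <liburing.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret, lfd;

        /* DEFER_TASKRUN requires SINGLE_ISSUER; completions are then only
         * processed when the submitting task itself enters the kernel. */
        ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
                                            IORING_SETUP_DEFER_TASKRUN);
        if (ret < 0) {
                fprintf(stderr, "queue_init: %s\n", strerror(-ret));
                return 1;
        }

        lfd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port = htons(0),           /* any free port */
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };
        bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
        listen(lfd, 16);

        /* One multishot accept SQE keeps producing a CQE per connection. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_multishot_accept(sqe, lfd, NULL, NULL, 0);
        io_uring_submit(&ring);

        /* Blocks until a client connects; the submitter reaps the CQEs. */
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("accepted fd %d, more=%d\n", cqe->res,
                       !!(cqe->flags & IORING_CQE_F_MORE));
                io_uring_cqe_seen(&ring, cqe);
        }

        close(lfd);
        io_uring_queue_exit(&ring);
        return 0;
}
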
diff --git a/queue-6.8/series b/queue-6.8/series
index ef2c1b7a8cf0e5696bbcf43b75512f31716331cf..b8be2ebf2fba13cfe1ff4529e06a188cd59d932c 100644
--- a/queue-6.8/series
+++ b/queue-6.8/series
@@ -104,3 +104,5 @@ tracing-hide-unused-ftrace_event_id_fops.patch
 iommu-vt-d-fix-wrong-use-of-pasid-config.patch
 iommu-vt-d-allocate-local-memory-for-page-request-qu.patch
 iommu-vt-d-fix-warn_on-in-iommu-probe-path.patch
+io_uring-refactor-defer_taskrun-multishot-checks.patch
+io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch