6.6-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 12 May 2025 14:05:14 +0000 (16:05 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 12 May 2025 14:05:14 +0000 (16:05 +0200)
added patches:
io_uring-always-arm-linked-timeouts-prior-to-issue.patch
io_uring-ensure-deferred-completions-are-posted-for-multishot.patch

queue-6.6/io_uring-always-arm-linked-timeouts-prior-to-issue.patch [new file with mode: 0644]
queue-6.6/io_uring-ensure-deferred-completions-are-posted-for-multishot.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/io_uring-always-arm-linked-timeouts-prior-to-issue.patch b/queue-6.6/io_uring-always-arm-linked-timeouts-prior-to-issue.patch
new file mode 100644
index 0000000..bf41f91
--- /dev/null
@@ -0,0 +1,158 @@
+From 9166451efd489fe6a84e22a26ab73a46bfc33c55 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 5 May 2025 08:34:39 -0600
+Subject: io_uring: always arm linked timeouts prior to issue
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit b53e523261bf058ea4a518b482222e7a277b186b upstream.
+
+There are a few spots where linked timeouts are armed, and not all of
+them adhere to the pre-arm, attempt issue, post-arm pattern. This can
+be problematic if the linked request returns that it will trigger a
+callback later, and does so before the linked timeout is fully armed.
+
+Consolidate all the linked timeout handling into __io_issue_sqe(),
+rather than have it spread throughout the various issue entry points.
+
+Cc: stable@vger.kernel.org
+Link: https://github.com/axboe/liburing/issues/1390
+Reported-by: Chase Hiltz <chase@path.net>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |   53 +++++++++++++++-------------------------------------
+ 1 file changed, 16 insertions(+), 37 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -422,24 +422,6 @@ static struct io_kiocb *__io_prep_linked
+       return req->link;
+ }
+-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
+-{
+-      if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
+-              return NULL;
+-      return __io_prep_linked_timeout(req);
+-}
+-
+-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
+-{
+-      io_queue_linked_timeout(__io_prep_linked_timeout(req));
+-}
+-
+-static inline void io_arm_ltimeout(struct io_kiocb *req)
+-{
+-      if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
+-              __io_arm_ltimeout(req);
+-}
+-
+ static void io_prep_async_work(struct io_kiocb *req)
+ {
+       const struct io_issue_def *def = &io_issue_defs[req->opcode];
+@@ -493,7 +475,6 @@ static void io_prep_async_link(struct io
+ static void io_queue_iowq(struct io_kiocb *req)
+ {
+-      struct io_kiocb *link = io_prep_linked_timeout(req);
+       struct io_uring_task *tctx = req->task->io_uring;
+       BUG_ON(!tctx);
+@@ -518,8 +499,6 @@ static void io_queue_iowq(struct io_kioc
+       trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
+       io_wq_enqueue(tctx->io_wq, &req->work);
+-      if (link)
+-              io_queue_linked_timeout(link);
+ }
+ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
+@@ -1863,17 +1842,24 @@ static bool io_assign_file(struct io_kio
+       return !!req->file;
+ }
++#define REQ_ISSUE_SLOW_FLAGS  (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
++
+ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ {
+       const struct io_issue_def *def = &io_issue_defs[req->opcode];
+       const struct cred *creds = NULL;
++      struct io_kiocb *link = NULL;
+       int ret;
+       if (unlikely(!io_assign_file(req, def, issue_flags)))
+               return -EBADF;
+-      if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
+-              creds = override_creds(req->creds);
++      if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
++              if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
++                      creds = override_creds(req->creds);
++              if (req->flags & REQ_F_ARM_LTIMEOUT)
++                      link = __io_prep_linked_timeout(req);
++      }
+       if (!def->audit_skip)
+               audit_uring_entry(req->opcode);
+@@ -1883,8 +1869,12 @@ static int io_issue_sqe(struct io_kiocb
+       if (!def->audit_skip)
+               audit_uring_exit(!ret, ret);
+-      if (creds)
+-              revert_creds(creds);
++      if (unlikely(creds || link)) {
++              if (creds)
++                      revert_creds(creds);
++              if (link)
++                      io_queue_linked_timeout(link);
++      }
+       if (ret == IOU_OK) {
+               if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+@@ -1939,8 +1929,6 @@ void io_wq_submit_work(struct io_wq_work
+       else
+               req_ref_get(req);
+-      io_arm_ltimeout(req);
+-
+       /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
+       if (work->flags & IO_WQ_WORK_CANCEL) {
+ fail:
+@@ -2036,15 +2024,11 @@ struct file *io_file_get_normal(struct i
+ static void io_queue_async(struct io_kiocb *req, int ret)
+       __must_hold(&req->ctx->uring_lock)
+ {
+-      struct io_kiocb *linked_timeout;
+-
+       if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+               io_req_defer_failed(req, ret);
+               return;
+       }
+-      linked_timeout = io_prep_linked_timeout(req);
+-
+       switch (io_arm_poll_handler(req, 0)) {
+       case IO_APOLL_READY:
+               io_kbuf_recycle(req, 0);
+@@ -2057,9 +2041,6 @@ static void io_queue_async(struct io_kio
+       case IO_APOLL_OK:
+               break;
+       }
+-
+-      if (linked_timeout)
+-              io_queue_linked_timeout(linked_timeout);
+ }
+ static inline void io_queue_sqe(struct io_kiocb *req)
+@@ -2073,9 +2054,7 @@ static inline void io_queue_sqe(struct i
+        * We async punt it if the file wasn't marked NOWAIT, or if the file
+        * doesn't support non-blocking read/write attempts
+        */
+-      if (likely(!ret))
+-              io_arm_ltimeout(req);
+-      else
++      if (unlikely(ret))
+               io_queue_async(req, ret);
+ }
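
For context, a minimal userspace sketch of the linked-timeout pattern this
patch hardens: a recv with a linked timeout that cancels it if it does not
complete in time. This assumes the standard liburing API
(io_uring_prep_recv, io_uring_prep_link_timeout); error handling is trimmed
and it is a sketch, not part of the patch.

/* Parent request linked to a timeout. The kernel must arm the timeout
 * before the recv can complete -- the ordering the patch enforces by
 * consolidating arming into io_issue_sqe(). Build: cc demo.c -luring */
#include <liburing.h>

static int recv_with_timeout(struct io_uring *ring, int fd,
			     void *buf, size_t len)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* recv marked IOSQE_IO_LINK: the next SQE is linked to it */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	/* linked timeout: completes the recv with -ECANCELED on expiry */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 2;

	if (io_uring_submit(ring) < 0)
		return -1;

	/* reap both CQEs: the recv's result and the timeout's */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
}

The bug report in the linked issue involved exactly this shape of workload:
if the issue path signals that a callback will fire later, the callback can
run before the timeout is armed unless arming happens up front.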
diff --git a/queue-6.6/io_uring-ensure-deferred-completions-are-posted-for-multishot.patch b/queue-6.6/io_uring-ensure-deferred-completions-are-posted-for-multishot.patch
new file mode 100644
index 0000000..6e6da30
--- /dev/null
@@ -0,0 +1,44 @@
+From fd9f46cfe46cfc49a0d0555e5cfe60d40ec00359 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 7 May 2025 08:07:09 -0600
+Subject: io_uring: ensure deferred completions are posted for multishot
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 687b2bae0efff9b25e071737d6af5004e6e35af5 upstream.
+
+Multishot normally uses io_req_post_cqe() to post completions, but when
+stopping it, it may finish up with a deferred completion. This is fine,
+except if another multishot event triggers before the deferred completions
+get flushed. If this occurs, then CQEs may get reordered in the CQ ring,
+and cause confusion on the application side.
+
+When multishot posting via io_req_post_cqe(), flush any pending deferred
+completions first, if any.
+
+Cc: stable@vger.kernel.org # 6.1+
+Reported-by: Norman Maurer <norman_maurer@apple.com>
+Reported-by: Christian Mazakas <christian.mazakas@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -919,6 +919,14 @@ static bool __io_post_aux_cqe(struct io_
+ {
+       bool filled;
++      /*
++       * If multishot has already posted deferred completions, ensure that
++       * those are flushed first before posting this one. If not, CQEs
++       * could get reordered.
++       */
++      if (!wq_list_empty(&ctx->submit_state.compl_reqs))
++              __io_submit_flush_completions(ctx);
++
+       io_cq_lock(ctx);
+       filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+       if (!filled && allow_overflow)
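
For context, a sketch of a multishot consumer where the CQE ordering this
patch protects actually matters: one multishot accept SQE produces a stream
of CQEs that the application processes in ring order. This assumes
liburing's io_uring_prep_multishot_accept; handle_conn() is a hypothetical
per-connection handler, not anything from the patch.

#include <liburing.h>

/* hypothetical consumer for an accepted connection fd */
static void handle_conn(int fd) { (void)fd; /* ... */ }

static void accept_loop(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	/* one SQE, many CQEs: one per accepted connection */
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe))
			break;
		/* applications rely on CQEs appearing in generation order;
		 * the bug fixed above could let an io_req_post_cqe() CQE
		 * jump ahead of a still-deferred completion */
		if (cqe->res >= 0)
			handle_conn(cqe->res);
		/* no F_MORE: the multishot terminated and needs re-arming */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}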
diff --git a/queue-6.6/series b/queue-6.6/series
index 3a7e607ef5c57abe05ee6323f2fc3ac2e65c93da..98de08ae0f7ae279677abcfddea8173925cd76f7 100644
@@ -87,3 +87,5 @@ mips-fix-max_reg_offset.patch
 drm-panel-simple-update-timings-for-auo-g101evn010.patch
 nvme-unblock-ctrl-state-transition-for-firmware-upda.patch
 do_umount-add-missing-barrier-before-refcount-checks.patch
+io_uring-always-arm-linked-timeouts-prior-to-issue.patch
+io_uring-ensure-deferred-completions-are-posted-for-multishot.patch