--- /dev/null
+From 9166451efd489fe6a84e22a26ab73a46bfc33c55 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 5 May 2025 08:34:39 -0600
+Subject: io_uring: always arm linked timeouts prior to issue
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit b53e523261bf058ea4a518b482222e7a277b186b upstream.
+
+There are a few spots where linked timeouts are armed, and not all of
+them adhere to the pre-arm, attempt issue, post-arm pattern. This can
+be problematic if the linked request indicates that it will complete via
+a callback later, and that callback fires before the linked timeout is
+fully armed.
+
+Consolidate all the linked timeout handling into __io_issue_sqe(),
+rather than have it spread throughout the various issue entry points.
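+
+As an illustration of the ordering this enforces, here is a minimal
+userspace sketch (not kernel code): prep_linked_timeout(), issue() and
+queue_linked_timeout() are hypothetical stand-ins for the real helpers,
+and only model the pre-arm, attempt issue, post-arm sequence.
+
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	struct request { bool has_linked_timeout; };
+
+	/* pre-arm: pick up the linked timeout before issue can complete */
+	static bool prep_linked_timeout(struct request *req)
+	{
+		return req->has_linked_timeout;
+	}
+
+	/* attempt issue: may hand off completion to a callback */
+	static int issue(struct request *req)
+	{
+		printf("issue request\n");
+		return 0;
+	}
+
+	/* post-arm: the timeout only starts after the issue attempt */
+	static void queue_linked_timeout(void)
+	{
+		printf("arm linked timeout\n");
+	}
+
+	int main(void)
+	{
+		struct request req = { .has_linked_timeout = true };
+		bool link = prep_linked_timeout(&req);
+		int ret = issue(&req);
+
+		if (link)
+			queue_linked_timeout();
+		return ret;
+	}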
+
+Cc: stable@vger.kernel.org
+Link: https://github.com/axboe/liburing/issues/1390
+Reported-by: Chase Hiltz <chase@path.net>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 53 +++++++++++++++-------------------------------------
+ 1 file changed, 16 insertions(+), 37 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -422,24 +422,6 @@ static struct io_kiocb *__io_prep_linked
+ return req->link;
+ }
+
+-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
+-{
+- if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
+- return NULL;
+- return __io_prep_linked_timeout(req);
+-}
+-
+-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
+-{
+- io_queue_linked_timeout(__io_prep_linked_timeout(req));
+-}
+-
+-static inline void io_arm_ltimeout(struct io_kiocb *req)
+-{
+- if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
+- __io_arm_ltimeout(req);
+-}
+-
+ static void io_prep_async_work(struct io_kiocb *req)
+ {
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+@@ -493,7 +475,6 @@ static void io_prep_async_link(struct io
+
+ static void io_queue_iowq(struct io_kiocb *req)
+ {
+- struct io_kiocb *link = io_prep_linked_timeout(req);
+ struct io_uring_task *tctx = req->task->io_uring;
+
+ BUG_ON(!tctx);
+@@ -518,8 +499,6 @@ static void io_queue_iowq(struct io_kioc
+
+ trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
+ io_wq_enqueue(tctx->io_wq, &req->work);
+- if (link)
+- io_queue_linked_timeout(link);
+ }
+
+ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
+@@ -1863,17 +1842,24 @@ static bool io_assign_file(struct io_kio
+ return !!req->file;
+ }
+
++#define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
++
+ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ const struct cred *creds = NULL;
++ struct io_kiocb *link = NULL;
+ int ret;
+
+ if (unlikely(!io_assign_file(req, def, issue_flags)))
+ return -EBADF;
+
+- if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
+- creds = override_creds(req->creds);
++ if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
++ if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
++ creds = override_creds(req->creds);
++ if (req->flags & REQ_F_ARM_LTIMEOUT)
++ link = __io_prep_linked_timeout(req);
++ }
+
+ if (!def->audit_skip)
+ audit_uring_entry(req->opcode);
+@@ -1883,8 +1869,12 @@ static int io_issue_sqe(struct io_kiocb
+ if (!def->audit_skip)
+ audit_uring_exit(!ret, ret);
+
+- if (creds)
+- revert_creds(creds);
++ if (unlikely(creds || link)) {
++ if (creds)
++ revert_creds(creds);
++ if (link)
++ io_queue_linked_timeout(link);
++ }
+
+ if (ret == IOU_OK) {
+ if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+@@ -1939,8 +1929,6 @@ void io_wq_submit_work(struct io_wq_work
+ else
+ req_ref_get(req);
+
+- io_arm_ltimeout(req);
+-
+ /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
+ if (work->flags & IO_WQ_WORK_CANCEL) {
+ fail:
+@@ -2036,15 +2024,11 @@ struct file *io_file_get_normal(struct i
+ static void io_queue_async(struct io_kiocb *req, int ret)
+ __must_hold(&req->ctx->uring_lock)
+ {
+- struct io_kiocb *linked_timeout;
+-
+ if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+ io_req_defer_failed(req, ret);
+ return;
+ }
+
+- linked_timeout = io_prep_linked_timeout(req);
+-
+ switch (io_arm_poll_handler(req, 0)) {
+ case IO_APOLL_READY:
+ io_kbuf_recycle(req, 0);
+@@ -2057,9 +2041,6 @@ static void io_queue_async(struct io_kio
+ case IO_APOLL_OK:
+ break;
+ }
+-
+- if (linked_timeout)
+- io_queue_linked_timeout(linked_timeout);
+ }
+
+ static inline void io_queue_sqe(struct io_kiocb *req)
+@@ -2073,9 +2054,7 @@ static inline void io_queue_sqe(struct i
+ * We async punt it if the file wasn't marked NOWAIT, or if the file
+ * doesn't support non-blocking read/write attempts
+ */
+- if (likely(!ret))
+- io_arm_ltimeout(req);
+- else
++ if (unlikely(ret))
+ io_queue_async(req, ret);
+ }
+
--- /dev/null
+From fd9f46cfe46cfc49a0d0555e5cfe60d40ec00359 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 7 May 2025 08:07:09 -0600
+Subject: io_uring: ensure deferred completions are posted for multishot
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 687b2bae0efff9b25e071737d6af5004e6e35af5 upstream.
+
+Multishot normally uses io_req_post_cqe() to post completions, but when
+stopping it, it may finish up with a deferred completion. This is fine,
+except if another multishot event triggers before the deferred completions
+get flushed. If this occurs, then CQEs may get reordered in the CQ ring,
+and cause confusion on the application side.
+
+When posting a multishot completion via io_req_post_cqe(), flush any
+pending deferred completions first.
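+
+A minimal userspace model of the ordering problem (not the io_uring
+implementation): "deferred" stands in for the deferred completion batch,
+post_aux_cqe() for io_req_post_cqe(), and the fix is mirrored by flushing
+the deferred batch before posting the aux CQE.
+
+	#include <stdio.h>
+
+	#define MAX	8
+
+	static int cq[MAX], cq_tail;
+	static int deferred[MAX], nr_deferred;
+
+	/* flush the batched (deferred) completions into the CQ */
+	static void flush_deferred(void)
+	{
+		for (int i = 0; i < nr_deferred; i++)
+			cq[cq_tail++] = deferred[i];
+		nr_deferred = 0;
+	}
+
+	/* mirror of the fix: flush pending deferred completions first */
+	static void post_aux_cqe(int id)
+	{
+		if (nr_deferred)
+			flush_deferred();
+		cq[cq_tail++] = id;
+	}
+
+	int main(void)
+	{
+		/* multishot terminates, its completion is deferred */
+		deferred[nr_deferred++] = 1;
+		/* another multishot event posts before the flush */
+		post_aux_cqe(2);
+		flush_deferred();
+
+		/* prints CQE 1 then CQE 2: order is preserved */
+		for (int i = 0; i < cq_tail; i++)
+			printf("CQE %d\n", cq[i]);
+		return 0;
+	}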
+
+Cc: stable@vger.kernel.org # 6.1+
+Reported-by: Norman Maurer <norman_maurer@apple.com>
+Reported-by: Christian Mazakas <christian.mazakas@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -919,6 +919,14 @@ static bool __io_post_aux_cqe(struct io_
+ {
+ bool filled;
+
++ /*
++ * If multishot has already posted deferred completions, ensure that
++ * those are flushed first before posting this one. If not, CQEs
++ * could get reordered.
++ */
++ if (!wq_list_empty(&ctx->submit_state.compl_reqs))
++ __io_submit_flush_completions(ctx);
++
+ io_cq_lock(ctx);
+ filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+ if (!filled && allow_overflow)