git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 12 May 2025 14:05:05 +0000 (16:05 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 12 May 2025 14:05:05 +0000 (16:05 +0200)
added patches:
io_uring-always-arm-linked-timeouts-prior-to-issue.patch
io_uring-ensure-deferred-completions-are-posted-for-multishot.patch
revert-net-phy-microchip-force-irq-polling-mode-for-lan88xx.patch

queue-6.1/io_uring-always-arm-linked-timeouts-prior-to-issue.patch [new file with mode: 0644]
queue-6.1/io_uring-ensure-deferred-completions-are-posted-for-multishot.patch [new file with mode: 0644]
queue-6.1/revert-net-phy-microchip-force-irq-polling-mode-for-lan88xx.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/io_uring-always-arm-linked-timeouts-prior-to-issue.patch b/queue-6.1/io_uring-always-arm-linked-timeouts-prior-to-issue.patch
new file mode 100644
index 0000000..580471f
--- /dev/null
+++ b/queue-6.1/io_uring-always-arm-linked-timeouts-prior-to-issue.patch
@@ -0,0 +1,158 @@
+From 851af158805477610bec6b09bc03a162b1bd7193 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 5 May 2025 08:34:39 -0600
+Subject: io_uring: always arm linked timeouts prior to issue
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit b53e523261bf058ea4a518b482222e7a277b186b upstream.
+
+There are a few spots where linked timeouts are armed, and not all of
+them adhere to the pre-arm, attempt issue, post-arm pattern. This can
+be problematic if the linked request returns that it will trigger a
+callback later, and does so before the linked timeout is fully armed.
+
+Consolidate all the linked timeout handling into __io_issue_sqe(),
+rather than have it spread throughout the various issue entry points.
+
+Cc: stable@vger.kernel.org
+Link: https://github.com/axboe/liburing/issues/1390
+Reported-by: Chase Hiltz <chase@path.net>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |   53 +++++++++++++++-------------------------------------
+ 1 file changed, 16 insertions(+), 37 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -372,24 +372,6 @@ static struct io_kiocb *__io_prep_linked
+       return req->link;
+ }
+-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
+-{
+-      if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
+-              return NULL;
+-      return __io_prep_linked_timeout(req);
+-}
+-
+-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
+-{
+-      io_queue_linked_timeout(__io_prep_linked_timeout(req));
+-}
+-
+-static inline void io_arm_ltimeout(struct io_kiocb *req)
+-{
+-      if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
+-              __io_arm_ltimeout(req);
+-}
+-
+ static void io_prep_async_work(struct io_kiocb *req)
+ {
+       const struct io_op_def *def = &io_op_defs[req->opcode];
+@@ -437,7 +419,6 @@ static void io_prep_async_link(struct io
+ static void io_queue_iowq(struct io_kiocb *req)
+ {
+-      struct io_kiocb *link = io_prep_linked_timeout(req);
+       struct io_uring_task *tctx = req->task->io_uring;
+       BUG_ON(!tctx);
+@@ -462,8 +443,6 @@ static void io_queue_iowq(struct io_kioc
+       trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
+       io_wq_enqueue(tctx->io_wq, &req->work);
+-      if (link)
+-              io_queue_linked_timeout(link);
+ }
+ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
+@@ -1741,17 +1720,24 @@ static bool io_assign_file(struct io_kio
+       return !!req->file;
+ }
++#define REQ_ISSUE_SLOW_FLAGS  (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
++
+ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ {
+       const struct io_op_def *def = &io_op_defs[req->opcode];
+       const struct cred *creds = NULL;
++      struct io_kiocb *link = NULL;
+       int ret;
+       if (unlikely(!io_assign_file(req, issue_flags)))
+               return -EBADF;
+-      if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
+-              creds = override_creds(req->creds);
++      if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
++              if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
++                      creds = override_creds(req->creds);
++              if (req->flags & REQ_F_ARM_LTIMEOUT)
++                      link = __io_prep_linked_timeout(req);
++      }
+       if (!def->audit_skip)
+               audit_uring_entry(req->opcode);
+@@ -1761,8 +1747,12 @@ static int io_issue_sqe(struct io_kiocb
+       if (!def->audit_skip)
+               audit_uring_exit(!ret, ret);
+-      if (creds)
+-              revert_creds(creds);
++      if (unlikely(creds || link)) {
++              if (creds)
++                      revert_creds(creds);
++              if (link)
++                      io_queue_linked_timeout(link);
++      }
+       if (ret == IOU_OK) {
+               if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+@@ -1809,8 +1799,6 @@ void io_wq_submit_work(struct io_wq_work
+       else
+               req_ref_get(req);
+-      io_arm_ltimeout(req);
+-
+       /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
+       if (work->flags & IO_WQ_WORK_CANCEL) {
+ fail:
+@@ -1908,15 +1896,11 @@ struct file *io_file_get_normal(struct i
+ static void io_queue_async(struct io_kiocb *req, int ret)
+       __must_hold(&req->ctx->uring_lock)
+ {
+-      struct io_kiocb *linked_timeout;
+-
+       if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+               io_req_complete_failed(req, ret);
+               return;
+       }
+-      linked_timeout = io_prep_linked_timeout(req);
+-
+       switch (io_arm_poll_handler(req, 0)) {
+       case IO_APOLL_READY:
+               io_kbuf_recycle(req, 0);
+@@ -1929,9 +1913,6 @@ static void io_queue_async(struct io_kio
+       case IO_APOLL_OK:
+               break;
+       }
+-
+-      if (linked_timeout)
+-              io_queue_linked_timeout(linked_timeout);
+ }
+ static inline void io_queue_sqe(struct io_kiocb *req)
+@@ -1945,9 +1926,7 @@ static inline void io_queue_sqe(struct i
+        * We async punt it if the file wasn't marked NOWAIT, or if the file
+        * doesn't support non-blocking read/write attempts
+        */
+-      if (likely(!ret))
+-              io_arm_ltimeout(req);
+-      else
++      if (unlikely(ret))
+               io_queue_async(req, ret);
+ }
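
For context, the linked-timeout pattern whose arming the fix centralizes
looks like this from userspace. This is an illustrative liburing sketch,
not part of the queue; read_with_timeout() is a hypothetical helper and
error handling is trimmed:

    /* read from fd, cancelled by a 1-second linked timeout */
    #include <liburing.h>

    static int read_with_timeout(int fd, void *buf, unsigned len)
    {
            struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            struct io_uring ring;
            int i, res = 0;

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return -1;

            /* head of the link chain: the actual read */
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_read(sqe, fd, buf, len, 0);
            sqe->flags |= IOSQE_IO_LINK;
            sqe->user_data = 1;

            /* linked timeout: cancels the read if it has not completed
             * within ts; the kernel must have this armed before the read
             * can complete, which is the ordering the patch enforces */
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_link_timeout(sqe, &ts, 0);
            sqe->user_data = 2;

            io_uring_submit(&ring);

            /* both SQEs produce a CQE; keep the read's result */
            for (i = 0; i < 2; i++) {
                    if (io_uring_wait_cqe(&ring, &cqe))
                            break;
                    if (cqe->user_data == 1)
                            res = cqe->res;
                    io_uring_cqe_seen(&ring, cqe);
            }
            io_uring_queue_exit(&ring);
            return res;
    }
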
diff --git a/queue-6.1/io_uring-ensure-deferred-completions-are-posted-for-multishot.patch b/queue-6.1/io_uring-ensure-deferred-completions-are-posted-for-multishot.patch
new file mode 100644
index 0000000..83d5c85
--- /dev/null
+++ b/queue-6.1/io_uring-ensure-deferred-completions-are-posted-for-multishot.patch
@@ -0,0 +1,44 @@
+From d714dbdfce858bc320a0e9f983f72652988ff11b Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 7 May 2025 08:07:09 -0600
+Subject: io_uring: ensure deferred completions are posted for multishot
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 687b2bae0efff9b25e071737d6af5004e6e35af5 upstream.
+
+Multishot normally uses io_req_post_cqe() to post completions, but when
+stopping it, it may finish up with a deferred completion. This is fine,
+except if another multishot event triggers before the deferred completions
+get flushed. If this occurs, then CQEs may get reordered in the CQ ring,
+and cause confusion on the application side.
+
+When posting a multishot completion via io_req_post_cqe(), flush any
+pending deferred completions first.
+
+Cc: stable@vger.kernel.org # 6.1+
+Reported-by: Norman Maurer <norman_maurer@apple.com>
+Reported-by: Christian Mazakas <christian.mazakas@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -819,6 +819,14 @@ bool io_post_aux_cqe(struct io_ring_ctx
+ {
+       bool filled;
++      /*
++       * If multishot has already posted deferred completions, ensure that
++       * those are flushed first before posting this one. If not, CQEs
++       * could get reordered.
++       */
++      if (!wq_list_empty(&ctx->submit_state.compl_reqs))
++              __io_submit_flush_completions(ctx);
++
+       io_cq_lock(ctx);
+       filled = io_fill_cqe_aux(ctx, user_data, res, cflags, allow_overflow);
+       io_cq_unlock_post(ctx);
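
The ordering guarantee matters to applications that drain the CQ ring
head to tail and treat a CQE without IORING_CQE_F_MORE as a multishot
request's terminal completion. A hedged userspace sketch of that pattern
(assuming liburing; handle_event() and handle_final() are hypothetical
application callbacks):

    #include <liburing.h>

    /* hypothetical application callbacks, not liburing API */
    void handle_event(struct io_uring_cqe *cqe);
    void handle_final(struct io_uring_cqe *cqe);

    static void drain_cqes(struct io_uring *ring)
    {
            struct io_uring_cqe *cqe;
            unsigned head, seen = 0;

            io_uring_for_each_cqe(ring, head, cqe) {
                    if (cqe->flags & IORING_CQE_F_MORE)
                            handle_event(cqe);  /* multishot still armed */
                    else
                            handle_final(cqe);  /* terminal CQE; re-arm */
                    seen++;
            }
            io_uring_cq_advance(ring, seen);
    }
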
diff --git a/queue-6.1/revert-net-phy-microchip-force-irq-polling-mode-for-lan88xx.patch b/queue-6.1/revert-net-phy-microchip-force-irq-polling-mode-for-lan88xx.patch
new file mode 100644
index 0000000..14cb1c8
--- /dev/null
+++ b/queue-6.1/revert-net-phy-microchip-force-irq-polling-mode-for-lan88xx.patch
@@ -0,0 +1,86 @@
+From c9e1e41fa0cf8003d3a8f0af1c53236b7e4d7e17 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 12 May 2025 16:01:41 +0200
+Subject: Revert "net: phy: microchip: force IRQ polling mode for lan88xx"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 9b89102fbb8fc5393e2a0f981aafdb3cf43591ee which is
+commit 30a41ed32d3088cd0d682a13d7f30b23baed7e93 upstream.
+
+It is reported to cause NFS boot problems on a Raspberry Pi 3b so revert
+it from this branch for now.
+
+Cc: Fiona Klute <fiona.klute@gmx.de>
+Cc: Andrew Lunn <andrew@lunn.ch>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Cc: Sasha Levin <sashal@kernel.org>
+Link: https://lore.kernel.org/r/aB6uurX99AZWM9I1@finisterre.sirena.org.uk
+Reported-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/microchip.c |   46 +++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 43 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/phy/microchip.c
++++ b/drivers/net/phy/microchip.c
+@@ -31,6 +31,47 @@ static int lan88xx_write_page(struct phy
+       return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
+ }
++static int lan88xx_phy_config_intr(struct phy_device *phydev)
++{
++      int rc;
++
++      if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
++              /* unmask all source and clear them before enable */
++              rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
++              rc = phy_read(phydev, LAN88XX_INT_STS);
++              rc = phy_write(phydev, LAN88XX_INT_MASK,
++                             LAN88XX_INT_MASK_MDINTPIN_EN_ |
++                             LAN88XX_INT_MASK_LINK_CHANGE_);
++      } else {
++              rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
++              if (rc)
++                      return rc;
++
++              /* Ack interrupts after they have been disabled */
++              rc = phy_read(phydev, LAN88XX_INT_STS);
++      }
++
++      return rc < 0 ? rc : 0;
++}
++
++static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
++{
++      int irq_status;
++
++      irq_status = phy_read(phydev, LAN88XX_INT_STS);
++      if (irq_status < 0) {
++              phy_error(phydev);
++              return IRQ_NONE;
++      }
++
++      if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
++              return IRQ_NONE;
++
++      phy_trigger_machine(phydev);
++
++      return IRQ_HANDLED;
++}
++
+ static int lan88xx_suspend(struct phy_device *phydev)
+ {
+       struct lan88xx_priv *priv = phydev->priv;
+@@ -351,9 +392,8 @@ static struct phy_driver microchip_phy_d
+       .config_aneg    = lan88xx_config_aneg,
+       .link_change_notify = lan88xx_link_change_notify,
+-      /* Interrupt handling is broken, do not define related
+-       * functions to force polling.
+-       */
++      .config_intr    = lan88xx_phy_config_intr,
++      .handle_interrupt = lan88xx_handle_interrupt,
+       .suspend        = lan88xx_suspend,
+       .resume         = genphy_resume,
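
The reverted commit forced polling simply by deleting the two interrupt
callbacks: phylib only trusts a driver's IRQ handling when both are
present, and otherwise falls back to the polling state machine. A
paraphrase of the relevant logic in drivers/net/phy/phy_device.c, shown
for context and not part of this patch:

    /* phylib uses the IRQ path only if both callbacks are provided */
    static bool phy_drv_supports_irq(const struct phy_driver *phydrv)
    {
            return phydrv->config_intr && phydrv->handle_interrupt;
    }

    /* in phy_attach_direct(): with no callbacks, even a valid IRQ is
     * ignored and the PHY is polled instead */
    if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev))
            phydev->irq = PHY_POLL;
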
diff --git a/queue-6.1/series b/queue-6.1/series
index 3c0e8675559d1040f89b212f4f127bf1745d9835..5d9921ea180eaef645a7f87da8a421027c6b3066 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -66,3 +66,6 @@ mips-fix-max_reg_offset.patch
 drm-panel-simple-update-timings-for-auo-g101evn010.patch
 nvme-unblock-ctrl-state-transition-for-firmware-upda.patch
 do_umount-add-missing-barrier-before-refcount-checks.patch
+io_uring-always-arm-linked-timeouts-prior-to-issue.patch
+io_uring-ensure-deferred-completions-are-posted-for-multishot.patch
+revert-net-phy-microchip-force-irq-polling-mode-for-lan88xx.patch