5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Sep 2023 08:19:50 +0000 (10:19 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Sep 2023 08:19:50 +0000 (10:19 +0200)
added patches:
io_uring-always-lock-in-io_apoll_task_func.patch
io_uring-break-iopolling-on-signal.patch
io_uring-break-out-of-iowq-iopoll-on-teardown.patch

queue-5.10/io_uring-always-lock-in-io_apoll_task_func.patch [new file with mode: 0644]
queue-5.10/io_uring-break-iopolling-on-signal.patch [new file with mode: 0644]
queue-5.10/io_uring-break-out-of-iowq-iopoll-on-teardown.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/io_uring-always-lock-in-io_apoll_task_func.patch b/queue-5.10/io_uring-always-lock-in-io_apoll_task_func.patch
new file mode 100644 (file)
index 0000000..c2bd715
--- /dev/null
@@ -0,0 +1,45 @@
+From stable-owner@vger.kernel.org Tue Sep 12 16:05:05 2023
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 12 Sep 2023 15:02:48 +0100
+Subject: io_uring: always lock in io_apoll_task_func
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com, Dylan Yudaken <dylany@meta.com>
+Message-ID: <9f8a30b981705fa8fef31ad76c9bf7192b8db2a4.1694524751.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+From: Dylan Yudaken <dylany@meta.com>
+
+[ upstream commit c06c6c5d276707e04cedbcc55625e984922118aa ]
+
+This is required for the failure case (io_req_complete_failed) and is
+missing.
+
+The alternative would be to only lock in the failure path, however all of
+the non-error paths in io_poll_check_events that do not return
+IOU_POLL_NO_ACTION end up locking anyway. The only extraneous lock would
+be for the multishot poll overflowing the CQE ring, however multishot poll
+would probably benefit from being locked as it will allow completions to
+be batched.
+
+So it seems reasonable to lock always.
+
+Signed-off-by: Dylan Yudaken <dylany@meta.com>
+Link: https://lore.kernel.org/r/20221124093559.3780686-3-dylany@meta.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -5571,6 +5571,7 @@ static void io_apoll_task_func(struct io
+       if (ret > 0)
+               return;
+ 
++      io_tw_lock(req->ctx, locked);
+       io_poll_remove_entries(req);
+       spin_lock(&ctx->completion_lock);
+       hash_del(&req->hash_node);
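The locking argument in the commit message above is easier to see outside the kernel: taking the lock once, before every path that actually completes the request, means the failure path needs no special casing and completions can be gathered into a batch. Below is a minimal, self-contained userspace sketch of that pattern. It is illustrative only; the fake_ctx struct, the batch array and complete_poll_request() are invented stand-ins, not io_uring code, and the mutex merely plays the role of the lock io_tw_lock() takes.

/*
 * Illustrative userspace sketch only (not io_uring code): grab the lock once,
 * before every path that completes the request, so the failure path needs no
 * separate locking and completions can be queued into a single batch.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_ctx {
	pthread_mutex_t lock;	/* stand-in for the lock io_tw_lock() takes */
	int batch[16];		/* completions gathered while holding it */
	int nr_batched;
};

/* Rough analogue of io_apoll_task_func() after the fix. */
static void complete_poll_request(struct fake_ctx *ctx, int res)
{
	if (res > 0)		/* poll still armed, nothing to complete */
		return;

	pthread_mutex_lock(&ctx->lock);		/* now unconditional */
	if (ctx->nr_batched < 16)
		ctx->batch[ctx->nr_batched++] = res;	/* batched completion */
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	static struct fake_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

	complete_poll_request(&ctx, 0);			/* normal completion */
	complete_poll_request(&ctx, -ECANCELED);	/* failure, same path */
	printf("batched %d completions\n", ctx.nr_batched);
	return 0;
}

Build with cc -pthread; the design point is simply that once the lock is unconditional, the normal and the error paths share the same batched-completion code.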
diff --git a/queue-5.10/io_uring-break-iopolling-on-signal.patch b/queue-5.10/io_uring-break-iopolling-on-signal.patch
new file mode 100644 (file)
index 0000000..262f52d
--- /dev/null
@@ -0,0 +1,40 @@
+From stable-owner@vger.kernel.org Tue Sep 12 16:04:11 2023
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 12 Sep 2023 15:02:50 +0100
+Subject: io_uring: break iopolling on signal
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com
+Message-ID: <d3156dc2243c9a419f46d7c3cb4dcb2b839c3c65.1694524751.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ upstream commit dc314886cb3d0e4ab2858003e8de2917f8a3ccbd ]
+
+Don't keep spinning iopoll with a signal set. It'll eventually return
+back, e.g. by virtue of need_resched(), but it's not a nice user
+experience.
+
+Cc: stable@vger.kernel.org
+Fixes: def596e9557c9 ("io_uring: support for IO polling")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/eeba551e82cad12af30c3220125eb6cb244cc94c.1691594339.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2665,6 +2665,11 @@ static int io_iopoll_check(struct io_rin
+                               break;
+               }
+               ret = io_do_iopoll(ctx, &nr_events, min);
++
++              if (task_sigpending(current)) {
++                      ret = -EINTR;
++                      goto out;
++              }
+       } while (!ret && nr_events < min && !need_resched());
+ out:
+       mutex_unlock(&ctx->uring_lock);
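The user-visible point of the hunk above: an IOPOLL busy-wait now returns -EINTR as soon as a signal is pending instead of spinning until need_resched() eventually breaks the loop. A self-contained userspace sketch of the same break-on-signal shape follows; poll_for_events() and the fake event counting are invented for illustration, and only the loop structure mirrors io_iopoll_check().

/*
 * Illustrative userspace sketch: a busy-poll loop that bails out with -EINTR
 * once a signal is pending, instead of spinning until it is preempted.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_signal;

static void on_signal(int sig)
{
	(void)sig;
	got_signal = 1;		/* plays the role of task_sigpending(current) */
}

/* Rough analogue of io_iopoll_check(): poll until enough events or a signal. */
static int poll_for_events(int min_events)
{
	int nr_events = 0;

	do {
		nr_events += 1;		/* pretend io_do_iopoll() found one */

		if (got_signal)
			return -EINTR;	/* the check the patch adds */

		usleep(1000);
	} while (nr_events < min_events);

	return nr_events;
}

int main(void)
{
	signal(SIGINT, on_signal);
	int ret = poll_for_events(5000);	/* Ctrl-C returns early with -EINTR */
	printf("poll_for_events() = %d\n", ret);
	return 0;
}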
diff --git a/queue-5.10/io_uring-break-out-of-iowq-iopoll-on-teardown.patch b/queue-5.10/io_uring-break-out-of-iowq-iopoll-on-teardown.patch
new file mode 100644 (file)
index 0000000..f0bbfce
--- /dev/null
@@ -0,0 +1,70 @@
+From stable-owner@vger.kernel.org Tue Sep 12 16:05:05 2023
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 12 Sep 2023 15:02:49 +0100
+Subject: io_uring: break out of iowq iopoll on teardown
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com
+Message-ID: <649666e4cf7b5b829b12ba14f1648fdba2950d10.1694524751.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ upstream commit 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 ]
+
+io-wq will retry iopoll even when it failed with -EAGAIN. If that
+races with task exit, which sets TIF_NOTIFY_SIGNAL for all its workers,
+such workers might potentially infinitely spin retrying iopoll again and
+again and each time failing on some allocation / waiting / etc. Don't
+keep spinning if io-wq is dying.
+
+Fixes: 561fb04a6a225 ("io_uring: replace workqueue usage with io-wq")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io-wq.c    |   10 ++++++++++
+ io_uring/io-wq.h    |    1 +
+ io_uring/io_uring.c |    3 ++-
+ 3 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -176,6 +176,16 @@ static void io_worker_ref_put(struct io_
+               complete(&wq->worker_done);
+ }
+ 
++bool io_wq_worker_stopped(void)
++{
++      struct io_worker *worker = current->pf_io_worker;
++
++      if (WARN_ON_ONCE(!io_wq_current_is_worker()))
++              return true;
++
++      return test_bit(IO_WQ_BIT_EXIT, &worker->wqe->wq->state);
++}
++
+ static void io_worker_cancel_cb(struct io_worker *worker)
+ {
+       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+--- a/io_uring/io-wq.h
++++ b/io_uring/io-wq.h
+@@ -129,6 +129,7 @@ void io_wq_hash_work(struct io_wq_work *
+ 
+ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+ int io_wq_max_workers(struct io_wq *wq, int *new_count);
++bool io_wq_worker_stopped(void);
+ 
+ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+ {
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -6898,7 +6898,8 @@ static void io_wq_submit_work(struct io_
+                        */
+                       if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
+                               break;
+-
++                      if (io_wq_worker_stopped())
++                              break;
+                       /*
+                        * If REQ_F_NOWAIT is set, then don't wait or retry with
+                        * poll. -EAGAIN is final for that case.
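The io-wq change above amounts to this: a worker retrying an iopoll request that keeps failing with -EAGAIN must also notice that its workqueue is being torn down (IO_WQ_BIT_EXIT) and give up, otherwise it can spin forever against an exiting task. A self-contained userspace sketch of that retry-until-teardown pattern follows; wq_exiting, issue_request() and worker() are made-up stand-ins, not kernel code.

/*
 * Illustrative userspace sketch: retry on -EAGAIN, but stop as soon as the
 * worker pool is flagged as exiting, mirroring the io_wq_worker_stopped() test.
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool wq_exiting;		/* plays the role of IO_WQ_BIT_EXIT */

static int issue_request(void)
{
	return -EAGAIN;			/* a request that never makes progress */
}

/* Rough analogue of the io_wq_submit_work() retry loop after the fix. */
static void *worker(void *arg)
{
	(void)arg;

	for (;;) {
		int ret = issue_request();

		if (ret != -EAGAIN)
			break;
		if (atomic_load(&wq_exiting))	/* the check the patch adds */
			break;
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	sleep(1);
	atomic_store(&wq_exiting, true);	/* "teardown": worker now exits */
	pthread_join(tid, NULL);
	puts("worker exited instead of spinning");
	return 0;
}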
diff --git a/queue-5.10/series b/queue-5.10/series
index 2186ecb6f5f5331025a37172cd4e981230eccb44..fe8da9e5d10781ee052459c1efac31c00cc3488a 100644 (file)
--- a/queue-5.10/series
@@ -311,3 +311,6 @@ tracing-zero-the-pipe-cpumask-on-alloc-to-avoid-spurious-ebusy.patch
 md-md-bitmap-remove-unnecessary-local-variable-in-backlog_store.patch
 udf-initialize-newblock-to-0.patch
 net-ipv6-skb-symmetric-hash-should-incorporate-transport-ports.patch
+io_uring-always-lock-in-io_apoll_task_func.patch
+io_uring-break-out-of-iowq-iopoll-on-teardown.patch
+io_uring-break-iopolling-on-signal.patch