From: Greg Kroah-Hartman
Date: Wed, 13 Sep 2023 08:20:01 +0000 (+0200)
Subject: 5.15-stable patches
X-Git-Tag: v5.10.195~76
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=c5b45a7bb6f41bdb5496d2adb2902ba13ecd4dcb;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	io_uring-always-lock-in-io_apoll_task_func.patch
	io_uring-break-iopolling-on-signal.patch
	io_uring-break-out-of-iowq-iopoll-on-teardown.patch
---

diff --git a/queue-5.15/io_uring-always-lock-in-io_apoll_task_func.patch b/queue-5.15/io_uring-always-lock-in-io_apoll_task_func.patch
new file mode 100644
index 00000000000..0e0653c2eaf
--- /dev/null
+++ b/queue-5.15/io_uring-always-lock-in-io_apoll_task_func.patch
@@ -0,0 +1,45 @@
+From stable-owner@vger.kernel.org Tue Sep 12 16:03:09 2023
+From: Pavel Begunkov
+Date: Tue, 12 Sep 2023 15:01:59 +0100
+Subject: io_uring: always lock in io_apoll_task_func
+To: stable@vger.kernel.org
+Cc: Jens Axboe , asml.silence@gmail.com, Dylan Yudaken
+Message-ID: <83d9ee49014ac3c453f9d338bcf18dcba1be947d.1694522363.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov
+
+From: Dylan Yudaken
+
+[ upstream commit c06c6c5d276707e04cedbcc55625e984922118aa ]
+
+This is required for the failure case (io_req_complete_failed) and is
+missing.
+
+The alternative would be to only lock in the failure path, however all of
+the non-error paths in io_poll_check_events that do not return
+IOU_POLL_NO_ACTION end up locking anyway. The only extraneous lock would
+be for the multishot poll overflowing the CQE ring, however multishot poll
+would probably benefit from being locked as it will allow completions to
+be batched.
+
+So it seems reasonable to lock always.
+
+Signed-off-by: Dylan Yudaken
+Link: https://lore.kernel.org/r/20221124093559.3780686-3-dylany@meta.com
+Signed-off-by: Jens Axboe
+Signed-off-by: Pavel Begunkov
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/io_uring.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -5716,6 +5716,7 @@ static void io_apoll_task_func(struct io
+ 	if (ret > 0)
+ 		return;
+ 
++	io_tw_lock(req->ctx, locked);
+ 	io_poll_remove_entries(req);
+ 	spin_lock(&ctx->completion_lock);
+ 	hash_del(&req->hash_node);
diff --git a/queue-5.15/io_uring-break-iopolling-on-signal.patch b/queue-5.15/io_uring-break-iopolling-on-signal.patch
new file mode 100644
index 00000000000..94829c97938
--- /dev/null
+++ b/queue-5.15/io_uring-break-iopolling-on-signal.patch
@@ -0,0 +1,40 @@
+From stable-owner@vger.kernel.org Tue Sep 12 16:03:10 2023
+From: Pavel Begunkov
+Date: Tue, 12 Sep 2023 15:02:01 +0100
+Subject: io_uring: break iopolling on signal
+To: stable@vger.kernel.org
+Cc: Jens Axboe , asml.silence@gmail.com
+Message-ID:
+
+From: Pavel Begunkov
+
+[ upstream commit dc314886cb3d0e4ab2858003e8de2917f8a3ccbd ]
+
+Don't keep spinning iopoll with a signal set. It'll eventually return
+back, e.g. by virtue of need_resched(), but it's not a nice user
+experience.
+
+Cc: stable@vger.kernel.org
+Fixes: def596e9557c9 ("io_uring: support for IO polling")
+Signed-off-by: Pavel Begunkov
+Link: https://lore.kernel.org/r/eeba551e82cad12af30c3220125eb6cb244cc94c.1691594339.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/io_uring.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2668,6 +2668,11 @@ static int io_iopoll_check(struct io_rin
+ 				break;
+ 		}
+ 		ret = io_do_iopoll(ctx, &nr_events, min);
++
++		if (task_sigpending(current)) {
++			ret = -EINTR;
++			goto out;
++		}
+ 	} while (!ret && nr_events < min && !need_resched());
+ out:
+ 	mutex_unlock(&ctx->uring_lock);
diff --git a/queue-5.15/io_uring-break-out-of-iowq-iopoll-on-teardown.patch b/queue-5.15/io_uring-break-out-of-iowq-iopoll-on-teardown.patch
new file mode 100644
index 00000000000..d9c91be40b2
--- /dev/null
+++ b/queue-5.15/io_uring-break-out-of-iowq-iopoll-on-teardown.patch
@@ -0,0 +1,70 @@
+From stable-owner@vger.kernel.org Tue Sep 12 16:03:09 2023
+From: Pavel Begunkov
+Date: Tue, 12 Sep 2023 15:02:00 +0100
+Subject: io_uring: break out of iowq iopoll on teardown
+To: stable@vger.kernel.org
+Cc: Jens Axboe , asml.silence@gmail.com
+Message-ID: <5acefe363814fb8fcfc4426c0bd2f45ae6418921.1694522363.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov
+
+[ upstream commit 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 ]
+
+io-wq will retry iopoll even when it failed with -EAGAIN. If that
+races with task exit, which sets TIF_NOTIFY_SIGNAL for all its workers,
+such workers might potentially infinitely spin retrying iopoll again and
+again and each time failing on some allocation / waiting / etc. Don't
+keep spinning if io-wq is dying.
+
+Fixes: 561fb04a6a225 ("io_uring: replace workqueue usage with io-wq")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/io-wq.c    |   10 ++++++++++
+ io_uring/io-wq.h    |    1 +
+ io_uring/io_uring.c |    3 ++-
+ 3 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -176,6 +176,16 @@ static void io_worker_ref_put(struct io_
+ 	complete(&wq->worker_done);
+ }
+ 
++bool io_wq_worker_stopped(void)
++{
++	struct io_worker *worker = current->pf_io_worker;
++
++	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
++		return true;
++
++	return test_bit(IO_WQ_BIT_EXIT, &worker->wqe->wq->state);
++}
++
+ static void io_worker_cancel_cb(struct io_worker *worker)
+ {
+ 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+--- a/io_uring/io-wq.h
++++ b/io_uring/io-wq.h
+@@ -129,6 +129,7 @@ void io_wq_hash_work(struct io_wq_work *
+ 
+ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+ int io_wq_max_workers(struct io_wq *wq, int *new_count);
++bool io_wq_worker_stopped(void);
+ 
+ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+ {
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -7069,7 +7069,8 @@ static void io_wq_submit_work(struct io_
+ 			 */
+ 			if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
+ 				break;
+-
++			if (io_wq_worker_stopped())
++				break;
+ 			/*
+ 			 * If REQ_F_NOWAIT is set, then don't wait or retry with
+ 			 * poll. -EAGAIN is final for that case.
diff --git a/queue-5.15/series b/queue-5.15/series
index 05b90546026..2f5201fce02 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -370,3 +370,6 @@ md-md-bitmap-remove-unnecessary-local-variable-in-backlog_store.patch
 revert-drm-amdgpu-install-stub-fence-into-potential-unused-fence-pointers.patch
 udf-initialize-newblock-to-0.patch
 net-ipv6-skb-symmetric-hash-should-incorporate-transport-ports.patch
+io_uring-always-lock-in-io_apoll_task_func.patch
+io_uring-break-out-of-iowq-iopoll-on-teardown.patch
+io_uring-break-iopolling-on-signal.patch