From 4413802ab50595d8ccbb384cc1209a0f2d048572 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Sat, 9 Sep 2023 14:14:28 +0100
Subject: [PATCH] drop queue-6.1/io_uring-break-out-of-iowq-iopoll-on-teardown.patch

---
 ...break-out-of-iowq-iopoll-on-teardown.patch | 66 -------------------
 1 file changed, 66 deletions(-)
 delete mode 100644 queue-6.1/io_uring-break-out-of-iowq-iopoll-on-teardown.patch

diff --git a/queue-6.1/io_uring-break-out-of-iowq-iopoll-on-teardown.patch b/queue-6.1/io_uring-break-out-of-iowq-iopoll-on-teardown.patch
deleted file mode 100644
index 3a6d657f750..00000000000
--- a/queue-6.1/io_uring-break-out-of-iowq-iopoll-on-teardown.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 Mon Sep 17 00:00:00 2001
-From: Pavel Begunkov <asml.silence@gmail.com>
-Date: Thu, 7 Sep 2023 13:50:07 +0100
-Subject: io_uring: break out of iowq iopoll on teardown
-
-From: Pavel Begunkov <asml.silence@gmail.com>
-
-commit 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 upstream.
-
-io-wq will retry iopoll even when it failed with -EAGAIN. If that
-races with task exit, which sets TIF_NOTIFY_SIGNAL for all its workers,
-such workers might potentially infinitely spin retrying iopoll again and
-again and each time failing on some allocation / waiting / etc. Don't
-keep spinning if io-wq is dying.
-
-Fixes: 561fb04a6a225 ("io_uring: replace workqueue usage with io-wq")
-Cc: stable@vger.kernel.org
-Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- io_uring/io-wq.c    | 10 ++++++++++
- io_uring/io-wq.h    |  1 +
- io_uring/io_uring.c |  2 ++
- 3 files changed, 13 insertions(+)
-
---- a/io_uring/io-wq.c
-+++ b/io_uring/io-wq.c
-@@ -181,6 +181,16 @@ static void io_worker_ref_put(struct io_
- 	complete(&wq->worker_done);
- }
- 
-+bool io_wq_worker_stopped(void)
-+{
-+	struct io_worker *worker = current->worker_private;
-+
-+	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
-+		return true;
-+
-+	return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
-+}
-+
- static void io_worker_cancel_cb(struct io_worker *worker)
- {
- 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
---- a/io_uring/io-wq.h
-+++ b/io_uring/io-wq.h
-@@ -52,6 +52,7 @@ void io_wq_hash_work(struct io_wq_work *
- 
- int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
- int io_wq_max_workers(struct io_wq *wq, int *new_count);
-+bool io_wq_worker_stopped(void);
- 
- static inline bool io_wq_is_hashed(struct io_wq_work *work)
- {
---- a/io_uring/io_uring.c
-+++ b/io_uring/io_uring.c
-@@ -1823,6 +1823,8 @@ fail:
- 	if (!needs_poll) {
- 		if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
- 			break;
-+		if (io_wq_worker_stopped())
-+			break;
- 		cond_resched();
- 		continue;
- 	}
-- 
2.47.3
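
For context, and not part of the dropped patch itself: the fix being removed from queue-6.1 is a single teardown check in io-wq's iopoll retry loop. The standalone C sketch below models that loop in userspace to show why the check matters; every name in it (worker_ctx, WQ_BIT_EXIT, fake_iopoll, worker_stopped, issue_loop) is a hypothetical stand-in for the kernel internals, not the kernel API.

/*
 * Minimal userspace model of the io-wq iopoll retry loop. The poll
 * attempt always fails with -EAGAIN, mimicking the race with task
 * exit; without the worker_stopped() check the loop spins forever.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct worker_ctx {
	atomic_ulong state;		/* models wq->state */
};

#define WQ_BIT_EXIT 0UL			/* models IO_WQ_BIT_EXIT */

/* Models a poll attempt that keeps failing once the task is exiting. */
static int fake_iopoll(void)
{
	return -EAGAIN;
}

/* Models io_wq_worker_stopped(): true once teardown has begun. */
static bool worker_stopped(struct worker_ctx *ctx)
{
	return atomic_load(&ctx->state) & (1UL << WQ_BIT_EXIT);
}

static void issue_loop(struct worker_ctx *ctx)
{
	for (;;) {
		if (fake_iopoll() != -EAGAIN)
			break;
		/*
		 * The essence of the fix: bail out instead of retrying
		 * when the workqueue is being torn down.
		 */
		if (worker_stopped(ctx)) {
			puts("wq dying, breaking out of iopoll retry");
			break;
		}
		/* The kernel calls cond_resched() here and retries. */
	}
}

int main(void)
{
	struct worker_ctx ctx = { 0 };

	/* Simulate the exiting task flagging its workers for teardown. */
	atomic_fetch_or(&ctx.state, 1UL << WQ_BIT_EXIT);
	issue_loop(&ctx);
	return 0;
}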