--- /dev/null
+From stable-owner@vger.kernel.org Tue Sep 12 16:03:09 2023
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 12 Sep 2023 15:01:59 +0100
+Subject: io_uring: always lock in io_apoll_task_func
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com, Dylan Yudaken <dylany@meta.com>
+Message-ID: <83d9ee49014ac3c453f9d338bcf18dcba1be947d.1694522363.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+From: Dylan Yudaken <dylany@meta.com>
+
+[ Upstream commit c06c6c5d276707e04cedbcc55625e984922118aa ]
+
+Locking is required for the failure case (io_req_complete_failed) and is
+currently missing.
+
+The alternative would be to only lock in the failure path, however all of
+the non-error paths in io_poll_check_events that do not return
+IOU_POLL_NO_ACTION end up locking anyway. The only extraneous lock would
+be for the multishot poll overflowing the CQE ring, however multishot poll
+would probably benefit from being locked as it will allow completions to
+be batched.
+
+So it seems reasonable to lock always.
+
+Signed-off-by: Dylan Yudaken <dylany@meta.com>
+Link: https://lore.kernel.org/r/20221124093559.3780686-3-dylany@meta.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -5716,6 +5716,7 @@ static void io_apoll_task_func(struct io
+ if (ret > 0)
+ return;
+
++ io_tw_lock(req->ctx, locked);
+ io_poll_remove_entries(req);
+ spin_lock(&ctx->completion_lock);
+ hash_del(&req->hash_node);
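
In context, the added io_tw_lock() means the lock is taken before either
completion path runs, so the failure path (io_req_complete_failed) can rely
on it too. A minimal sketch of the resulting control flow, condensed from
the hunk above and the commit message (surrounding details elided, not a
verbatim copy of the tree):

	static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
	{
		struct io_ring_ctx *ctx = req->ctx;
		int ret = io_poll_check_events(req, locked);

		if (ret > 0)
			return;			/* poll stays armed, nothing to complete */

		io_tw_lock(ctx, locked);	/* the added line: lock unconditionally,
						 * covering the failure path as well */
		io_poll_remove_entries(req);
		spin_lock(&ctx->completion_lock);
		hash_del(&req->hash_node);
		spin_unlock(&ctx->completion_lock);
		/* ... re-issue on success, io_req_complete_failed() on error ... */
	}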
--- /dev/null
+From stable-owner@vger.kernel.org Tue Sep 12 16:03:10 2023
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 12 Sep 2023 15:02:01 +0100
+Subject: io_uring: break iopolling on signal
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com
+Message-ID: <b3b333ddf567f86104ce42708ad8982e561e9f59.1694522363.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit dc314886cb3d0e4ab2858003e8de2917f8a3ccbd ]
+
+Don't keep spinning in iopoll with a signal set. The loop will
+eventually return, e.g. by virtue of need_resched(), but it's not a
+nice user experience.
+
+Cc: stable@vger.kernel.org
+Fixes: def596e9557c9 ("io_uring: support for IO polling")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/eeba551e82cad12af30c3220125eb6cb244cc94c.1691594339.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2668,6 +2668,11 @@ static int io_iopoll_check(struct io_rin
+ break;
+ }
+ ret = io_do_iopoll(ctx, &nr_events, min);
++
++ if (task_sigpending(current)) {
++ ret = -EINTR;
++ goto out;
++ }
+ } while (!ret && nr_events < min && !need_resched());
+ out:
+ mutex_unlock(&ctx->uring_lock);
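
From userspace, the visible effect is that waiting on an IOPOLL ring now
returns promptly once a signal is pending instead of spinning until
need_resched() fires. A hedged illustration using liburing (the ring setup
and the wait_one() helper are assumptions for the example, not part of the
patch; io_uring_wait_cqe() returns a negated errno on failure):

	#include <liburing.h>

	/* assumes a ring created with IORING_SETUP_IOPOLL and requests
	 * already submitted */
	static int wait_one(struct io_uring *ring)
	{
		struct io_uring_cqe *cqe;
		int ret = io_uring_wait_cqe(ring, &cqe);

		if (ret == -EINTR)
			return ret;	/* a signal became pending while iopolling;
					 * with this fix we get back here promptly */
		if (ret == 0)
			io_uring_cqe_seen(ring, cqe);
		return ret;
	}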
--- /dev/null
+From stable-owner@vger.kernel.org Tue Sep 12 16:03:09 2023
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 12 Sep 2023 15:02:00 +0100
+Subject: io_uring: break out of iowq iopoll on teardown
+To: stable@vger.kernel.org
+Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com
+Message-ID: <5acefe363814fb8fcfc4426c0bd2f45ae6418921.1694522363.git.asml.silence@gmail.com>
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 ]
+
+io-wq will retry iopoll even when it failed with -EAGAIN. If that
+races with task exit, which sets TIF_NOTIFY_SIGNAL for all its workers,
+such workers might spin indefinitely, retrying iopoll again and again
+and failing each time on some allocation / wait / etc. Don't keep
+spinning if io-wq is dying.
+
+Fixes: 561fb04a6a225 ("io_uring: replace workqueue usage with io-wq")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io-wq.c | 10 ++++++++++
+ io_uring/io-wq.h | 1 +
+ io_uring/io_uring.c | 3 ++-
+ 3 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -176,6 +176,16 @@ static void io_worker_ref_put(struct io_
+ complete(&wq->worker_done);
+ }
+
++bool io_wq_worker_stopped(void)
++{
++ struct io_worker *worker = current->pf_io_worker;
++
++ if (WARN_ON_ONCE(!io_wq_current_is_worker()))
++ return true;
++
++ return test_bit(IO_WQ_BIT_EXIT, &worker->wqe->wq->state);
++}
++
+ static void io_worker_cancel_cb(struct io_worker *worker)
+ {
+ struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+--- a/io_uring/io-wq.h
++++ b/io_uring/io-wq.h
+@@ -129,6 +129,7 @@ void io_wq_hash_work(struct io_wq_work *
+
+ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+ int io_wq_max_workers(struct io_wq *wq, int *new_count);
++bool io_wq_worker_stopped(void);
+
+ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+ {
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -7069,7 +7069,8 @@ static void io_wq_submit_work(struct io_
+ */
+ if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
+ break;
+-
++ if (io_wq_worker_stopped())
++ break;
+ /*
+ * If REQ_F_NOWAIT is set, then don't wait or retry with
+ * poll. -EAGAIN is final for that case.
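
Taken together, the io-wq submit path now has a second way out of its
-EAGAIN retry loop. A condensed sketch of that loop, based on the hunk
above (unrelated branches trimmed; io_issue_sqe() and the cond_resched()
between retries follow the surrounding upstream code and are assumptions
here, not shown in this hunk):

	/* inside io_wq_submit_work(), issuing one request */
	do {
		ret = io_issue_sqe(req, 0);
		if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
			break;		/* done, or not the iopoll retry case */
		if (io_wq_worker_stopped())
			break;		/* IO_WQ_BIT_EXIT is set: io-wq is dying,
					 * so stop retrying and let the worker
					 * exit instead of spinning forever */
		/*
		 * If REQ_F_NOWAIT is set, then don't wait or retry with
		 * poll. -EAGAIN is final for that case.
		 */
		if (req->flags & REQ_F_NOWAIT)
			break;
		cond_resched();
	} while (1);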
revert-drm-amdgpu-install-stub-fence-into-potential-unused-fence-pointers.patch
udf-initialize-newblock-to-0.patch
net-ipv6-skb-symmetric-hash-should-incorporate-transport-ports.patch
+io_uring-always-lock-in-io_apoll_task_func.patch
+io_uring-break-out-of-iowq-iopoll-on-teardown.patch
+io_uring-break-iopolling-on-signal.patch