git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
io_uring: backport io_should_terminate_tw()
author    Jens Axboe <axboe@kernel.dk>
          Thu, 18 Sep 2025 17:27:06 +0000 (11:27 -0600)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 25 Sep 2025 08:58:53 +0000 (10:58 +0200)
Parts of commit b6f58a3f4aa8dba424356c7a69388a81f4459300 upstream.

Backport the io_should_terminate_tw() helper, which decides whether
task_work should be run or terminated.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
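
For context, the helper collapses the old per-call-site
"req->task->flags & PF_EXITING" test into one mask check on current->flags,
covering both an exiting submitter and the fallback kthread. The sketch below
is a minimal userspace illustration of that flag test, not kernel code: the
PF_* values match include/linux/sched.h, but struct fake_task, the explicit
task argument, and main() are stand-ins, since the real helper reads the
implicit current pointer.

#include <stdbool.h>
#include <stdio.h>

/* flag values as defined in include/linux/sched.h */
#define PF_EXITING  0x00000004  /* getting shut down */
#define PF_KTHREAD  0x00200000  /* kernel thread */

/* illustrative stand-in for struct task_struct */
struct fake_task {
	unsigned int flags;
};

/* mirrors the backported helper: terminate if the executing task is
 * exiting, or if a kthread (the fallback task_work worker) is running
 * the work instead of the submitting task */
static bool io_should_terminate_tw(const struct fake_task *current_task)
{
	return current_task->flags & (PF_KTHREAD | PF_EXITING);
}

int main(void)
{
	struct fake_task live    = { .flags = 0 };
	struct fake_task exiting = { .flags = PF_EXITING };
	struct fake_task kworker = { .flags = PF_KTHREAD };

	printf("live submitter:   terminate=%d\n", io_should_terminate_tw(&live));
	printf("exiting task:     terminate=%d\n", io_should_terminate_tw(&exiting));
	printf("fallback kthread: terminate=%d\n", io_should_terminate_tw(&kworker));
	return 0;
}

Compiled with a plain cc, this prints terminate=0 for the live submitter and
terminate=1 for the other two cases.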
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/poll.c
io_uring/timeout.c

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 29adfc6d6ec24aab8b2cb6886013de139cfe307e..fa0c9c044931570a1f4bb8318293617e2f032fa7 100644
@@ -1249,8 +1249,7 @@ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 void io_req_task_submit(struct io_kiocb *req, bool *locked)
 {
        io_tw_lock(req->ctx, locked);
-       /* req->task == current here, checking PF_EXITING is safe */
-       if (likely(!(req->task->flags & PF_EXITING)))
+       if (likely(!io_should_terminate_tw()))
                io_queue_sqe(req);
        else
                io_req_complete_failed(req, -EFAULT);
@@ -1773,7 +1772,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 int io_poll_issue(struct io_kiocb *req, bool *locked)
 {
        io_tw_lock(req->ctx, locked);
-       if (unlikely(req->task->flags & PF_EXITING))
+       if (unlikely(io_should_terminate_tw()))
                return -EFAULT;
        return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
 }
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 886921d2d58debd3aad1368fab6acf5468ca0e94..37ef84520be4b26c607e8a9557f662c47f120c18 100644
@@ -395,6 +395,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
                      ctx->submitter_task == current);
 }
 
+/*
+ * Terminate the request if either of these conditions is true:
+ *
+ * 1) It's being executed by the original task, but that task is marked
+ *    with PF_EXITING as it's exiting.
+ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
+ *    our fallback task_work.
+ */
+static inline bool io_should_terminate_tw(void)
+{
+       return current->flags & (PF_KTHREAD | PF_EXITING);
+}
+
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
 {
        io_req_set_res(req, res, 0);
diff --git a/io_uring/poll.c b/io_uring/poll.c
index ab27a627fd4c62c8e0850f65544fbc5152263737..a0152bdc1c6102f7da98bc4ead86b992ca907813 100644
@@ -241,8 +241,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
        struct io_ring_ctx *ctx = req->ctx;
        int v;
 
-       /* req->task == current here, checking PF_EXITING is safe */
-       if (unlikely(req->task->flags & PF_EXITING))
+       if (unlikely(io_should_terminate_tw()))
                return -ECANCELED;
 
        do {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 7cdc234c5f53fb4551e78e9417d26f4fe5dfae54..57fe63faa6ba3575f8707dc7777b85e71183a4aa 100644
@@ -275,7 +275,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
        int ret = -ENOENT;
 
        if (prev) {
-               if (!(req->task->flags & PF_EXITING)) {
+               if (!io_should_terminate_tw()) {
                        struct io_cancel_data cd = {
                                .ctx            = req->ctx,
                                .data           = prev->cqe.user_data,
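
Taken together, the four hunks convert every open-coded
"req->task->flags & PF_EXITING" test to the shared helper. The old tests were
only valid because req->task == current at those call sites (hence the
now-removed safety comments); by reading current->flags instead, the helper
stays correct regardless of who invokes the task_work, and the PF_KTHREAD
half of the mask additionally terminates requests whose work is being drained
by the fallback kthread rather than by the submitting task.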