git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
io_uring: backport io_should_terminate_tw()
author Jens Axboe <axboe@kernel.dk>
Thu, 18 Sep 2025 17:27:06 +0000 (11:27 -0600)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Sep 2025 09:13:47 +0000 (11:13 +0200)
Parts of commit b6f58a3f4aa8dba424356c7a69388a81f4459300 upstream.

Backport the io_should_terminate_tw() helper, which determines whether
task_work should be run or terminated.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/poll.c
io_uring/timeout.c
io_uring/uring_cmd.c
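
For readers of the diff below, here is a minimal sketch (not part of the patch)
of the call-site pattern the helper enables: task_work handlers stop
open-coding checks on req->task->flags or current->flags and instead ask
io_should_terminate_tw() whether to bail out. example_tw() is a hypothetical
callback invented for illustration; the real call sites are in the hunks that
follow.

	static void example_tw(struct io_kiocb *req, struct io_tw_state *ts)
	{
		/*
		 * True when the submitting task is exiting (PF_EXITING) or
		 * when the fallback kthread runs the work (PF_KTHREAD); in
		 * either case fail the request rather than touch a dying
		 * context.
		 */
		if (unlikely(io_should_terminate_tw())) {
			io_req_defer_failed(req, -ECANCELED);
			return;
		}

		/* ... normal task_work handling ... */
	}

Because the helper inspects current rather than req->task, it covers both the
original "req->task == current" case and the fallback path where a kernel
thread runs the task_work on behalf of an exiting task.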

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 52ada466bf98f37829ed54e7d6b12a146a207699..a91dbb688b12018c4884ec8233f097164a18872b 100644 (file)
@@ -1359,8 +1359,7 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
 {
        io_tw_lock(req->ctx, ts);
-       /* req->task == current here, checking PF_EXITING is safe */
-       if (unlikely(req->task->flags & PF_EXITING))
+       if (unlikely(io_should_terminate_tw()))
                io_req_defer_failed(req, -EFAULT);
        else if (req->flags & REQ_F_FORCE_ASYNC)
                io_queue_iowq(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 70b6675941ff765076a4cf33e6a41a8e69babda1..f2f6ba23150374fe5e7de69509668300d905c74d 100644 (file)
@@ -421,6 +421,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
                      ctx->submitter_task == current);
 }
 
+/*
+ * Terminate the request if either of these conditions is true:
+ *
+ * 1) It's being executed by the original task, but that task is marked
+ *    with PF_EXITING as it's exiting.
+ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
+ *    our fallback task_work.
+ */
+static inline bool io_should_terminate_tw(void)
+{
+       return current->flags & (PF_KTHREAD | PF_EXITING);
+}
+
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
 {
        io_req_set_res(req, res, 0);
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 17dea8aa09c9b3bbe2ef39372c395284e57b54da..a12ac9453606275bd2850566b473989930a9b151 100644 (file)
@@ -265,8 +265,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 {
        int v;
 
-       /* req->task == current here, checking PF_EXITING is safe */
-       if (unlikely(req->task->flags & PF_EXITING))
+       if (unlikely(io_should_terminate_tw()))
                return -ECANCELED;
 
        do {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 21c4bfea79f1c9efb54bc6e3dac82771669c0704..4ebe05085c20dc2d1901ecd22c58cb499d8598bd 100644 (file)
@@ -303,7 +303,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
        int ret = -ENOENT;
 
        if (prev) {
-               if (!(req->task->flags & PF_EXITING)) {
+               if (!io_should_terminate_tw()) {
                        struct io_cancel_data cd = {
                                .ctx            = req->ctx,
                                .data           = prev->cqe.user_data,
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 13917967c52f273c276d6353b222e7344cd79228..27edb8fd639cb5377d8b9b5d26ce70e2eeafd85b 100644 (file)
@@ -118,7 +118,7 @@ static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        unsigned int flags = IO_URING_F_COMPLETE_DEFER;
 
-       if (current->flags & (PF_EXITING | PF_KTHREAD))
+       if (io_should_terminate_tw())
                flags |= IO_URING_F_TASK_DEAD;
 
        /* task_work executor checks the deferred list completion */