io_uring: include dying ring in task_work "should cancel" state
Author:     Jens Axboe <axboe@kernel.dk>
AuthorDate: Thu, 18 Sep 2025 16:21:14 +0000 (10:21 -0600)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 25 Sep 2025 09:13:47 +0000 (11:13 +0200)
Commit 3539b1467e94336d5854ebf976d9627bfb65d6c3 upstream.

When running task_work for an exiting task, the request is canceled
rather than given its issue retry attempt. However, this isn't done for
a ring that has been closed. This can lead to requests being
successfully completed after the ring has been closed, which is
confusing and surprising to an application.

Rather than just checking the task's exit state, also include the ring
ref state when deciding whether or not to terminate a given request run
from task_work.

Cc: stable@vger.kernel.org # 6.1+
Link: https://github.com/axboe/liburing/discussions/1459
Reported-by: Benedek Thaler <thaler@thaler.hu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
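
As a sanity check of the new predicate, here is a minimal userspace
model of the decision (a sketch, not kernel code: should_terminate_tw()
and ring_dying are stand-ins for io_should_terminate_tw() and
percpu_ref_is_dying(&ctx->refs); the PF_* values are copied from
include/linux/sched.h):

#include <stdbool.h>
#include <stdio.h>

#define PF_EXITING  0x00000004  /* task is getting shut down */
#define PF_KTHREAD  0x00200000  /* task is a kernel thread */

static bool should_terminate_tw(unsigned int task_flags, bool ring_dying)
{
	/* The old behavior considered only the task's flags; the fix
	 * also cancels once the ring itself has started dying. */
	return (task_flags & (PF_KTHREAD | PF_EXITING)) || ring_dying;
}

int main(void)
{
	/* Healthy task, closed ring: was retried before, canceled now. */
	printf("%d\n", should_terminate_tw(0, true));           /* 1 */
	/* Healthy task, live ring: still retried. */
	printf("%d\n", should_terminate_tw(0, false));          /* 0 */
	/* Exiting task: canceled, as before the fix. */
	printf("%d\n", should_terminate_tw(PF_EXITING, false)); /* 1 */
	return 0;
}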
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/poll.c
io_uring/timeout.c
io_uring/uring_cmd.c

io_uring/io_uring.c
index a91dbb688b12018c4884ec8233f097164a18872b..cbca97d9d74b1952fbefe8bd6fc476cb7fa5d204 100644
@@ -1358,8 +1358,10 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
 
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
 {
-       io_tw_lock(req->ctx, ts);
-       if (unlikely(io_should_terminate_tw()))
+       struct io_ring_ctx *ctx = req->ctx;
+
+       io_tw_lock(ctx, ts);
+       if (unlikely(io_should_terminate_tw(ctx)))
                io_req_defer_failed(req, -EFAULT);
        else if (req->flags & REQ_F_FORCE_ASYNC)
                io_queue_iowq(req);
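
In userspace terms, a request whose retry lands in task_work after the
ring has started dying now fails with -EFAULT instead of being
re-issued. A hypothetical liburing-side check, assuming the application
still reaps completions (e.g. when only the submitting task is
exiting):

#include <stdio.h>
#include <liburing.h>

/* Hypothetical helper: reap one CQE and flag requests that were
 * terminated by the kernel during teardown; "ring" is assumed to be
 * an already-initialized io_uring instance. */
static void reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	if (io_uring_wait_cqe(ring, &cqe) == 0) {
		if (cqe->res == -EFAULT)
			fprintf(stderr, "request terminated at teardown\n");
		io_uring_cqe_seen(ring, cqe);
	}
}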
io_uring/io_uring.h
index f2f6ba23150374fe5e7de69509668300d905c74d..e8a3b75bc6c6831785666481a3a0d742f440db8c 100644
@@ -429,9 +429,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
  * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
  *    our fallback task_work.
  */
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
 {
-       return current->flags & (PF_KTHREAD | PF_EXITING);
+       return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
 }
 
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
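
The new check fires because ring teardown kills ctx->refs, after which
percpu_ref_is_dying() returns true for every task_work that still runs.
A simplified sketch of that side (the real code lives in
io_ring_ctx_wait_and_kill() in io_uring/io_uring.c):

static void ring_begin_teardown(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);	/* ring is now "dying" */
	mutex_unlock(&ctx->uring_lock);
	/* request cancellation and the final free happen once the
	 * remaining references are dropped */
}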
io_uring/poll.c
index a12ac9453606275bd2850566b473989930a9b151..bfdb537572f7ff76722a3a8301f86fcb81642805 100644
@@ -265,7 +265,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 {
        int v;
 
-       if (unlikely(io_should_terminate_tw()))
+       if (unlikely(io_should_terminate_tw(req->ctx)))
                return -ECANCELED;
 
        do {
io_uring/timeout.c
index 4ebe05085c20dc2d1901ecd22c58cb499d8598bd..b215b2fbddd01d2bf0ff103350577ef2026096e0 100644
@@ -303,7 +303,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
        int ret = -ENOENT;
 
        if (prev) {
-               if (!io_should_terminate_tw()) {
+               if (!io_should_terminate_tw(req->ctx)) {
                        struct io_cancel_data cd = {
                                .ctx            = req->ctx,
                                .data           = prev->cqe.user_data,
io_uring/uring_cmd.c
index 27edb8fd639cb5377d8b9b5d26ce70e2eeafd85b..f927844c8ada794a776b33aafef909da66098508 100644
@@ -118,7 +118,7 @@ static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        unsigned int flags = IO_URING_F_COMPLETE_DEFER;
 
-       if (io_should_terminate_tw())
+       if (io_should_terminate_tw(req->ctx))
                flags |= IO_URING_F_TASK_DEAD;
 
        /* task_work executor checks the deferred list completion */
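
For the consumer side, a hypothetical ->uring_cmd() handler might react
to IO_URING_F_TASK_DEAD as below (a sketch: my_uring_cmd() and its
-ECANCELED policy are assumptions; io_uring_cmd_done() is the real
completion helper). With this fix the flag is also set when the ring,
not just the task, is going away:

static int my_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_TASK_DEAD) {
		/* owner is exiting: complete, don't start new I/O */
		io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return 0;
	}
	/* ... normal issue path ... */
	return -EOPNOTSUPP;
}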