io_uring: include dying ring in task_work "should cancel" state
author    Jens Axboe <axboe@kernel.dk>
          Thu, 18 Sep 2025 16:21:14 +0000 (10:21 -0600)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 25 Sep 2025 08:58:53 +0000 (10:58 +0200)
Commit 3539b1467e94336d5854ebf976d9627bfb65d6c3 upstream.

When running task_work for an exiting task, rather than perform the
issue retry attempt, the task_work is canceled. However, this isn't
done for a ring that has been closed. This can lead to requests being
successfully completed after the ring has been closed, which is somewhat
confusing and surprising to an application.

Rather than just checking the task exit state, also include the ring
ref state in deciding whether or not to terminate a given request when
run from task_work.

Cc: stable@vger.kernel.org # 6.1+
Link: https://github.com/axboe/liburing/discussions/1459
Reported-by: Benedek Thaler <thaler@thaler.hu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/poll.c
io_uring/timeout.c

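For context, here is a minimal userspace sketch of the window the message above
describes, assuming liburing is installed (link with -luring -lpthread). The
socketpair/recv scenario is illustrative only, not the reproducer from the
linked discussion, and the outcome depends on timing: no CQE can be reaped
either way once the ring is gone, the observable difference is whether the
deferred retry still ran and consumed the byte after io_uring_queue_exit().

/*
 * Sketch of the race window described in the commit message, assuming
 * liburing.  Not a deterministic reproducer: whether the recv's task_work
 * retry runs before or after ring teardown depends on scheduling.
 */
#include <liburing.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/socket.h>

static int sv[2];

static void *sender(void *arg)
{
	(void)arg;
	/* Make the pending recv ready around the time the ring is closed */
	send(sv[1], "x", 1, 0);
	return NULL;
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	char buf[16];
	pthread_t thr;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) ||
	    io_uring_queue_init(8, &ring, 0))
		return 1;

	/* recv on an empty socket: goes async, completion is retried via task_work */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recv(sqe, sv[0], buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	pthread_create(&thr, NULL, sender, NULL);

	/*
	 * Tear the ring down while the retry may still be queued as task_work.
	 * Before this patch that retry could still run and consume the byte;
	 * with the patch the dying ring terminates it instead.
	 */
	io_uring_queue_exit(&ring);
	pthread_join(thr, NULL);

	printf("bytes still queued on socket: %zd\n",
	       recv(sv[0], buf, sizeof(buf), MSG_DONTWAIT));
	return 0;
}
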
io_uring/io_uring.c
index fa0c9c044931570a1f4bb8318293617e2f032fa7..2aae0de6169ce23cc0d7cc09e823fe50717fe7f7 100644
@@ -1248,8 +1248,10 @@ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 
 void io_req_task_submit(struct io_kiocb *req, bool *locked)
 {
-       io_tw_lock(req->ctx, locked);
-       if (likely(!io_should_terminate_tw()))
+       struct io_ring_ctx *ctx = req->ctx;
+
+       io_tw_lock(ctx, locked);
+       if (likely(!io_should_terminate_tw(ctx)))
                io_queue_sqe(req);
        else
                io_req_complete_failed(req, -EFAULT);
 }
 
@@ -1771,8 +1773,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_poll_issue(struct io_kiocb *req, bool *locked)
 {
-       io_tw_lock(req->ctx, locked);
-       if (unlikely(io_should_terminate_tw()))
+       struct io_ring_ctx *ctx = req->ctx;
+
+       io_tw_lock(ctx, locked);
+       if (unlikely(io_should_terminate_tw(ctx)))
                return -EFAULT;
        return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
 }
io_uring/io_uring.h
index 37ef84520be4b26c607e8a9557f662c47f120c18..194e3230f853d59c75a80acfbc0ee9770c6aa8c7 100644
@@ -403,9 +403,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
  * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
  *    our fallback task_work.
  */
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
 {
-       return current->flags & (PF_KTHREAD | PF_EXITING);
+       return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
 }
 
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
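
The new condition also fires once ring teardown has begun: during teardown,
when the ring file is released, percpu_ref_kill() is called on ctx->refs, after
which percpu_ref_is_dying() returns true even though the submitting task itself
is still alive. The userspace mock below is a sketch with simplified stand-in
types (mock_ctx and mock_task are hypothetical, not the kernel structures) that
shows how the decision changes for a live task whose ring has been closed:

/* Userspace mock of the decision change, for illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define PF_EXITING	0x00000004	/* illustrative bit values */
#define PF_KTHREAD	0x00200000

struct mock_ctx  { bool refs_dying; };		/* percpu_ref_is_dying(&ctx->refs) */
struct mock_task { unsigned int flags; };	/* current->flags */

/* Old behaviour: only the task's exit state decides */
static bool should_terminate_old(const struct mock_task *t, const struct mock_ctx *c)
{
	(void)c;
	return t->flags & (PF_KTHREAD | PF_EXITING);
}

/* New behaviour: a dying ring also terminates task_work-driven retries */
static bool should_terminate_new(const struct mock_task *t, const struct mock_ctx *c)
{
	return (t->flags & (PF_KTHREAD | PF_EXITING)) || c->refs_dying;
}

int main(void)
{
	struct mock_task live_task = { .flags = 0 };
	struct mock_ctx  closed_ring = { .refs_dying = true };

	/* Task is alive but the ring has been closed: the old check still
	 * retried the request, the new one cancels it. */
	printf("old check: %s\n", should_terminate_old(&live_task, &closed_ring) ? "cancel" : "retry");
	printf("new check: %s\n", should_terminate_new(&live_task, &closed_ring) ? "cancel" : "retry");
	return 0;
}
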
io_uring/poll.c
index a0152bdc1c6102f7da98bc4ead86b992ca907813..e9f83d3fc835f1db8ba3a3d5b150763b9cf1e4f9 100644
@@ -241,7 +241,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
        struct io_ring_ctx *ctx = req->ctx;
        int v;
 
-       if (unlikely(io_should_terminate_tw()))
+       if (unlikely(io_should_terminate_tw(ctx)))
                return -ECANCELED;
 
        do {
io_uring/timeout.c
index 57fe63faa6ba3575f8707dc7777b85e71183a4aa..0bfd111e9164c00cc28ec964e50e3487bd317515 100644
@@ -275,7 +275,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
        int ret = -ENOENT;
 
        if (prev) {
-               if (!io_should_terminate_tw()) {
+               if (!io_should_terminate_tw(req->ctx)) {
                        struct io_cancel_data cd = {
                                .ctx            = req->ctx,
                                .data           = prev->cqe.user_data,