io_uring: include dying ring in task_work "should cancel" state
author    Jens Axboe <axboe@kernel.dk>
          Thu, 18 Sep 2025 16:21:14 +0000 (10:21 -0600)
committer Jens Axboe <axboe@kernel.dk>
          Thu, 18 Sep 2025 16:24:50 +0000 (10:24 -0600)
When running task_work for an exiting task, rather than perform the
issue retry attempt, the task_work is canceled. However, this isn't
done for a ring that has been closed. This can lead to requests being
successfully completed post the ring being closed, which is somewhat
confusing and surprising to an application.
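
For illustration, the pre-patch decision can be sketched like this
(simplified from the io_uring.c hunk below; only the task's state is
consulted, never the ring's):

	/* Pre-patch sketch: PF_EXITING means the task is exiting,
	 * PF_KTHREAD means the task_work is being run by the fallback
	 * kthread. The ring's own state plays no part in the decision.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		io_req_defer_failed(req, -EFAULT);	/* terminate */
	else if (req->flags & REQ_F_FORCE_ASYNC)
		io_queue_iowq(req);			/* retry the issue */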

Rather than checking only the task's exit state, also take the ring's
ref state into account when deciding whether to terminate a given
request run from task_work.
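
After the patch the helper reads roughly as below (a sketch; see the
io_uring.h hunk for the real definition; percpu_ref_is_dying() is the
existing percpu-refcount helper that reports the ring's refs are
already being dropped):

	static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
	{
		/* task is exiting, or tw runs from the fallback kthread */
		if (current->flags & (PF_KTHREAD | PF_EXITING))
			return true;
		/* ring has been closed and its percpu refs are dying */
		return percpu_ref_is_dying(&ctx->refs);
	}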

Cc: stable@vger.kernel.org # 6.1+
Link: https://github.com/axboe/liburing/discussions/1459
Reported-by: Benedek Thaler <thaler@thaler.hu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/poll.c
io_uring/timeout.c
io_uring/uring_cmd.c

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 93633613a1657c0b6a618c688501e8e61336cc68..bcec12256f3407b07ff98ada0406e5927c3c1a94 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1406,8 +1406,10 @@ static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw)
 
 void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw)
 {
-       io_tw_lock(req->ctx, tw);
-       if (unlikely(io_should_terminate_tw()))
+       struct io_ring_ctx *ctx = req->ctx;
+
+       io_tw_lock(ctx, tw);
+       if (unlikely(io_should_terminate_tw(ctx)))
                io_req_defer_failed(req, -EFAULT);
        else if (req->flags & REQ_F_FORCE_ASYNC)
                io_queue_iowq(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index abc6de227f74d23bbca84b761324072135932da7..1880902be6fd72eedd3b17e57bbcd241cddfed50 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -476,9 +476,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
  * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
  *    our fallback task_work.
  */
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
 {
-       return current->flags & (PF_KTHREAD | PF_EXITING);
+       return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
 }
 
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index c786e587563b0598228a2eb74a423485cf0b2063..6090a26975d400ca2db925b03f5882dc88d91758 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -224,7 +224,7 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
 {
        int v;
 
-       if (unlikely(io_should_terminate_tw()))
+       if (unlikely(io_should_terminate_tw(req->ctx)))
                return -ECANCELED;
 
        do {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 7f13bfa9f2b6173d1e842f8971fdbd1db460d180..17e3aab0af3676aeb90ecafbf78a0c216340744e 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -324,7 +324,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
        int ret;
 
        if (prev) {
-               if (!io_should_terminate_tw()) {
+               if (!io_should_terminate_tw(req->ctx)) {
                        struct io_cancel_data cd = {
                                .ctx            = req->ctx,
                                .data           = prev->cqe.user_data,
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 053bac89b6c0fc765ea492bfb12d8921d5a39013..213716e10d704a85cd8936daeb80c9e57b95a4e3 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -118,7 +118,7 @@ static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        unsigned int flags = IO_URING_F_COMPLETE_DEFER;
 
-       if (io_should_terminate_tw())
+       if (io_should_terminate_tw(req->ctx))
                flags |= IO_URING_F_TASK_DEAD;
 
        /* task_work executor checks the deferred list completion */