]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
io_uring: include dying ring in task_work "should cancel" state
authorJens Axboe <axboe@kernel.dk>
Thu, 18 Sep 2025 16:21:14 +0000 (10:21 -0600)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Sep 2025 09:16:50 +0000 (11:16 +0200)
commit 3539b1467e94336d5854ebf976d9627bfb65d6c3 upstream.

When running task_work for an exiting task, rather than perform the
issue retry attempt, the task_work is canceled. However, this isn't
done for a ring that has been closed. This can lead to requests being
successfully completed after the ring has been closed, which is somewhat
confusing and surprising to an application.

Rather than just check the task exit state, also include the ring
ref state in deciding whether or not to terminate a given request when
run from task_work.

Cc: stable@vger.kernel.org # 6.1+
Link: https://github.com/axboe/liburing/discussions/1459
Reported-by: Benedek Thaler <thaler@thaler.hu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/poll.c
io_uring/timeout.c
io_uring/uring_cmd.c

index 5111ec040c53424e815072cd062a64bfbaf52956..aa8787777f29ae7cb5d629ae1e0a7454273fc8f4 100644 (file)
@@ -1371,8 +1371,10 @@ static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw)
 
 void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw)
 {
-       io_tw_lock(req->ctx, tw);
-       if (unlikely(io_should_terminate_tw()))
+       struct io_ring_ctx *ctx = req->ctx;
+
+       io_tw_lock(ctx, tw);
+       if (unlikely(io_should_terminate_tw(ctx)))
                io_req_defer_failed(req, -EFAULT);
        else if (req->flags & REQ_F_FORCE_ASYNC)
                io_queue_iowq(req);
index 66c1ca73f55ee56139986246236fc43574316f1c..336689752d9fe1890a1126c140964d44b647ad6c 100644 (file)
@@ -470,9 +470,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
  * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
  *    our fallback task_work.
  */
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
 {
-       return current->flags & (PF_KTHREAD | PF_EXITING);
+       return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
 }
 
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
index 20e9b46a4adfd5d9259e825249a4101cc04213f5..1b79c268725d4713cd5a0a20a542259740cad42a 100644 (file)
@@ -224,7 +224,7 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
 {
        int v;
 
-       if (unlikely(io_should_terminate_tw()))
+       if (unlikely(io_should_terminate_tw(req->ctx)))
                return -ECANCELED;
 
        do {
index 7f13bfa9f2b6173d1e842f8971fdbd1db460d180..17e3aab0af3676aeb90ecafbf78a0c216340744e 100644 (file)
@@ -324,7 +324,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
        int ret;
 
        if (prev) {
-               if (!io_should_terminate_tw()) {
+               if (!io_should_terminate_tw(req->ctx)) {
                        struct io_cancel_data cd = {
                                .ctx            = req->ctx,
                                .data           = prev->cqe.user_data,
index 929cad6ee32628aaf04184a628ef4f88fe40217f..b2b4f62c90ce8048c61da48f1cc8fe2031030cdf 100644 (file)
@@ -123,7 +123,7 @@ static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        unsigned int flags = IO_URING_F_COMPLETE_DEFER;
 
-       if (io_should_terminate_tw())
+       if (io_should_terminate_tw(req->ctx))
                flags |= IO_URING_F_TASK_DEAD;
 
        /* task_work executor checks the deffered list completion */