From 07471dfeefed49833439730a8a5977b67238d3fb Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 18 Sep 2025 10:21:14 -0600
Subject: [PATCH] io_uring: include dying ring in task_work "should cancel"
 state

Commit 3539b1467e94336d5854ebf976d9627bfb65d6c3 upstream.

When running task_work for an exiting task, rather than perform the
issue retry attempt, the task_work is canceled. However, this isn't
done for a ring that has been closed. This can lead to requests being
successfully completed post the ring being closed, which is somewhat
confusing and surprising to an application.

Rather than just check the task exit state, also include the ring
ref state in deciding whether or not to terminate a given request when
run from task_work.

Cc: stable@vger.kernel.org # 6.1+
Link: https://github.com/axboe/liburing/discussions/1459
Reported-by: Benedek Thaler
Signed-off-by: Jens Axboe
Signed-off-by: Greg Kroah-Hartman
---
 io_uring/io_uring.c | 12 ++++++++----
 io_uring/io_uring.h |  4 ++--
 io_uring/poll.c     |  2 +-
 io_uring/timeout.c  |  2 +-
 4 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fa0c9c0449315..2aae0de6169ce 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1248,8 +1248,10 @@ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 
 void io_req_task_submit(struct io_kiocb *req, bool *locked)
 {
-	io_tw_lock(req->ctx, locked);
-	if (likely(!io_should_terminate_tw()))
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_tw_lock(ctx, locked);
+	if (likely(!io_should_terminate_tw(ctx)))
 		io_queue_sqe(req);
 	else
 		io_req_complete_failed(req, -EFAULT);
@@ -1771,8 +1773,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_poll_issue(struct io_kiocb *req, bool *locked)
 {
-	io_tw_lock(req->ctx, locked);
-	if (unlikely(io_should_terminate_tw()))
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_tw_lock(ctx, locked);
+	if (unlikely(io_should_terminate_tw(ctx)))
 		return -EFAULT;
 	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
 }
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 37ef84520be4b..194e3230f853d 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -403,9 +403,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
  * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
  *    our fallback task_work.
  */
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
 {
-	return current->flags & (PF_KTHREAD | PF_EXITING);
+	return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
 }
 
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index a0152bdc1c610..e9f83d3fc835f 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -241,7 +241,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int v;
 
-	if (unlikely(io_should_terminate_tw()))
+	if (unlikely(io_should_terminate_tw(ctx)))
 		return -ECANCELED;
 
 	do {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 57fe63faa6ba3..0bfd111e9164c 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -275,7 +275,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!io_should_terminate_tw()) {
+		if (!io_should_terminate_tw(req->ctx)) {
 			struct io_cancel_data cd = {
 				.ctx		= req->ctx,
 				.data		= prev->cqe.user_data,
-- 
2.47.3
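
As an illustration of the behavior described in the commit message, below
is a minimal userspace sketch, assuming liburing is available (link with
-luring). It is illustrative only, not the reproducer from the linked
discussion: the "testfile" path is made up, and whether a given request
is actually retried via task_work depends on the file type and kernel.

#include <liburing.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	char buf[] = "hello";
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;

	/* queue a write, but never reap its completion */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, fd, buf, strlen(buf), 0);
	io_uring_submit(&ring);

	/*
	 * Close the ring with the write still in flight. Before this
	 * patch, a task_work-driven retry could still issue and complete
	 * the write after this point; with the ring ref now checked in
	 * io_should_terminate_tw(), the request is terminated instead.
	 */
	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}

The design point is visible in the io_uring.h hunk: io_should_terminate_tw()
now takes the ring context and also returns true when
percpu_ref_is_dying(&ctx->refs), so a dying ring is treated the same as an
exiting task at every task_work call site.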