void io_req_task_submit(struct io_kiocb *req, bool *locked)
{
- io_tw_lock(req->ctx, locked);
- if (likely(!io_should_terminate_tw()))
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_tw_lock(ctx, locked);
+ if (likely(!io_should_terminate_tw(ctx)))
io_queue_sqe(req);
else
io_req_complete_failed(req, -EFAULT);
int io_poll_issue(struct io_kiocb *req, bool *locked)
{
- io_tw_lock(req->ctx, locked);
- if (unlikely(io_should_terminate_tw()))
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_tw_lock(ctx, locked);
+ if (unlikely(io_should_terminate_tw(ctx)))
return -EFAULT;
return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
}
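Every caller-side hunk makes the same mechanical change: hoist req->ctx into a local and hand it to io_should_terminate_tw(). Purely as a readability aid (not an additional change), this is how io_poll_issue() reads once the hunk above is applied; every identifier comes straight from the hunk itself:

int io_poll_issue(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_tw_lock(ctx, locked);
	/* bail if the task is exiting, we run from the fallback kthread, or the ring is dying */
	if (unlikely(io_should_terminate_tw(ctx)))
		return -EFAULT;
	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
}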
* 2) PF_KTHREAD is set, in which case the invoker of the task_work is
* our fallback task_work.
*/
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
{
- return current->flags & (PF_KTHREAD | PF_EXITING);
+ return (current->flags & (PF_KTHREAD | PF_EXITING)) ||
+        percpu_ref_is_dying(&ctx->refs);
}
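For reference, here is the helper assembled from the '+' lines above into a single block. The new condition reuses the existing percpu_ref_is_dying() check on ctx->refs, so task_work is also terminated once the ring itself has started to go away; the comment inside the body is added here only to summarize the three cases:

static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
{
	/*
	 * Terminate if the task is exiting, if we were invoked from the
	 * fallback kthread, or if the ring's refs are already marked dying.
	 */
	return (current->flags & (PF_KTHREAD | PF_EXITING)) ||
		percpu_ref_is_dying(&ctx->refs);
}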
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
struct io_ring_ctx *ctx = req->ctx;
int v;
- if (unlikely(io_should_terminate_tw()))
+ if (unlikely(io_should_terminate_tw(ctx)))
return -ECANCELED;
do {
int ret = -ENOENT;
if (prev) {
- if (!io_should_terminate_tw()) {
+ if (!io_should_terminate_tw(req->ctx)) {
struct io_cancel_data cd = {
.ctx = req->ctx,
.data = prev->cqe.user_data,