 	io_req_task_work_add(req);
 }
+static bool io_drain_defer_seq(struct io_kiocb *req, u32 seq)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
+}
+
 static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx)
 {
+	bool drain_seen = false, first = true;
+
 	spin_lock(&ctx->completion_lock);
 	while (!list_empty(&ctx->defer_list)) {
 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);
-		if (req_need_defer(de->req, de->seq))
+		drain_seen |= de->req->flags & REQ_F_IO_DRAIN;
+		if ((drain_seen || first) && io_drain_defer_seq(de->req, de->seq))
 			break;
+
 		list_del_init(&de->list);
 		io_req_task_queue(de->req);
 		kfree(de);
+		first = false;
 	}
 	spin_unlock(&ctx->completion_lock);
 }