 	return true;
 }
 
-static void io_req_cqe_overflow(struct io_kiocb *req)
-{
-	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
-				 req->cqe.res, req->cqe.flags,
-				 req->big_cqe.extra1, req->big_cqe.extra2);
-	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
-}
-
 /*
  * writes to the cq entry need to come after reading head; the
  * control dependency is enough as we're using WRITE_ONCE to
 		    unlikely(!io_fill_cqe_req(ctx, req))) {
 			if (ctx->lockless_cq) {
 				spin_lock(&ctx->completion_lock);
-				io_req_cqe_overflow(req);
+				io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+							 req->cqe.res, req->cqe.flags,
+							 req->big_cqe.extra1,
+							 req->big_cqe.extra2);
 				spin_unlock(&ctx->completion_lock);
 			} else {
-				io_req_cqe_overflow(req);
+				io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+							 req->cqe.res, req->cqe.flags,
+							 req->big_cqe.extra1,
+							 req->big_cqe.extra2);
 			}
+
+			memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 		}
 	}
 	__io_cq_unlock_post(ctx);