atomic_t refs;
bool cancel_seq_set;
- /*
- * IOPOLL doesn't use task_work, so use the ->iopoll_node list
- * entry to manage pending iopoll requests.
- */
union {
struct io_task_work io_task_work;
- struct list_head iopoll_node;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
};
union {
/*
 * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
 * poll
*/
struct hlist_node hash_node;
- /* For IOPOLL setup queues, with hybrid polling */
- u64 iopoll_start;
+ /* IOPOLL completion handling */
+ struct list_head iopoll_node;
/* for private io_kiocb freeing */
struct rcu_head rcu_head;
};
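
With the layout above, iopoll_start now overlaps io_task_work, and iopoll_node overlaps hash_node/rcu_head. A minimal sketch of why the hunk below snapshots iopoll_start into a local before polling; the toy_* types and names are illustrative stand-ins, not the real io_kiocb definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the two overlapping io_kiocb members. */
struct toy_task_work {
	void (*func)(void *arg);
	void *arg;
};

struct toy_req {
	union {
		struct toy_task_work tw;  /* storage reused once the request completes */
		uint64_t poll_start;      /* only meaningful while the request is polled */
	};
};

/* Completion repurposes the union, clobbering poll_start. */
static void toy_complete(struct toy_req *req)
{
	memset(&req->tw, 0xff, sizeof(req->tw));
}

int main(void)
{
	struct toy_req req = { .poll_start = 1000 };

	/* The pattern from the hunk below: read the start time once, up front. */
	uint64_t start = req.poll_start;

	toy_complete(&req);

	/* The cached value is still usable; the in-struct one no longer is. */
	printf("cached=%llu in-struct=%llu\n",
	       (unsigned long long)start,
	       (unsigned long long)req.poll_start);
	return 0;
}

The same reasoning presumably motivates routing the read below through READ_ONCE(): the field is loaded exactly once into a local before anything can reuse the overlapping storage.
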
static int io_uring_hybrid_poll(struct io_kiocb *req,
			struct io_comp_batch *iob, unsigned int poll_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- u64 runtime, sleep_time;
+ u64 runtime, sleep_time, iopoll_start;
int ret;
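+ /*
+  * Snapshot the poll start time before polling: iopoll_start now shares a
+  * union with io_task_work (see the layout change above), so it may be
+  * overwritten once the request completes during the classic poll below.
+  */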
+ iopoll_start = READ_ONCE(req->iopoll_start);
sleep_time = io_hybrid_iopoll_delay(ctx, req);
ret = io_uring_classic_poll(req, iob, poll_flags);
- runtime = ktime_get_ns() - req->iopoll_start - sleep_time;
+ runtime = ktime_get_ns() - iopoll_start - sleep_time;
/*
* Use minimum sleep time if we're polling devices with different