There are a few of these cases where ctx->int_flags is read outside the
uring_lock; the races are benign, so annotate the reads with data_race()
to document that and keep KCSAN quiet.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
struct io_ring_ctx *ctx = file->private_data;
__poll_t mask = 0;
- if (unlikely(!(ctx->int_flags & IO_RING_F_POLL_ACTIVATED)))
+ if (unlikely(!(data_race(ctx->int_flags) & IO_RING_F_POLL_ACTIVATED)))
io_activate_pollwq(ctx);
/*
* provides mb() which pairs with barrier from wq_has_sleeper
wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
+#define SHOULD_FLUSH_MASK (IO_RING_F_OFF_TIMEOUT_USED | \
+ IO_RING_F_HAS_EVFD | IO_RING_F_POLL_ACTIVATED)
+
/*
 * Kick the slow-path CQ-ring flush only when one of the flags in
 * SHOULD_FLUSH_MASK is set. ->int_flags is read locklessly here; per the
 * commit message the race is benign, hence the data_race() annotation.
 */
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->int_flags & (IO_RING_F_OFF_TIMEOUT_USED |
- IO_RING_F_HAS_EVFD |
- IO_RING_F_POLL_ACTIVATED)))
+ if (unlikely(data_race(ctx->int_flags) & SHOULD_FLUSH_MASK))
 __io_commit_cqring_flush(ctx);
}
if (!head) {
io_ctx_mark_taskrun(ctx);
- if (ctx->int_flags & IO_RING_F_HAS_EVFD)
+ if (data_race(ctx->int_flags) & IO_RING_F_HAS_EVFD)
io_eventfd_signal(ctx, false);
}