 	mutex_unlock(&ctx->uring_lock);
 }

-static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, long min_events)
 {
 	unsigned int nr_events = 0;
 	unsigned long check_cq;
...

 		if (wq_list_empty(&ctx->iopoll_list) ||
 		    io_task_work_pending(ctx)) {
 			u32 tail = ctx->cached_cq_tail;

-			(void) io_run_local_work_locked(ctx, min);
+			(void) io_run_local_work_locked(ctx, min_events);

 			if (task_work_pending(current) ||
 			    wq_list_empty(&ctx->iopoll_list)) {
 				mutex_unlock(&ctx->uring_lock);
 				io_run_task_work();
 				mutex_lock(&ctx->uring_lock);
 			}
 			/* some requests don't go through iopoll_list */
 			if (tail != ctx->cached_cq_tail ||
 			    wq_list_empty(&ctx->iopoll_list))
 				break;
 		}
-		ret = io_do_iopoll(ctx, !min);
+		ret = io_do_iopoll(ctx, !min_events);
 		if (unlikely(ret < 0))
 			return ret;

...

 			break;

 		nr_events += ret;
-	} while (nr_events < min);
+	} while (nr_events < min_events);

 	return 0;
 }
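Not part of the patch, but useful context for the rename: a minimal userspace sketch, using liburing, of how this code path is reached. On a ring created with IORING_SETUP_IOPOLL, the wait_nr argument of io_uring_submit_and_wait() is passed to io_uring_enter(2) as min_complete, which is the value that arrives in io_iopoll_check() as min_events. The device path, buffer size, and the trimmed error handling below are illustrative assumptions, not taken from the patch.

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	void *buf;

	/* IOPOLL rings poll the device for completions instead of waiting for IRQs */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;

	/* polled I/O needs O_DIRECT; /dev/nvme0n1 is a placeholder device */
	int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);

	/*
	 * wait_nr = 1 becomes min_events = 1 in io_iopoll_check(): the kernel
	 * keeps calling io_do_iopoll() until at least one CQE has been posted.
	 */
	io_uring_submit_and_wait(&ring, 1);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}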