In io_uring/net.c, io_send_zc_prep() resolves the user-supplied fixed-buffer index and pins the rsrc node on the notification request; the call site simply drops the removed argument:

 			return -EFAULT;
 		idx = array_index_nospec(idx, ctx->nr_user_bufs);
 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
-		io_req_set_rsrc_node(notif, ctx, 0);
+		io_req_set_rsrc_node(notif, ctx);
 	}

 	if (req->opcode == IORING_OP_SEND_ZC) {
 		...
 	}
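
Around the changed line, the context follows the kernel's usual Spectre-v1 hardening idiom: bounds-check the user-controlled index, clamp it with array_index_nospec(), then load the entry. A minimal sketch of that idiom in isolation; lookup_user_buf() is an illustrative name, not a function in the tree:

#include <linux/nospec.h>

/* Sketch only: safely resolve a user-supplied fixed-buffer index. */
static struct io_mapped_ubuf *lookup_user_buf(struct io_ring_ctx *ctx,
					      unsigned int idx)
{
	if (unlikely(idx >= ctx->nr_user_bufs))
		return NULL;
	/* Clamp idx so it cannot be speculated past the bounds check. */
	idx = array_index_nospec(idx, ctx->nr_user_bufs);
	return READ_ONCE(ctx->user_bufs[idx]);
}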
In io_uring/rsrc.h, io_req_set_rsrc_node() loses the issue_flags parameter together with the conditional submit lock/unlock pair: every remaining caller passes issue_flags == 0, meaning it already holds the ring lock at prep time, so the wrapper calls were effectively no-ops:

 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
-					struct io_ring_ctx *ctx,
-					unsigned int issue_flags)
+					struct io_ring_ctx *ctx)
 {
-	if (!req->rsrc_node) {
-		io_ring_submit_lock(ctx, issue_flags);
+	if (!req->rsrc_node)
 		__io_req_set_rsrc_node(req, ctx);
-		io_ring_submit_unlock(ctx, issue_flags);
-	}
 }

 static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
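
With the lock/unlock pair gone, the helper silently relies on the caller already holding the ring mutex. A minimal sketch of how that contract could be made explicit; the lockdep assertion is illustrative and not part of this diff:

static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx)
{
	/* Illustrative: callers are expected to hold the ring mutex. */
	lockdep_assert_held(&ctx->uring_lock);

	if (!req->rsrc_node)
		__io_req_set_rsrc_node(req, ctx);
}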
The fixed-buffer prep path in io_uring/rw.c makes the same call-site change before importing the buffer into the request's iterator:

 		return -EFAULT;
 	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
 	imu = ctx->user_bufs[index];
-	io_req_set_rsrc_node(req, ctx, 0);
+	io_req_set_rsrc_node(req, ctx);

 	io = req->async_data;
 	ret = io_import_fixed(ddir, &io->iter, imu, rw->addr, rw->len);
Finally, the uring_cmd prep path in io_uring/uring_cmd.c pins the node up front as well; the hunk opens partway through the in-source comment that gives the reason:

 	 * being called. This prevents destruction of the mapped buffer
 	 * we'll need at actual import time.
 	 */
-	io_req_set_rsrc_node(req, ctx, 0);
+	io_req_set_rsrc_node(req, ctx);
 	}

 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
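
All three call sites share the motivation that comment spells out: prep and the eventual buffer import can be separated in time (the request may go async), and the pinned rsrc node is what keeps the registered buffer alive across that window. A hedged sketch of the lifetime, as a timeline rather than real kernel code:

/*
 * prep, under ctx->uring_lock:
 *     imu = ctx->user_bufs[index];      // resolve the registered buffer
 *     io_req_set_rsrc_node(req, ctx);   // pin the rsrc node
 *
 * ... userspace may unregister its buffers here ...
 *
 * issue, possibly later and from another context:
 *     io_import_fixed(ddir, &iter, imu, addr, len);   // imu still valid
 *
 * The reference taken at prep time is only dropped when the request
 * completes, so the mapped buffer cannot be torn down in between.
 */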