lockdep_assert_held(&req->ctx->uring_lock);
req_set_fail(req);
- io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
+ io_req_set_res(req, res, io_put_kbuf(req, res));
if (def->fail)
def->fail(req);
io_req_complete_defer(req);
return false;
}
-static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
- unsigned issue_flags)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len)
{
if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
return 0;
}
static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
- int nbufs, unsigned issue_flags)
+ int nbufs)
{
if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
return 0;
}
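The two hunks above drop the now-dead `issue_flags` argument from io_put_kbuf() and io_put_kbufs(): every caller reaches these helpers with the ring lock held, so the locked/unlocked distinction no longer carries any information. Below is a minimal userspace sketch of the same refactor, assuming hypothetical names (`struct ring`, `put_buf()`) rather than the kernel's; the invariant the flag used to communicate is asserted instead, much like the lockdep_assert_held() in the first hunk.

/*
 * Illustrative userspace analogue, not kernel code: once every caller is
 * guaranteed to hold the lock, a locked-vs-unlocked flags argument is
 * redundant, so it is removed and the invariant is asserted instead.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;
	int lock_held;		/* poor man's lockdep state, set by the lock holder */
	int buffers_in_use;
};

/* Before: static unsigned put_buf(struct ring *r, int len, unsigned issue_flags); */
static unsigned put_buf(struct ring *r, int len)
{
	assert(r->lock_held);	/* every caller now holds r->lock */
	r->buffers_in_use--;
	return (unsigned)len;
}

int main(void)
{
	struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

	pthread_mutex_lock(&r.lock);
	r.lock_held = 1;
	printf("released %u bytes\n", put_buf(&r, 4096));
	r.lock_held = 0;
	pthread_mutex_unlock(&r.lock);
	return 0;
}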
static inline bool io_send_finish(struct io_kiocb *req, int *ret,
- struct io_async_msghdr *kmsg,
- unsigned issue_flags)
+ struct io_async_msghdr *kmsg)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
bool bundle_finished = *ret <= 0;
unsigned int cflags;
if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
- cflags = io_put_kbuf(req, *ret, issue_flags);
+ cflags = io_put_kbuf(req, *ret);
goto finish;
}
- cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
+ cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret));
if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
goto finish;
else if (sr->done_io)
ret = sr->done_io;
- if (!io_send_finish(req, &ret, kmsg, issue_flags))
+ if (!io_send_finish(req, &ret, kmsg))
goto retry_bundle;
io_req_msg_cleanup(req, issue_flags);
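The io_send() hunk above folds bytes transferred by earlier attempts (`sr->done_io`) into the result before handing it to io_send_finish(). Here is a hedged sketch of that partial-progress pattern using plain write(2); `send_all()` and its `done_io` variable are illustrative, not the io_uring implementation.

/*
 * Hypothetical helper showing the done_io accounting idea: progress made
 * in earlier attempts is reported even if a later attempt fails.
 */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static ssize_t send_all(int fd, const char *buf, size_t len)
{
	size_t done_io = 0;		/* progress carried across retries */

	while (done_io < len) {
		ssize_t ret = write(fd, buf + done_io, len - done_io);

		if (ret > 0) {
			done_io += (size_t)ret;
			continue;
		}
		if (ret < 0 && errno == EINTR)
			continue;
		/* Error or EOF: report progress made so far, if any. */
		return done_io ? (ssize_t)done_io : ret;
	}
	return (ssize_t)done_io;
}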
if (sr->flags & IORING_RECVSEND_BUNDLE) {
size_t this_ret = *ret - sr->done_io;
- cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
- issue_flags);
+ cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret));
if (sr->flags & IORING_RECV_RETRY)
cflags = req->cqe.flags | (cflags & CQE_F_MASK);
if (sr->mshot_len && *ret >= sr->mshot_len)
return false;
}
} else {
- cflags |= io_put_kbuf(req, *ret, issue_flags);
+ cflags |= io_put_kbuf(req, *ret);
}
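In the bundle paths above, io_put_kbufs() receives both the byte count and a buffer count derived from it by io_bundle_nbufs(). The sketch below shows one plausible way such "how many buffers did N bytes consume" accounting can be done over an iovec array; it is an illustration of the idea under that assumption, not the kernel helper.

/*
 * Hypothetical buffer-count accounting over an iovec array, in the spirit
 * of the io_bundle_nbufs()/io_put_kbufs() pairing above.
 */
#include <stddef.h>
#include <sys/uio.h>

static int nbufs_for_len(const struct iovec *iov, int niov, size_t len)
{
	int nbufs = 0;

	/* No data transferred means no buffers to account for. */
	if (!len)
		return 0;

	while (nbufs < niov && len) {
		size_t seg = iov[nbufs].iov_len;

		len -= seg < len ? seg : len;
		nbufs++;
	}
	return nbufs;
}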
io_req_io_end(req);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
- req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);
+ req->cqe.flags |= io_put_kbuf(req, req->cqe.res);
io_req_rw_cleanup(req, 0);
io_req_task_complete(req, tw);
/*
 * Safe to call io_end from here as we're inline
 * from the submission path.
 */
io_req_io_end(req);
- io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
+ io_req_set_res(req, final_ret, io_put_kbuf(req, ret));
io_req_rw_cleanup(req, issue_flags);
return IOU_COMPLETE;
} else {
if (ret < 0)
req_set_fail(req);
} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- cflags = io_put_kbuf(req, ret, issue_flags);
+ cflags = io_put_kbuf(req, ret);
} else {
/*
* Any successful return value will keep the multishot read
* armed, if it's still set. Put our buffer and post a CQE. If
* we fail to post a CQE, or multishot is no longer set, then
* jump to the termination path. This request is then done.
*/
- cflags = io_put_kbuf(req, ret, issue_flags);
+ cflags = io_put_kbuf(req, ret);
rw->len = 0; /* similarly to above, reset len to 0 */
if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
if (!smp_load_acquire(&req->iopoll_completed))
break;
nr_events++;
- req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
+ req->cqe.flags = io_put_kbuf(req, req->cqe.res);
if (req->opcode != IORING_OP_URING_CMD)
io_req_rw_cleanup(req, 0);
}
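The iopoll loop above only reads `req->cqe.res` and puts the buffer after smp_load_acquire(&req->iopoll_completed) observes completion. Below is a minimal userspace analogue of that acquire/release pairing using C11 atomics; the names are illustrative, not io_uring's.

/*
 * The completer publishes the result with a release store of 'completed';
 * the poller's acquire load guarantees it then sees the published result.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct poll_req {
	int res;			/* published before 'completed' */
	atomic_int completed;		/* release store / acquire load */
};

static void *completer(void *arg)
{
	struct poll_req *req = arg;

	req->res = 4096;	/* plain store, made visible by the release below */
	atomic_store_explicit(&req->completed, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	struct poll_req req = { .res = 0, .completed = 0 };
	pthread_t thr;

	pthread_create(&thr, NULL, completer, &req);

	/* Poll until the acquire load observes completion, then read res. */
	while (!atomic_load_explicit(&req.completed, memory_order_acquire))
		;
	printf("res=%d\n", req.res);

	pthread_join(thr, NULL);
	return 0;
}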