lockdep_assert_held(&req->ctx->uring_lock);
req_set_fail(req);
- io_req_set_res(req, res, io_put_kbuf(req, res));
+ io_req_set_res(req, res, io_put_kbuf(req, res, req->buf_list));
if (def->fail)
def->fail(req);
io_req_complete_defer(req);
switch (io_arm_poll_handler(req, 0)) {
case IO_APOLL_READY:
- io_kbuf_recycle(req, 0);
+ io_kbuf_recycle(req, req->buf_list, 0);
io_req_task_queue(req);
break;
case IO_APOLL_ABORTED:
- io_kbuf_recycle(req, 0);
+ io_kbuf_recycle(req, req->buf_list, 0);
io_queue_iowq(req);
break;
case IO_APOLL_OK:
return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}
-static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
+static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr)
{
- struct io_buffer_list *bl = req->buf_list;
bool ret = true;
if (bl)
return ret;
}
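The hunk above elides most of the ring-put body. A minimal sketch of the
converted helper, assuming only the io_kbuf_commit() declaration that appears
later in this series; an illustration, not the verbatim kernel code:

static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
				      struct io_buffer_list *bl, int len, int nr)
{
	bool ret = true;

	/* commit the consumed ring entries against the caller-supplied list */
	if (bl)
		ret = io_kbuf_commit(req, bl, len, nr);
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}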
-unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
+unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
+ int len, int nbufs)
{
unsigned int ret;
return ret;
}
- if (!__io_put_kbuf_ring(req, len, nbufs))
+ if (!__io_put_kbuf_ring(req, bl, len, nbufs))
ret |= IORING_CQE_F_BUF_MORE;
return ret;
}
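Similarly, the interior of __io_put_kbufs() is mostly elided. A hedged sketch
of its overall shape, using only helpers declared in this patch (the
IORING_CQE_BUFFER_SHIFT encoding and the legacy-drop branch are assumptions
about the surrounding code):

unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
			    int len, int nbufs)
{
	unsigned int ret;

	/* the CQE flags carry the ID of the selected buffer */
	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	/* classic (legacy) provided buffers are simply dropped */
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		io_kbuf_drop_legacy(req);
		return ret;
	}

	/* ring buffers: a false return means the buffer was only partially
	 * consumed and stays current, so advertise BUF_MORE */
	if (!__io_put_kbuf_ring(req, bl, len, nbufs))
		ret |= IORING_CQE_F_BUF_MORE;
	return ret;
}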
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);
-unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs);
+unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
+ int len, int nbufs);
bool io_kbuf_commit(struct io_kiocb *req,
struct io_buffer_list *bl, int len, int nr);
struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
unsigned int bgid);
-static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
+static inline bool io_kbuf_recycle_ring(struct io_kiocb *req,
+ struct io_buffer_list *bl)
{
/*
* We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
* the flag and hence ensure that bl->head doesn't get incremented.
* The exception is partial io, that case we should increment bl->head
* to monopolize the buffer.
*/
- if (req->buf_list) {
+ if (bl) {
req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
return true;
}
return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
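For completeness, the converted recycle-ring helper as a whole; the trailing
return false (no ring buffer list attached, nothing to hand back) is an
assumption, only the bl test and the flag clearing appear in the hunk above:

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req,
					struct io_buffer_list *bl)
{
	if (bl) {
		/* nothing has been committed yet, so clearing the flags
		 * hands the buffer back without touching bl->head */
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}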
-static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static inline bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
+ unsigned issue_flags)
{
if (req->flags & REQ_F_BL_NO_RECYCLE)
return false;
if (req->flags & REQ_F_BUFFER_SELECTED)
return io_kbuf_recycle_legacy(req, issue_flags);
if (req->flags & REQ_F_BUFFER_RING)
- return io_kbuf_recycle_ring(req);
+ return io_kbuf_recycle_ring(req, bl);
return false;
}
-static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
+ struct io_buffer_list *bl)
{
if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
return 0;
- return __io_put_kbufs(req, len, 1);
+ return __io_put_kbufs(req, bl, len, 1);
}
static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
- int nbufs)
+ struct io_buffer_list *bl, int nbufs)
{
if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
return 0;
- return __io_put_kbufs(req, len, nbufs);
+ return __io_put_kbufs(req, bl, len, nbufs);
}
#endif
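Read from a call site, the new header contract is: whoever selected the
buffer passes the io_buffer_list back in when completing or recycling. A
hypothetical issue-path fragment (do_transfer() is a placeholder, not a real
helper) showing the convention the conversions below follow:

static int example_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_buffer_list *bl = req->buf_list;
	int ret = do_transfer(req, issue_flags);	/* placeholder op */

	if (ret < 0) {
		/* nothing consumed: hand the selected buffer back */
		io_kbuf_recycle(req, bl, issue_flags);
		return ret;
	}
	/* consumed: fold the buffer ID into the CQE flags */
	io_req_set_res(req, ret, io_put_kbuf(req, ret, bl));
	return 0;
}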
return nbufs;
}
-static int io_net_kbuf_recyle(struct io_kiocb *req,
+static int io_net_kbuf_recyle(struct io_kiocb *req, struct io_buffer_list *bl,
struct io_async_msghdr *kmsg, int len)
{
req->flags |= REQ_F_BL_NO_RECYCLE;
if (req->flags & REQ_F_BUFFERS_COMMIT)
- io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
+ io_kbuf_commit(req, bl, len, io_bundle_nbufs(kmsg, len));
return IOU_RETRY;
}
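io_bundle_nbufs() is referenced but not shown in this excerpt; conceptually it
answers "how many of the bundle's buffers does a transfer of len bytes span",
so that exactly that many ring entries get committed. A standalone sketch of
the idea (illustration only, not the kernel's implementation; the iovec walk
is an assumption):

static int bundle_nbufs_sketch(const struct iovec *iov, int nr_iovs, size_t len)
{
	int nbufs = 0;

	/* count iovec entries until 'len' transferred bytes are covered */
	while (len && nbufs < nr_iovs) {
		size_t seg = min(len, iov[nbufs].iov_len);

		len -= seg;
		nbufs++;
	}
	return nbufs;
}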
unsigned int cflags;
if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
- cflags = io_put_kbuf(req, *ret);
+ cflags = io_put_kbuf(req, *ret, req->buf_list);
goto finish;
}
- cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret));
+ cflags = io_put_kbufs(req, *ret, req->buf_list, io_bundle_nbufs(kmsg, *ret));
if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
goto finish;
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- return io_net_kbuf_recyle(req, kmsg, ret);
+ return io_net_kbuf_recyle(req, req->buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (sr->flags & IORING_RECVSEND_BUNDLE) {
size_t this_ret = *ret - sr->done_io;
- cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret));
+ cflags |= io_put_kbufs(req, this_ret, req->buf_list, io_bundle_nbufs(kmsg, this_ret));
if (sr->flags & IORING_RECV_RETRY)
cflags = req->cqe.flags | (cflags & CQE_F_MASK);
if (sr->mshot_len && *ret >= sr->mshot_len)
return false;
}
} else {
- cflags |= io_put_kbuf(req, *ret);
+ cflags |= io_put_kbuf(req, *ret, req->buf_list);
}
if (req->flags & REQ_F_APOLL_MULTISHOT) {
ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
if (ret) {
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, req->buf_list, issue_flags);
return ret;
}
}
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
if (issue_flags & IO_URING_F_MULTISHOT)
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, req->buf_list, issue_flags);
return IOU_RETRY;
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- return io_net_kbuf_recyle(req, kmsg, ret);
+ return io_net_kbuf_recyle(req, req->buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
else if (sr->done_io)
ret = sr->done_io;
else
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, req->buf_list, issue_flags);
if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
goto retry_multishot;
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
if (issue_flags & IO_URING_F_MULTISHOT)
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, req->buf_list, issue_flags);
return IOU_RETRY;
}
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- return io_net_kbuf_recyle(req, kmsg, ret);
+ return io_net_kbuf_recyle(req, req->buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
else if (sr->done_io)
ret = sr->done_io;
else
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, req->buf_list, issue_flags);
if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
goto retry_multishot;
ret = io_poll_check_events(req, tw);
if (ret == IOU_POLL_NO_ACTION) {
- io_kbuf_recycle(req, 0);
+ io_kbuf_recycle(req, req->buf_list, 0);
return;
} else if (ret == IOU_POLL_REQUEUE) {
- io_kbuf_recycle(req, 0);
+ io_kbuf_recycle(req, req->buf_list, 0);
__io_poll_execute(req, 0);
return;
}
req->flags |= REQ_F_POLLED;
ipt.pt._qproc = io_async_queue_proc;
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, req->buf_list, issue_flags);
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
if (ret)
io_req_io_end(req);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
- req->cqe.flags |= io_put_kbuf(req, req->cqe.res);
+ req->cqe.flags |= io_put_kbuf(req, req->cqe.res, req->buf_list);
io_req_rw_cleanup(req, 0);
io_req_task_complete(req, tw);
/*
* Safe to call io_end from here as we're inline
* from the submission path.
*/
io_req_io_end(req);
- io_req_set_res(req, final_ret, io_put_kbuf(req, ret));
+ io_req_set_res(req, final_ret, io_put_kbuf(req, ret, req->buf_list));
io_req_rw_cleanup(req, issue_flags);
return IOU_COMPLETE;
} else {
/*
* Reset rw->len to 0 again to avoid clamping future mshot
* reads, in case the buffer size varies.
*/
- if (io_kbuf_recycle(req, issue_flags))
+ if (io_kbuf_recycle(req, req->buf_list, issue_flags))
rw->len = 0;
return IOU_RETRY;
} else if (ret <= 0) {
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, req->buf_list, issue_flags);
if (ret < 0)
req_set_fail(req);
} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- cflags = io_put_kbuf(req, ret);
+ cflags = io_put_kbuf(req, ret, req->buf_list);
} else {
/*
* Any successful return value will keep the multishot read
* armed, if it's still set. Put our buffer and post a CQE. If
* we fail to post a CQE, or multishot is no longer set, then
* jump to the termination path. This request is then done.
*/
- cflags = io_put_kbuf(req, ret);
+ cflags = io_put_kbuf(req, ret, req->buf_list);
rw->len = 0; /* similarly to above, reset len to 0 */
if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
if (!smp_load_acquire(&req->iopoll_completed))
break;
nr_events++;
- req->cqe.flags = io_put_kbuf(req, req->cqe.res);
+ req->cqe.flags = io_put_kbuf(req, req->cqe.res, req->buf_list);
if (req->opcode != IORING_OP_URING_CMD)
io_req_rw_cleanup(req, 0);
}