* Returns true if it is actually finished, or false if it should run
* again (for multishot).
*/
-static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
+static inline bool io_recv_finish(struct io_kiocb *req,
struct io_async_msghdr *kmsg,
- bool mshot_finished, unsigned issue_flags)
+ struct io_br_sel *sel, bool mshot_finished,
+ unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	unsigned int cflags = 0;

	if (kmsg->msg.msg_inq > 0)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

- if (*ret > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
+ if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
/*
* If sr->len hits zero, the limit has been reached. Mark
* mshot as finished, and flag MSHOT_DONE as well to prevent
* a potential bundle from being retried.
*/
- sr->mshot_total_len -= min_t(int, *ret, sr->mshot_total_len);
+ sr->mshot_total_len -= min_t(int, sel->val, sr->mshot_total_len);
if (!sr->mshot_total_len) {
sr->flags |= IORING_RECV_MSHOT_DONE;
mshot_finished = true;
		}
	}

if (sr->flags & IORING_RECVSEND_BUNDLE) {
- size_t this_ret = *ret - sr->done_io;
+		size_t this_ret = sel->val - sr->done_io;

		cflags |= io_put_kbufs(req, this_ret, req->buf_list,
				       io_bundle_nbufs(kmsg, this_ret));
if (sr->flags & IORING_RECV_RETRY)
cflags = req->cqe.flags | (cflags & CQE_F_MASK);
- if (sr->mshot_len && *ret >= sr->mshot_len)
+ if (sr->mshot_len && sel->val >= sr->mshot_len)
sr->flags |= IORING_RECV_MSHOT_CAP;
/* bundle with no more immediate buffers, we're done */
		if (req->flags & REQ_F_BL_EMPTY)
			goto finish;
		/*
		 * If more is available AND it was a full transfer, retry and
		 * append to this one
		 */
		if (!(sr->flags & (IORING_RECV_RETRY | IORING_RECV_MSHOT_CAP)) &&
		    kmsg->msg.msg_inq > 1 && this_ret > 0 &&
		    !iov_iter_count(&kmsg->msg.msg_iter)) {
			req->cqe.flags = cflags & ~CQE_F_MASK;
			sr->len = kmsg->msg.msg_inq;
			sr->done_io += this_ret;
			sr->flags |= IORING_RECV_RETRY;
			return false;
		}
}
} else {
- cflags |= io_put_kbuf(req, *ret, req->buf_list);
+ cflags |= io_put_kbuf(req, sel->val, req->buf_list);
	}

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
- io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
- *ret = IOU_RETRY;
+ io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
+ sel->val = IOU_RETRY;
io_mshot_prep_retry(req, kmsg);
/* Known not-empty or unknown state, retry */
if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
				return false;
			/* mshot retries exceeded, force a requeue */
			sr->nr_multishot_loops = 0;
sr->flags &= ~IORING_RECV_MSHOT_CAP;
if (issue_flags & IO_URING_F_MULTISHOT)
- *ret = IOU_REQUEUE;
+ sel->val = IOU_REQUEUE;
}
return true;
	}

/* Finish the request / stop multishot. */
finish:
- io_req_set_res(req, *ret, cflags);
- *ret = IOU_COMPLETE;
+ io_req_set_res(req, sel->val, cflags);
+ sel->val = IOU_COMPLETE;
io_req_msg_cleanup(req, issue_flags);
return true;
}
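
For orientation: only the val member of the new carrier is exercised by the
hunk above; req->buf_list is still referenced directly. A minimal sketch of
the assumed shape of struct io_br_sel (the real definition presumably lives
in io_uring/kbuf.h and carries further buffer-selection state):

	/* sketch only -- see io_uring/kbuf.h for the real definition */
	struct io_br_sel {
		/* ... buffer-list/address members elided ... */
		ssize_t val;	/* result or error code threaded through the recv path */
	};
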
	else
		io_kbuf_recycle(req, req->buf_list, issue_flags);

-	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
+	sel.val = ret;
+	if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
		goto retry_multishot;

-	return ret;
+	return sel.val;
}
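
The same tail conversion is applied to io_recv() below. Both call sites are
assumed to declare the carrier on the stack earlier in the function, roughly:

	struct io_br_sel sel = { };	/* assumed local; sel.val carries what was previously passed via *ret */
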
static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
	else
		io_kbuf_recycle(req, req->buf_list, issue_flags);

-	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
+	sel.val = ret;
+	if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
		goto retry_multishot;

-	return ret;
+	return sel.val;
}
int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)