From: Jens Axboe
Date: Thu, 21 Aug 2025 02:03:37 +0000 (-0600)
Subject: io_uring/net: use struct io_br_sel->val as the recv finish value
X-Git-Tag: v6.18-rc1~137^2~58
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=58d815091890e83aa2f83a9cce1fdfe3af02c7b4;p=thirdparty%2Fkernel%2Fstable.git

io_uring/net: use struct io_br_sel->val as the recv finish value

Currently a pointer is passed in to the 'ret' in the receive handlers,
but since we already have a value field in io_br_sel, just use that.

This is also in preparation for needing to pass in struct io_br_sel to
io_recv_finish() anyway.

Link: https://lore.kernel.org/r/20250821020750.598432-10-axboe@kernel.dk
Signed-off-by: Jens Axboe
---

diff --git a/io_uring/net.c b/io_uring/net.c
index 8cff6a8244c0e..a7a4443e3ee77 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -845,9 +845,10 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  * Returns true if it is actually finished, or false if it should run
  * again (for multishot).
  */
-static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
+static inline bool io_recv_finish(struct io_kiocb *req,
 				  struct io_async_msghdr *kmsg,
-				  bool mshot_finished, unsigned issue_flags)
+				  struct io_br_sel *sel, bool mshot_finished,
+				  unsigned issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	unsigned int cflags = 0;
@@ -855,13 +856,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	if (kmsg->msg.msg_inq > 0)
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
-	if (*ret > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
+	if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
 		/*
 		 * If sr->len hits zero, the limit has been reached. Mark
 		 * mshot as finished, and flag MSHOT_DONE as well to prevent
 		 * a potential bundle from being retried.
 		 */
-		sr->mshot_total_len -= min_t(int, *ret, sr->mshot_total_len);
+		sr->mshot_total_len -= min_t(int, sel->val, sr->mshot_total_len);
 		if (!sr->mshot_total_len) {
 			sr->flags |= IORING_RECV_MSHOT_DONE;
 			mshot_finished = true;
@@ -869,12 +870,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	}
 
 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
-		size_t this_ret = *ret - sr->done_io;
+		size_t this_ret = sel->val - sr->done_io;
 
 		cflags |= io_put_kbufs(req, this_ret, req->buf_list, io_bundle_nbufs(kmsg, this_ret));
 		if (sr->flags & IORING_RECV_RETRY)
 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
-		if (sr->mshot_len && *ret >= sr->mshot_len)
+		if (sr->mshot_len && sel->val >= sr->mshot_len)
 			sr->flags |= IORING_RECV_MSHOT_CAP;
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
@@ -893,7 +894,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 			return false;
 		}
 	} else {
-		cflags |= io_put_kbuf(req, *ret, req->buf_list);
+		cflags |= io_put_kbuf(req, sel->val, req->buf_list);
 	}
 
 	/*
@@ -901,8 +902,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	 * receive from this socket.
 	 */
 	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
-	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
-		*ret = IOU_RETRY;
+	    io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
+		sel->val = IOU_RETRY;
 		io_mshot_prep_retry(req, kmsg);
 		/* Known not-empty or unknown state, retry */
 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
@@ -914,15 +915,15 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 			sr->nr_multishot_loops = 0;
 			sr->flags &= ~IORING_RECV_MSHOT_CAP;
 			if (issue_flags & IO_URING_F_MULTISHOT)
-				*ret = IOU_REQUEUE;
+				sel->val = IOU_REQUEUE;
 		}
 		return true;
 	}
 
 	/* Finish the request / stop multishot. */
 finish:
-	io_req_set_res(req, *ret, cflags);
-	*ret = IOU_COMPLETE;
+	io_req_set_res(req, sel->val, cflags);
+	sel->val = IOU_COMPLETE;
 	io_req_msg_cleanup(req, issue_flags);
 	return true;
 }
@@ -1092,10 +1093,11 @@ retry_multishot:
 	else
 		io_kbuf_recycle(req, req->buf_list, issue_flags);
 
-	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
+	sel.val = ret;
+	if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
 		goto retry_multishot;
 
-	return ret;
+	return sel.val;
 }
 
 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
@@ -1240,10 +1242,11 @@ out_free:
 	else
 		io_kbuf_recycle(req, req->buf_list, issue_flags);
 
-	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
+	sel.val = ret;
+	if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
 		goto retry_multishot;
 
-	return ret;
+	return sel.val;
 }
 
 int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)