From: Greg Kroah-Hartman
Date: Tue, 13 Feb 2024 13:20:40 +0000 (+0100)
Subject: 6.7-stable patches
X-Git-Tag: v6.1.78~42
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9690db76d7ed4dbfb2e1fc5ddbaa10ee76e13ba8;p=thirdparty%2Fkernel%2Fstable-queue.git

6.7-stable patches

added patches:
	io_uring-net-fix-sr-len-for-ioring_op_recv-with-msg_waitall-and-buffers.patch
	io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch
	io_uring-poll-move-poll-execution-helpers-higher-up.patch
	io_uring-rw-ensure-poll-based-multishot-read-retries-appropriately.patch
---
diff --git a/queue-6.7/io_uring-net-fix-sr-len-for-ioring_op_recv-with-msg_waitall-and-buffers.patch b/queue-6.7/io_uring-net-fix-sr-len-for-ioring_op_recv-with-msg_waitall-and-buffers.patch
new file mode 100644
index 00000000000..8e04daee23a
--- /dev/null
+++ b/queue-6.7/io_uring-net-fix-sr-len-for-ioring_op_recv-with-msg_waitall-and-buffers.patch
@@ -0,0 +1,37 @@
+From 72bd80252feeb3bef8724230ee15d9f7ab541c6e Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Thu, 1 Feb 2024 06:42:36 -0700
+Subject: io_uring/net: fix sr->len for IORING_OP_RECV with MSG_WAITALL and buffers
+
+From: Jens Axboe
+
+commit 72bd80252feeb3bef8724230ee15d9f7ab541c6e upstream.
+
+If we use IORING_OP_RECV with provided buffers and pass in '0' as the
+length of the request, the length is retrieved from the selected buffer.
+If MSG_WAITALL is also set and we get a short receive, then we may hit
+the retry path which decrements sr->len and increments the buffer for
+a retry. However, the length is still zero at this point, which means
+that sr->len now becomes huge and import_ubuf() will cap it to
+MAX_RW_COUNT and subsequently return -EFAULT for the range as a whole.
+
+Fix this by always assigning sr->len once the buffer has been selected.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ba89d2af17a ("io_uring: ensure recv and recvmsg handle MSG_WAITALL correctly")
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/net.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -902,6 +902,7 @@ retry_multishot:
+ 		if (!buf)
+ 			return -ENOBUFS;
+ 		sr->buf = buf;
++		sr->len = len;
+ 	}
+ 
+ 	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
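
For reference, the failure mode described above can be set up from
userspace along the following lines. This is a minimal sketch, assuming
liburing; the connected socket fd and the buffer group id are
hypothetical. With len == 0 the kernel takes the request length from the
selected provided buffer, and MSG_WAITALL sends a short receive through
the retry path that used to corrupt sr->len:

/*
 * Sketch only: recreates the conditions of the fix above. Both SQEs go
 * in one submission; the provide-buffers op completes inline, so the
 * recv finds a buffer in group BGID to select.
 */
#include <liburing.h>
#include <sys/socket.h>
#include <stdlib.h>

#define BGID		1	/* hypothetical buffer group id */
#define BUF_SIZE	4096

static int queue_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe;

	/* hand one buffer to the kernel in group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, malloc(BUF_SIZE), BUF_SIZE,
				      1, BGID, 0);

	/* len == 0: the selected buffer's size becomes the length */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, 0, MSG_WAITALL);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;

	return io_uring_submit(ring);
}

Before the fix, a short receive on such a request could complete with
-EFAULT instead of the remaining data.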
diff --git a/queue-6.7/io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch b/queue-6.7/io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch
new file mode 100644
index 00000000000..49e1f148923
--- /dev/null
+++ b/queue-6.7/io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch
@@ -0,0 +1,67 @@
+From 91e5d765a82fb2c9d0b7ad930d8953208081ddf1 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Mon, 29 Jan 2024 11:54:18 -0700
+Subject: io_uring/net: un-indent mshot retry path in io_recv_finish()
+
+From: Jens Axboe
+
+commit 91e5d765a82fb2c9d0b7ad930d8953208081ddf1 upstream.
+
+In preparation for putting some retry logic in there, have the done
+path just skip straight to the end rather than have too much nesting
+in here.
+
+No functional changes in this patch.
+
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/net.c | 36 ++++++++++++++++++++----------------
+ 1 file changed, 20 insertions(+), 16 deletions(-)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -645,23 +645,27 @@ static inline bool io_recv_finish(struct
+ 		return true;
+ 	}
+ 
+-	if (!mshot_finished) {
+-		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+-					*ret, cflags | IORING_CQE_F_MORE)) {
+-			io_recv_prep_retry(req);
+-			/* Known not-empty or unknown state, retry */
+-			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
+-			    msg->msg_inq == -1)
+-				return false;
+-			if (issue_flags & IO_URING_F_MULTISHOT)
+-				*ret = IOU_ISSUE_SKIP_COMPLETE;
+-			else
+-				*ret = -EAGAIN;
+-			return true;
+-		}
+-		/* Otherwise stop multishot but use the current result. */
+-	}
++	if (mshot_finished)
++		goto finish;
+ 
++	/*
++	 * Fill CQE for this receive and see if we should keep trying to
++	 * receive from this socket.
++	 */
++	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
++				*ret, cflags | IORING_CQE_F_MORE)) {
++		io_recv_prep_retry(req);
++		/* Known not-empty or unknown state, retry */
++		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
++			return false;
++		if (issue_flags & IO_URING_F_MULTISHOT)
++			*ret = IOU_ISSUE_SKIP_COMPLETE;
++		else
++			*ret = -EAGAIN;
++		return true;
++	}
++	/* Otherwise stop multishot but use the current result. */
++finish:
+ 	io_req_set_res(req, *ret, cflags);
+ 
+ 	if (issue_flags & IO_URING_F_MULTISHOT)
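
A userspace-side aside on the path being reshuffled here:
io_fill_cqe_req_aux() is what posts the multishot CQEs carrying
IORING_CQE_F_MORE, so a completion loop can use that flag to tell
whether a multishot recv is still armed. A minimal sketch, assuming
liburing:

#include <liburing.h>

/*
 * Sketch: drain completions for a multishot recv. A CQE without
 * IORING_CQE_F_MORE means the request has terminated (multishot
 * stopped, or an error) and must be resubmitted to keep receiving.
 */
static void reap_cqes(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	while (io_uring_peek_cqe(ring, &cqe) == 0) {
		if (cqe->res < 0 || !(cqe->flags & IORING_CQE_F_MORE)) {
			/* terminal completion: rearm the recv if desired */
		}
		io_uring_cqe_seen(ring, cqe);
	}
}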
diff --git a/queue-6.7/io_uring-poll-move-poll-execution-helpers-higher-up.patch b/queue-6.7/io_uring-poll-move-poll-execution-helpers-higher-up.patch
new file mode 100644
index 00000000000..48fcd3d77ce
--- /dev/null
+++ b/queue-6.7/io_uring-poll-move-poll-execution-helpers-higher-up.patch
@@ -0,0 +1,76 @@
+From e84b01a880f635e3084a361afba41f95ff500d12 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Mon, 29 Jan 2024 11:52:54 -0700
+Subject: io_uring/poll: move poll execution helpers higher up
+
+From: Jens Axboe
+
+commit e84b01a880f635e3084a361afba41f95ff500d12 upstream.
+
+In preparation for calling __io_poll_execute() higher up, move the
+functions to avoid forward declarations.
+
+No functional changes in this patch.
+
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/poll.c | 40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -228,6 +228,26 @@ enum {
+ 	IOU_POLL_REISSUE = 3,
+ };
+ 
++static void __io_poll_execute(struct io_kiocb *req, int mask)
++{
++	unsigned flags = 0;
++
++	io_req_set_res(req, mask, 0);
++	req->io_task_work.func = io_poll_task_func;
++
++	trace_io_uring_task_add(req, mask);
++
++	if (!(req->flags & REQ_F_POLL_NO_LAZY))
++		flags = IOU_F_TWQ_LAZY_WAKE;
++	__io_req_task_work_add(req, flags);
++}
++
++static inline void io_poll_execute(struct io_kiocb *req, int res)
++{
++	if (io_poll_get_ownership(req))
++		__io_poll_execute(req, res);
++}
++
+ /*
+  * All poll tw should go through this. Checks for poll events, manages
+  * references, does rewait, etc.
+@@ -364,26 +384,6 @@ void io_poll_task_func(struct io_kiocb *
+ 	}
+ }
+ 
+-static void __io_poll_execute(struct io_kiocb *req, int mask)
+-{
+-	unsigned flags = 0;
+-
+-	io_req_set_res(req, mask, 0);
+-	req->io_task_work.func = io_poll_task_func;
+-
+-	trace_io_uring_task_add(req, mask);
+-
+-	if (!(req->flags & REQ_F_POLL_NO_LAZY))
+-		flags = IOU_F_TWQ_LAZY_WAKE;
+-	__io_req_task_work_add(req, flags);
+-}
+-
+-static inline void io_poll_execute(struct io_kiocb *req, int res)
+-{
+-	if (io_poll_get_ownership(req))
+-		__io_poll_execute(req, res);
+-}
+-
+ static void io_poll_cancel_req(struct io_kiocb *req)
+ {
+ 	io_poll_mark_cancelled(req);
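
The forward-declaration motivation is plain C declaration order; a
trivial standalone sketch with made-up names:

#include <stdio.h>

/* Defined before its caller, so no prototype is needed... */
static void helper(void)
{
	puts("helper");
}

/*
 * ...whereas if helper() sat below main(), a forward declaration
 * would be required above it for this call to compile cleanly.
 */
int main(void)
{
	helper();
	return 0;
}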
diff --git a/queue-6.7/io_uring-rw-ensure-poll-based-multishot-read-retries-appropriately.patch b/queue-6.7/io_uring-rw-ensure-poll-based-multishot-read-retries-appropriately.patch
new file mode 100644
index 00000000000..101bbda5048
--- /dev/null
+++ b/queue-6.7/io_uring-rw-ensure-poll-based-multishot-read-retries-appropriately.patch
@@ -0,0 +1,89 @@
+From c79f52f0656eeb3e4a12f7f358f760077ae111b6 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Sat, 27 Jan 2024 13:44:58 -0700
+Subject: io_uring/rw: ensure poll based multishot read retries appropriately
+
+From: Jens Axboe
+
+commit c79f52f0656eeb3e4a12f7f358f760077ae111b6 upstream.
+
+io_read_mshot() always relies on poll triggering retries, and this works
+fine as long as we do a retry per size of the buffer being read. The
+buffer size is given by the size of the buffer(s) in the given buffer
+group ID.
+
+But if we're reading less than what is available, then we don't always
+get to read everything that is available. For example, if the buffers
+available are 32 bytes and we have 64 bytes to read, then we'll
+correctly read the first 32 bytes and then wait for another poll trigger
+before we attempt the next read. This next poll trigger may never
+happen, in which case we just sit forever and never make progress, or it
+may trigger at some point in the future, and now we're just delivering
+the available data much later than we should have.
+
+io_read_mshot() could do retries itself, but that is wasteful as we'll
+be going through all of __io_read() again, and most likely in vain.
+Rather than do that, bump our poll reference count and have
+io_poll_check_events() do one more loop and check with vfs_poll() if we
+have more data to read. If we do, io_read_mshot() will get invoked again
+directly and we'll read the next chunk.
+
+io_poll_multishot_retry() must only get called from inside
+io_poll_issue(), which is our multishot retry handler, as we know we
+already "own" the request at this point.
+
+Cc: stable@vger.kernel.org
+Link: https://github.com/axboe/liburing/issues/1041
+Fixes: fc68fcda0491 ("io_uring/rw: add support for IORING_OP_READ_MULTISHOT")
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/poll.h | 9 +++++++++
+ io_uring/rw.c | 10 +++++++++-
+ 2 files changed, 18 insertions(+), 1 deletion(-)
+
+--- a/io_uring/poll.h
++++ b/io_uring/poll.h
+@@ -24,6 +24,15 @@ struct async_poll {
+ 	struct io_poll		*double_poll;
+ };
+ 
++/*
++ * Must only be called inside issue_flags & IO_URING_F_MULTISHOT, or
++ * potentially other cases where we already "own" this poll request.
++ */
++static inline void io_poll_multishot_retry(struct io_kiocb *req)
++{
++	atomic_inc(&req->poll_refs);
++}
++
+ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
+ 
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -18,6 +18,7 @@
+ #include "opdef.h"
+ #include "kbuf.h"
+ #include "rsrc.h"
++#include "poll.h"
+ #include "rw.h"
+ 
+ struct io_rw {
+@@ -956,8 +957,15 @@ int io_read_mshot(struct io_kiocb *req,
+ 		if (io_fill_cqe_req_aux(req,
+ 					issue_flags & IO_URING_F_COMPLETE_DEFER,
+ 					ret, cflags | IORING_CQE_F_MORE)) {
+-			if (issue_flags & IO_URING_F_MULTISHOT)
++			if (issue_flags & IO_URING_F_MULTISHOT) {
++				/*
++				 * Force retry, as we might have more data to
++				 * be read and otherwise it won't get retried
++				 * until (if ever) another poll is triggered.
++				 */
++				io_poll_multishot_retry(req);
+ 				return IOU_ISSUE_SKIP_COMPLETE;
++			}
+ 			return -EAGAIN;
+ 		}
+ 	}
diff --git a/queue-6.7/series b/queue-6.7/series
index 8384bc70db7..728dd9e7fbf 100644
--- a/queue-6.7/series
+++ b/queue-6.7/series
@@ -102,3 +102,7 @@ hrtimer-report-offline-hrtimer-enqueue.patch
 input-i8042-fix-strange-behavior-of-touchpad-on-clevo-ns70pu.patch
 input-atkbd-skip-atkbd_cmd_setleds-when-skipping-atkbd_cmd_getid.patch
 wifi-iwlwifi-mvm-fix-a-battery-life-regression.patch
+io_uring-net-fix-sr-len-for-ioring_op_recv-with-msg_waitall-and-buffers.patch
+io_uring-poll-move-poll-execution-helpers-higher-up.patch
+io_uring-net-un-indent-mshot-retry-path-in-io_recv_finish.patch
+io_uring-rw-ensure-poll-based-multishot-read-retries-appropriately.patch
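
On the multishot read patch above, a usage sketch, assuming liburing
2.5+ for io_uring_prep_read_multishot() and a hypothetical fd with
pending data. With 32-byte ring buffers and 64 bytes pending, the
second chunk previously had to wait for another poll wakeup; with the
fix, the next read is retried directly:

#include <liburing.h>
#include <stdlib.h>

#define BGID	7	/* hypothetical buffer group id */
#define NBUFS	8
#define BSIZE	32	/* matches the 32-byte example above */

static int arm_multishot_read(struct io_uring *ring, int fd)
{
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	int i, err;

	/* provided-buffer ring: each completion consumes one buffer */
	br = io_uring_setup_buf_ring(ring, NBUFS, BGID, 0, &err);
	if (!br)
		return err;
	for (i = 0; i < NBUFS; i++)
		io_uring_buf_ring_add(br, malloc(BSIZE), BSIZE, i,
				      io_uring_buf_ring_mask(NBUFS), i);
	io_uring_buf_ring_advance(br, NBUFS);

	/* one SQE, many CQEs: each read is capped at the buffer size */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_multishot(sqe, fd, 0, 0, BGID);
	return io_uring_submit(ring);
}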