From: Greg Kroah-Hartman
Date: Mon, 15 Apr 2024 10:51:28 +0000 (+0200)
Subject: 6.8-stable patches
X-Git-Tag: v5.15.156~48
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=004c161ee79164c2ad8640fd83b6d7beaf001c82;p=thirdparty%2Fkernel%2Fstable-queue.git

6.8-stable patches

added patches:
        io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch
        io_uring-refactor-defer_taskrun-multishot-checks.patch
---

diff --git a/queue-6.8/io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch b/queue-6.8/io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch
new file mode 100644
index 00000000000..cca2feb7678
--- /dev/null
+++ b/queue-6.8/io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch
@@ -0,0 +1,44 @@
+From 5984bb9e2b72455e8e5a1925edde3079c9f949be Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Mon, 1 Apr 2024 11:30:06 -0600
+Subject: io_uring: disable io-wq execution of multishot NOWAIT requests
+
+From: Jens Axboe
+
+Commit bee1d5becdf5bf23d4ca0cd9c6b60bdf3c61d72b upstream.
+
+Do the same check for direct io-wq execution of multishot requests that
+commit 2a975d426c82 did for the inline execution, and disable multishot
+mode (reverting to single shot) if the file type doesn't support NOWAIT
+and isn't opened in O_NONBLOCK mode. For multishot to work properly, it
+is a requirement that nonblocking read attempts can be made.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/io_uring.c |   13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1964,10 +1964,15 @@ fail:
+                 err = -EBADFD;
+                 if (!file_can_poll(req->file))
+                         goto fail;
+-                err = -ECANCELED;
+-                if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
+-                        goto fail;
+-                return;
++                if (req->file->f_flags & O_NONBLOCK ||
++                    req->file->f_mode & FMODE_NOWAIT) {
++                        err = -ECANCELED;
++                        if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
++                                goto fail;
++                        return;
++                } else {
++                        req->flags &= ~REQ_F_APOLL_MULTISHOT;
++                }
+         }
+ 
+         if (req->flags & REQ_F_FORCE_ASYNC) {
diff --git a/queue-6.8/io_uring-refactor-defer_taskrun-multishot-checks.patch b/queue-6.8/io_uring-refactor-defer_taskrun-multishot-checks.patch
new file mode 100644
index 00000000000..8a80683352a
--- /dev/null
+++ b/queue-6.8/io_uring-refactor-defer_taskrun-multishot-checks.patch
@@ -0,0 +1,123 @@
+From 6fa8da25be5ad285fae51584ce1d461fec42bd94 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov
+Date: Fri, 8 Mar 2024 13:55:57 +0000
+Subject: io_uring: refactor DEFER_TASKRUN multishot checks
+
+From: Pavel Begunkov
+
+Commit e0e4ab52d17096d96c21a6805ccd424b283c3c6d upstream.
+
+We disallow DEFER_TASKRUN multishots from being run by io-wq, which is
+checked by the individual opcodes in the issue path. We can consolidate
+it all in io_wq_submit_work(), at the same time moving the checks out
+of the hot path.
+
+Suggested-by: Jens Axboe
+Signed-off-by: Pavel Begunkov
+Link: https://lore.kernel.org/r/e492f0f11588bb5aa11d7d24e6f53b7c7628afdb.1709905727.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ io_uring/io_uring.c |   20 ++++++++++++++++++++
+ io_uring/net.c      |   21 ---------------------
+ io_uring/rw.c       |    2 --
+ 3 files changed, 20 insertions(+), 23 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -949,6 +949,8 @@ bool io_fill_cqe_req_aux(struct io_kiocb
+         u64 user_data = req->cqe.user_data;
+         struct io_uring_cqe *cqe;
+ 
++        lockdep_assert(!io_wq_current_is_worker());
++
+         if (!defer)
+                 return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
+ 
+@@ -1950,6 +1952,24 @@ fail:
+                 goto fail;
+         }
+ 
++        /*
++         * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
++         * submitter task context. Final request completions are handed to the
++         * right context, however this is not the case for auxiliary CQEs,
++         * which are the main means of operation for multishot requests.
++         * Don't allow any multishot execution from io-wq. It's more restrictive
++         * than necessary and also cleaner.
++         */
++        if (req->flags & REQ_F_APOLL_MULTISHOT) {
++                err = -EBADFD;
++                if (!file_can_poll(req->file))
++                        goto fail;
++                err = -ECANCELED;
++                if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
++                        goto fail;
++                return;
++        }
++
+         if (req->flags & REQ_F_FORCE_ASYNC) {
+                 bool opcode_poll = def->pollin || def->pollout;
+ 
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -78,19 +78,6 @@ struct io_sr_msg {
+  */
+ #define MULTISHOT_MAX_RETRY     32
+ 
+-static inline bool io_check_multishot(struct io_kiocb *req,
+-                                      unsigned int issue_flags)
+-{
+-        /*
+-         * When ->locked_cq is set we only allow to post CQEs from the original
+-         * task context. Usual request completions will be handled in other
+-         * generic paths but multipoll may decide to post extra cqes.
+-         */
+-        return !(issue_flags & IO_URING_F_IOWQ) ||
+-               !(req->flags & REQ_F_APOLL_MULTISHOT) ||
+-               !req->ctx->task_complete;
+-}
+-
+ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+         struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
+@@ -837,9 +824,6 @@ int io_recvmsg(struct io_kiocb *req, uns
+             (sr->flags & IORING_RECVSEND_POLL_FIRST))
+                 return io_setup_async_msg(req, kmsg, issue_flags);
+ 
+-        if (!io_check_multishot(req, issue_flags))
+-                return io_setup_async_msg(req, kmsg, issue_flags);
+-
+ retry_multishot:
+         if (io_do_buffer_select(req)) {
+                 void __user *buf;
+@@ -935,9 +919,6 @@ int io_recv(struct io_kiocb *req, unsign
+             (sr->flags & IORING_RECVSEND_POLL_FIRST))
+                 return -EAGAIN;
+ 
+-        if (!io_check_multishot(req, issue_flags))
+-                return -EAGAIN;
+-
+         sock = sock_from_file(req->file);
+         if (unlikely(!sock))
+                 return -ENOTSOCK;
+@@ -1386,8 +1367,6 @@ int io_accept(struct io_kiocb *req, unsi
+         struct file *file;
+         int ret, fd;
+ 
+-        if (!io_check_multishot(req, issue_flags))
+-                return -EAGAIN;
+ retry:
+         if (!fixed) {
+                 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -932,8 +932,6 @@ int io_read_mshot(struct io_kiocb *req,
+          */
+         if (!file_can_poll(req->file))
+                 return -EBADFD;
+-        if (issue_flags & IO_URING_F_IOWQ)
+-                return -EAGAIN;
+ 
+         ret = __io_read(req, issue_flags);
+ 
diff --git a/queue-6.8/series b/queue-6.8/series
index ef2c1b7a8cf..b8be2ebf2fb 100644
--- a/queue-6.8/series
+++ b/queue-6.8/series
@@ -104,3 +104,5 @@ tracing-hide-unused-ftrace_event_id_fops.patch
 iommu-vt-d-fix-wrong-use-of-pasid-config.patch
 iommu-vt-d-allocate-local-memory-for-page-request-qu.patch
 iommu-vt-d-fix-warn_on-in-iommu-probe-path.patch
+io_uring-refactor-defer_taskrun-multishot-checks.patch
+io_uring-disable-io-wq-execution-of-multishot-nowait-requests.patch
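
The gating logic that the NOWAIT patch above adds to io_wq_submit_work() can be read on its own: a multishot request may stay armed for poll-driven retries only if nonblocking reads are possible, i.e. the file either advertises FMODE_NOWAIT or was opened with O_NONBLOCK; otherwise the request is downgraded to single shot. Below is a minimal, self-contained C sketch of that decision, for illustration only; the struct layouts, flag values and the keep_multishot() helper are stand-ins invented for this sketch, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values for illustration; the real ones live in kernel headers. */
#define O_NONBLOCK              04000
#define FMODE_NOWAIT            (1u << 7)
#define REQ_F_APOLL_MULTISHOT   (1u << 0)

struct fake_file {
        unsigned int f_flags;   /* open(2) flags, e.g. O_NONBLOCK */
        unsigned int f_mode;    /* file mode bits, e.g. FMODE_NOWAIT */
};

struct fake_req {
        struct fake_file *file;
        unsigned int flags;     /* request flags, e.g. REQ_F_APOLL_MULTISHOT */
};

/*
 * Mirror of the check the patch adds: keep multishot (and arm poll) only
 * when a nonblocking read attempt is possible; otherwise strip the
 * multishot flag so the request is issued as a single shot.
 */
static bool keep_multishot(struct fake_req *req)
{
        if ((req->file->f_flags & O_NONBLOCK) ||
            (req->file->f_mode & FMODE_NOWAIT))
                return true;

        req->flags &= ~REQ_F_APOLL_MULTISHOT;
        return false;
}

int main(void)
{
        struct fake_file blocking = { .f_flags = 0, .f_mode = 0 };
        struct fake_file nowait   = { .f_flags = 0, .f_mode = FMODE_NOWAIT };
        struct fake_req a = { .file = &blocking, .flags = REQ_F_APOLL_MULTISHOT };
        struct fake_req b = { .file = &nowait,   .flags = REQ_F_APOLL_MULTISHOT };

        bool keep_a = keep_multishot(&a);
        bool keep_b = keep_multishot(&b);

        printf("blocking file: keep multishot? %d, flags now %#x\n", keep_a, a.flags);
        printf("nowait file:   keep multishot? %d, flags now %#x\n", keep_b, b.flags);
        return 0;
}

Compiled as an ordinary user-space program, the sketch shows the request on the plain blocking file losing REQ_F_APOLL_MULTISHOT while the FMODE_NOWAIT file keeps it, which is the same downgrade the patch performs before falling back to normal single-shot issue.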