git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.15-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 2 Jul 2025 08:50:03 +0000 (10:50 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 2 Jul 2025 08:50:03 +0000 (10:50 +0200)
added patches:
io_uring-kbuf-flag-partial-buffer-mappings.patch
s390-ptrace-fix-pointer-dereferencing-in-regs_get_kernel_stack_nth.patch

queue-6.15/io_uring-kbuf-flag-partial-buffer-mappings.patch [new file with mode: 0644]
queue-6.15/s390-ptrace-fix-pointer-dereferencing-in-regs_get_kernel_stack_nth.patch [new file with mode: 0644]
queue-6.15/series

diff --git a/queue-6.15/io_uring-kbuf-flag-partial-buffer-mappings.patch b/queue-6.15/io_uring-kbuf-flag-partial-buffer-mappings.patch
new file mode 100644 (file)
index 0000000..2343306
--- /dev/null
@@ -0,0 +1,142 @@
+From d0077a0341132e979b542cc5571938e89071c953 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 26 Jun 2025 12:17:48 -0600
+Subject: io_uring/kbuf: flag partial buffer mappings
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 178b8ff66ff827c41b4fa105e9aabb99a0b5c537 upstream.
+
+A previous commit aborted mapping more for a non-incremental ring for
+bundle peeking, but depending on where in the process this peeking
+happened, it would not necessarily prevent a retry by the user. That can
+create gaps in the received/read data.
+
+Add struct buf_sel_arg->partial_map, which can pass this information
+back. The networking side can then map that to internal state and use it
+to gate retry as well.
+
+Since this necessitates a new flag, change io_sr_msg->retry to a
+retry_flags member, and store both the retry and partial map condition
+in there.
+
+Cc: stable@vger.kernel.org
+Fixes: 26ec15e4b0c1 ("io_uring/kbuf: don't truncate end buffer for multiple buffer peeks")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/kbuf.c |    1 +
+ io_uring/kbuf.h |    1 +
+ io_uring/net.c  |   23 +++++++++++++++--------
+ 3 files changed, 17 insertions(+), 8 deletions(-)
+
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -271,6 +271,7 @@ static int io_ring_buffers_peek(struct i
+               if (len > arg->max_len) {
+                       len = arg->max_len;
+                       if (!(bl->flags & IOBL_INC)) {
++                              arg->partial_map = 1;
+                               if (iov != arg->iovs)
+                                       break;
+                               buf->len = len;
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -55,6 +55,7 @@ struct buf_sel_arg {
+       size_t max_len;
+       unsigned short nr_iovs;
+       unsigned short mode;
++      unsigned short partial_map;
+ };
+ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -76,12 +76,17 @@ struct io_sr_msg {
+       u16                             flags;
+       /* initialised and used only by !msg send variants */
+       u16                             buf_group;
+-      bool                            retry;
++      unsigned short                  retry_flags;
+       void __user                     *msg_control;
+       /* used only for send zerocopy */
+       struct io_kiocb                 *notif;
+ };
++enum sr_retry_flags {
++      IO_SR_MSG_RETRY         = 1,
++      IO_SR_MSG_PARTIAL_MAP   = 2,
++};
++
+ /*
+  * Number of times we'll try and do receives if there's more data. If we
+  * exceed this limit, then add us to the back of the queue and retry from
+@@ -188,7 +193,7 @@ static inline void io_mshot_prep_retry(s
+       req->flags &= ~REQ_F_BL_EMPTY;
+       sr->done_io = 0;
+-      sr->retry = false;
++      sr->retry_flags = 0;
+       sr->len = 0; /* get from the provided buffer */
+       req->buf_index = sr->buf_group;
+ }
+@@ -401,7 +406,7 @@ int io_sendmsg_prep(struct io_kiocb *req
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+       sr->done_io = 0;
+-      sr->retry = false;
++      sr->retry_flags = 0;
+       sr->len = READ_ONCE(sqe->len);
+       sr->flags = READ_ONCE(sqe->ioprio);
+       if (sr->flags & ~SENDMSG_FLAGS)
+@@ -759,7 +764,7 @@ int io_recvmsg_prep(struct io_kiocb *req
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+       sr->done_io = 0;
+-      sr->retry = false;
++      sr->retry_flags = 0;
+       if (unlikely(sqe->file_index || sqe->addr2))
+               return -EINVAL;
+@@ -831,7 +836,7 @@ static inline bool io_recv_finish(struct
+               cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
+                                     issue_flags);
+-              if (sr->retry)
++              if (sr->retry_flags & IO_SR_MSG_RETRY)
+                       cflags = req->cqe.flags | (cflags & CQE_F_MASK);
+               /* bundle with no more immediate buffers, we're done */
+               if (req->flags & REQ_F_BL_EMPTY)
+@@ -840,12 +845,12 @@ static inline bool io_recv_finish(struct
+                * If more is available AND it was a full transfer, retry and
+                * append to this one
+                */
+-              if (!sr->retry && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
++              if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+                   !iov_iter_count(&kmsg->msg.msg_iter)) {
+                       req->cqe.flags = cflags & ~CQE_F_MASK;
+                       sr->len = kmsg->msg.msg_inq;
+                       sr->done_io += this_ret;
+-                      sr->retry = true;
++                      sr->retry_flags |= IO_SR_MSG_RETRY;
+                       return false;
+               }
+       } else {
+@@ -1089,6 +1094,8 @@ static int io_recv_buf_select(struct io_
+                       kmsg->vec.iovec = arg.iovs;
+                       req->flags |= REQ_F_NEED_CLEANUP;
+               }
++              if (arg.partial_map)
++                      sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
+               /* special case 1 vec, can be a fast path */
+               if (ret == 1) {
+@@ -1285,7 +1292,7 @@ int io_send_zc_prep(struct io_kiocb *req
+       int ret;
+       zc->done_io = 0;
+-      zc->retry = false;
++      zc->retry_flags = 0;
+       if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
+               return -EINVAL;
diff --git a/queue-6.15/s390-ptrace-fix-pointer-dereferencing-in-regs_get_kernel_stack_nth.patch b/queue-6.15/s390-ptrace-fix-pointer-dereferencing-in-regs_get_kernel_stack_nth.patch
new file mode 100644 (file)
index 0000000..3745e37
--- /dev/null
@@ -0,0 +1,38 @@
+From 7f8073cfb04a97842fe891ca50dad60afd1e3121 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Fri, 13 Jun 2025 17:53:04 +0200
+Subject: s390/ptrace: Fix pointer dereferencing in regs_get_kernel_stack_nth()
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit 7f8073cfb04a97842fe891ca50dad60afd1e3121 upstream.
+
+The recent change which added READ_ONCE_NOCHECK() to read the nth entry
+from the kernel stack incorrectly dropped dereferencing of the stack
+pointer in order to read the requested entry.
+
+In result the address of the entry is returned instead of its content.
+
+Dereference the pointer again to fix this.
+
+Reported-by: Will Deacon <will@kernel.org>
+Closes: https://lore.kernel.org/r/20250612163331.GA13384@willie-the-truck
+Fixes: d93a855c31b7 ("s390/ptrace: Avoid KASAN false positives in regs_get_kernel_stack_nth()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/ptrace.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -1574,5 +1574,5 @@ unsigned long regs_get_kernel_stack_nth(
+       addr = kernel_stack_pointer(regs) + n * sizeof(long);
+       if (!regs_within_kernel_stack(regs, addr))
+               return 0;
+-      return READ_ONCE_NOCHECK(addr);
++      return READ_ONCE_NOCHECK(*(unsigned long *)addr);
+ }
index b0a0236acbfc887c2ecb0f4f531784161f6fd05a..143a2a6819265dac75e9854f308e6d6f2e0619e0 100644 (file)
@@ -258,3 +258,5 @@ rust-devres-fix-race-in-devres-drop.patch
 rust-devres-do-not-dereference-to-the-internal-revocable.patch
 x86-fpu-refactor-xfeature-bitmask-update-code-for-sigframe-xsave.patch
 x86-pkeys-simplify-pkru-update-in-signal-frame.patch
+s390-ptrace-fix-pointer-dereferencing-in-regs_get_kernel_stack_nth.patch
+io_uring-kbuf-flag-partial-buffer-mappings.patch