From e7f02d82c8da6a2ac220e7e9fa9a5290a2da3150 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 12 Aug 2025 11:12:58 +0200
Subject: [PATCH] 6.12-stable patches

added patches:
      sunrpc-fix-handling-of-server-side-tls-alerts.patch
---
 ...l-__put_task_struct-on-rt-if-pi_bloc.patch |  97 ---------------
 queue-6.12/series                             |   2 +-
 ...x-handling-of-server-side-tls-alerts.patch | 114 ++++++++++++++++++
 3 files changed, 115 insertions(+), 98 deletions(-)
 delete mode 100644 queue-6.12/sched-do-not-call-__put_task_struct-on-rt-if-pi_bloc.patch
 create mode 100644 queue-6.12/sunrpc-fix-handling-of-server-side-tls-alerts.patch

diff --git a/queue-6.12/sched-do-not-call-__put_task_struct-on-rt-if-pi_bloc.patch b/queue-6.12/sched-do-not-call-__put_task_struct-on-rt-if-pi_bloc.patch
deleted file mode 100644
index 4e1839c898..0000000000
--- a/queue-6.12/sched-do-not-call-__put_task_struct-on-rt-if-pi_bloc.patch
+++ /dev/null
@@ -1,97 +0,0 @@
-From 38ff856aafe4adc4349e80e3d7a3c24ce279fdfa Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Mon, 7 Jul 2025 11:03:59 -0300
-Subject: sched: Do not call __put_task_struct() on rt if pi_blocked_on is set
-
-From: Luis Claudio R. Goncalves
-
-[ Upstream commit 8671bad873ebeb082afcf7b4501395c374da6023 ]
-
-With PREEMPT_RT enabled, some of the calls to put_task_struct() coming
-from rt_mutex_adjust_prio_chain() could happen in preemptible context and
-with a mutex enqueued. That could lead to this sequence:
-
-  rt_mutex_adjust_prio_chain()
-    put_task_struct()
-      __put_task_struct()
-        sched_ext_free()
-          spin_lock_irqsave()
-            rtlock_lock() ---> TRIGGERS
-              lockdep_assert(!current->pi_blocked_on);
-
-This is not a SCHED_EXT bug. The first cleanup function called by
-__put_task_struct() is sched_ext_free() and it happens to take a
-(RT) spin_lock, which in the scenario described above, would trigger
-the lockdep assertion of "!current->pi_blocked_on".
-
-Crystal Wood was able to identify the problem as __put_task_struct()
-being called during rt_mutex_adjust_prio_chain(), in the context of
-a process with a mutex enqueued.
-
-Instead of adding more complex conditions to decide when to directly
-call __put_task_struct() and when to defer the call, unconditionally
-resort to the deferred call on PREEMPT_RT to simplify the code.
-
-Fixes: 893cdaaa3977 ("sched: avoid false lockdep splat in put_task_struct()")
-Suggested-by: Crystal Wood
-Signed-off-by: Luis Claudio R. Goncalves
-Signed-off-by: Peter Zijlstra (Intel)
-Reviewed-by: Wander Lairson Costa
-Reviewed-by: Valentin Schneider
-Reviewed-by: Sebastian Andrzej Siewior
-Link: https://lore.kernel.org/r/aGvTz5VaPFyj0pBV@uudg.org
-Signed-off-by: Sasha Levin
----
- include/linux/sched/task.h | 27 ++++++++++-----------------
- 1 file changed, 10 insertions(+), 17 deletions(-)
-
-diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
-index ca1db4b92c32..58ce71715268 100644
---- a/include/linux/sched/task.h
-+++ b/include/linux/sched/task.h
-@@ -135,24 +135,17 @@ static inline void put_task_struct(struct task_struct *t)
- 		return;
- 
- 	/*
--	 * In !RT, it is always safe to call __put_task_struct().
--	 * Under RT, we can only call it in preemptible context.
--	 */
--	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
--		static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
--
--		lock_map_acquire_try(&put_task_map);
--		__put_task_struct(t);
--		lock_map_release(&put_task_map);
--		return;
--	}
--
--	/*
--	 * under PREEMPT_RT, we can't call put_task_struct
-+	 * Under PREEMPT_RT, we can't call __put_task_struct
- 	 * in atomic context because it will indirectly
--	 * acquire sleeping locks.
-+	 * acquire sleeping locks. The same is true if the
-+	 * current process has a mutex enqueued (blocked on
-+	 * a PI chain).
-+	 *
-+	 * In !RT, it is always safe to call __put_task_struct().
-+	 * Though, in order to simplify the code, resort to the
-+	 * deferred call too.
- 	 *
--	 * call_rcu() will schedule delayed_put_task_struct_rcu()
-+	 * call_rcu() will schedule __put_task_struct_rcu_cb()
- 	 * to be called in process context.
- 	 *
- 	 * __put_task_struct() is called when
-@@ -165,7 +158,7 @@ static inline void put_task_struct(struct task_struct *t)
- 	 *
- 	 * delayed_free_task() also uses ->rcu, but it is only called
- 	 * when it fails to fork a process. Therefore, there is no
--	 * way it can conflict with put_task_struct().
-+	 * way it can conflict with __put_task_struct().
- 	 */
- 	call_rcu(&t->rcu, __put_task_struct_rcu_cb);
- }
--- 
-2.39.5
-
diff --git a/queue-6.12/series b/queue-6.12/series
index db4c2ea2e2..167fec5c70 100644
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -105,7 +105,6 @@ net_sched-act_ctinfo-use-atomic64_t-for-three-counte.patch
 rdma-mlx5-fix-umr-modifying-of-mkey-page-size.patch
 xen-fix-uaf-in-dmabuf_exp_from_pages.patch
 xen-gntdev-remove-struct-gntdev_copy_batch-from-stac.patch
-sched-do-not-call-__put_task_struct-on-rt-if-pi_bloc.patch
 tcp-call-tcp_measure_rcv_mss-for-ooo-packets.patch
 wifi-rtl8xxxu-fix-rx-skb-size-for-aggregation-disabl.patch
 wifi-rtw88-fix-macid-assigned-to-tdls-station.patch
@@ -321,3 +320,4 @@ smb-client-let-recv_done-avoid-touching-data_transfe.patch
 nvmet-exit-debugfs-after-discovery-subsystem-exits.patch
 pptp-fix-pptp_xmit-error-path.patch
 smb-client-return-an-error-if-rdma_connect-does-not-.patch
+sunrpc-fix-handling-of-server-side-tls-alerts.patch
diff --git a/queue-6.12/sunrpc-fix-handling-of-server-side-tls-alerts.patch b/queue-6.12/sunrpc-fix-handling-of-server-side-tls-alerts.patch
new file mode 100644
index 0000000000..09d438c183
--- /dev/null
+++ b/queue-6.12/sunrpc-fix-handling-of-server-side-tls-alerts.patch
@@ -0,0 +1,114 @@
+From bee47cb026e762841f3faece47b51f985e215edb Mon Sep 17 00:00:00 2001
+From: Olga Kornievskaia
+Date: Tue, 29 Jul 2025 12:40:20 -0400
+Subject: sunrpc: fix handling of server side tls alerts
+
+From: Olga Kornievskaia
+
+commit bee47cb026e762841f3faece47b51f985e215edb upstream.
+
+Scott Mayhew discovered a security exploit in NFS over TLS in
+tls_alert_recv() due to its assumption it can read data from
+the msg iterator's kvec..
+
+kTLS implementation splits TLS non-data record payload between
+the control message buffer (which includes the type such as TLS
+alert or TLS cipher change) and the rest of the payload (say TLS
+alert's level/description) which goes into the msg payload buffer.
+
+This patch proposes to rework how control messages are setup and
+used by sock_recvmsg().
+
+If no control message structure is setup, kTLS layer will read and
+process TLS data record types. As soon as it encounters a TLS control
+message, it would return an error. At that point, NFS can setup a
+kvec backed msg buffer and read in the control message such as a
+TLS alert. Msg iterator can advance the kvec pointer as a part of
+the copy process thus we need to revert the iterator before calling
+into the tls_alert_recv.
+
+Reported-by: Scott Mayhew
+Fixes: 5e052dda121e ("SUNRPC: Recognize control messages in server-side TCP socket code")
+Suggested-by: Trond Myklebust
+Cc: stable@vger.kernel.org
+Signed-off-by: Olga Kornievskaia
+Signed-off-by: Chuck Lever
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/sunrpc/svcsock.c | 43 +++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 35 insertions(+), 8 deletions(-)
+
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -257,20 +257,47 @@ svc_tcp_sock_process_cmsg(struct socket
+ }
+ 
+ static int
+-svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
++svc_tcp_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags)
+ {
+ 	union {
+ 		struct cmsghdr	cmsg;
+ 		u8		buf[CMSG_SPACE(sizeof(u8))];
+ 	} u;
+-	struct socket *sock = svsk->sk_sock;
++	u8 alert[2];
++	struct kvec alert_kvec = {
++		.iov_base = alert,
++		.iov_len = sizeof(alert),
++	};
++	struct msghdr msg = {
++		.msg_flags = *msg_flags,
++		.msg_control = &u,
++		.msg_controllen = sizeof(u),
++	};
++	int ret;
++
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
++		      alert_kvec.iov_len);
++	ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
++	if (ret > 0 &&
++	    tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
++		iov_iter_revert(&msg.msg_iter, ret);
++		ret = svc_tcp_sock_process_cmsg(sock, &msg, &u.cmsg, -EAGAIN);
++	}
++	return ret;
++}
++
++static int
++svc_tcp_sock_recvmsg(struct svc_sock *svsk, struct msghdr *msg)
++{
+ 	int ret;
++	struct socket *sock = svsk->sk_sock;
+ 
+-	msg->msg_control = &u;
+-	msg->msg_controllen = sizeof(u);
+ 	ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
+-	if (unlikely(msg->msg_controllen != sizeof(u)))
+-		ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
++	if (msg->msg_flags & MSG_CTRUNC) {
++		msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
++		if (ret == 0 || ret == -EIO)
++			ret = svc_tcp_sock_recv_cmsg(sock, &msg->msg_flags);
++	}
+ 	return ret;
+ }
+ 
+@@ -321,7 +348,7 @@ static ssize_t svc_tcp_read_msg(struct s
+ 		iov_iter_advance(&msg.msg_iter, seek);
+ 		buflen -= seek;
+ 	}
+-	len = svc_tcp_sock_recv_cmsg(svsk, &msg);
++	len = svc_tcp_sock_recvmsg(svsk, &msg);
+ 	if (len > 0)
+ 		svc_flush_bvec(bvec, len, seek);
+ 
+@@ -1019,7 +1046,7 @@ static ssize_t svc_tcp_read_marker(struc
+ 		iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
+ 		iov.iov_len = want;
+ 		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
+-		len = svc_tcp_sock_recv_cmsg(svsk, &msg);
++		len = svc_tcp_sock_recvmsg(svsk, &msg);
+ 		if (len < 0)
+ 			return len;
+ 		svsk->sk_tcplen += len;
-- 
2.47.3