git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 24 Apr 2026 08:35:04 +0000 (10:35 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 24 Apr 2026 08:35:04 +0000 (10:35 +0200)
added patches:
rxrpc-only-handle-response-during-service-challenge.patch

queue-6.12/rxrpc-only-handle-response-during-service-challenge.patch [new file with mode: 0644]
queue-6.12/series

diff --git a/queue-6.12/rxrpc-only-handle-response-during-service-challenge.patch b/queue-6.12/rxrpc-only-handle-response-during-service-challenge.patch
new file mode 100644 (file)
index 0000000..86d7788
--- /dev/null
@@ -0,0 +1,83 @@
+From stable+bounces-237804-greg=kroah.com@vger.kernel.org Tue Apr 14 13:21:18 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Apr 2026 07:21:03 -0400
+Subject: rxrpc: only handle RESPONSE during service challenge
+To: stable@vger.kernel.org
+Cc: Wang Jie <jiewang2024@lzu.edu.cn>, Yifan Wu <yifanwucs@gmail.com>, Juefei Pu <tomapufckgml@gmail.com>, Yuan Tan <yuantan098@gmail.com>, Xin Liu <bird@lzu.edu.cn>, Yang Yang <n05ec@lzu.edu.cn>, David Howells <dhowells@redhat.com>, Marc Dionne <marc.dionne@auristor.com>, Jeffrey Altman <jaltman@auristor.com>, Simon Horman <horms@kernel.org>, linux-afs@lists.infradead.org, stable@kernel.org, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260414112103.379483-2-sashal@kernel.org>
+
+From: Wang Jie <jiewang2024@lzu.edu.cn>
+
+[ Upstream commit c43ffdcfdbb5567b1f143556df8a04b4eeea041c ]
+
+Only process RESPONSE packets while the service connection is still in
+RXRPC_CONN_SERVICE_CHALLENGING. Check that state under state_lock before
+running response verification and security initialization, then use a local
+secured flag to decide whether to queue the secured-connection work after
+the state transition. This keeps duplicate or late RESPONSE packets from
+re-running the setup path and removes the unlocked post-transition state
+test.
+
+Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Jie Wang <jiewang2024@lzu.edu.cn>
+Signed-off-by: Yang Yang <n05ec@lzu.edu.cn>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: Jeffrey Altman <jaltman@auristor.com>
+cc: Simon Horman <horms@kernel.org>
+cc: linux-afs@lists.infradead.org
+cc: stable@kernel.org
+Link: https://patch.msgid.link/20260408121252.2249051-21-dhowells@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ adapted spin_lock_irq/spin_unlock_irq calls to spin_lock/spin_unlock ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/conn_event.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -233,6 +233,7 @@ static int rxrpc_process_event(struct rx
+                              struct sk_buff *skb)
+ {
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++      bool secured = false;
+       int ret;
+       if (conn->state == RXRPC_CONN_ABORTED)
+@@ -245,6 +246,13 @@ static int rxrpc_process_event(struct rx
+               return conn->security->respond_to_challenge(conn, skb);
+       case RXRPC_PACKET_TYPE_RESPONSE:
++              spin_lock(&conn->state_lock);
++              if (conn->state != RXRPC_CONN_SERVICE_CHALLENGING) {
++                      spin_unlock(&conn->state_lock);
++                      return 0;
++              }
++              spin_unlock(&conn->state_lock);
++
+               ret = conn->security->verify_response(conn, skb);
+               if (ret < 0)
+                       return ret;
+@@ -255,11 +263,13 @@ static int rxrpc_process_event(struct rx
+                       return ret;
+               spin_lock(&conn->state_lock);
+-              if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
++              if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
+                       conn->state = RXRPC_CONN_SERVICE;
++                      secured = true;
++              }
+               spin_unlock(&conn->state_lock);
+-              if (conn->state == RXRPC_CONN_SERVICE) {
++              if (secured) {
+                       /* Offload call state flipping to the I/O thread.  As
+                        * we've already received the packet, put it on the
+                        * front of the queue.
index 1cc5af2c85fe8a5d335be4d61685272dbc2ba3c5..2f3b586e3bc0ef37119bbdf0778a2b3360b77770 100644 (file)
@@ -10,3 +10,4 @@ scripts-generate_rust_analyzer.py-define-scripts.patch
 mm-pagewalk-fix-race-between-concurrent-split-and-refault.patch
 ksmbd-fix-use-after-free-in-__ksmbd_close_fd-via-durable-scavenger.patch
 scripts-dtc-remove-unused-dts_version-in-dtc-lexer.l.patch
+rxrpc-only-handle-response-during-service-challenge.patch