--- /dev/null
+From 626dfed5fa3bfb41e0dffd796032b555b69f9cde Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 4 Jul 2024 08:41:57 +0200
+Subject: net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 626dfed5fa3bfb41e0dffd796032b555b69f9cde upstream.
+
+When using a BPF program on kernel_connect(), the call can return -EPERM. This
+causes xs_tcp_setup_socket() to loop forever, filling up the syslog and
+potentially causing the kernel to freeze up.
+
+Neil suggested:
+
+ This will propagate -EPERM up into other layers which might not be ready
+ to handle it. It might be safer to map EPERM to an error we would be more
+ likely to expect from the network system - such as ECONNREFUSED or ENETDOWN.
+
+ECONNREFUSED seems a reasonable error to map to. For BPF programs, setting a
+different error code can be out of reach (see the handling in 4fbac77d2d09),
+in particular on kernels which do not have f10d05966196 ("bpf: Make
+BPF_PROG_RUN_ARRAY return -err instead of allow boolean"); given that, it is
+better to simply remap for consistent behavior. UDP already handles EPERM in
+xs_udp_send_request().
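+
+For illustration only (not part of the upstream change; the file, section and
+function names below are just an example): a minimal cgroup BPF program that
+rejects IPv4 connect() attempts is enough to make kernel_connect() return
+-EPERM and, before this patch, to drive xs_tcp_setup_socket() into the retry
+loop described above:
+
+  /* deny_connect.bpf.c - hypothetical example, built with clang/libbpf */
+  #include <linux/bpf.h>
+  #include <bpf/bpf_helpers.h>
+
+  SEC("cgroup/connect4")
+  int deny_connect4(struct bpf_sock_addr *ctx)
+  {
+          /* Verdict 0 rejects the connection; the caller's
+           * kernel_connect()/connect() fails with -EPERM.
+           */
+          return 0;
+  }
+
+  char _license[] SEC("license") = "GPL";
+
+Once such a program is attached to a cgroup covering the mount (e.g. via
+bpftool cgroup attach ... connect4), every TCP connect attempt of the RPC
+transport fails with -EPERM, which is now remapped to -ECONNREFUSED below.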
+
+Fixes: d74bad4e74ee ("bpf: Hooks for sys_connect")
+Fixes: 4fbac77d2d09 ("bpf: Hooks for sys_bind")
+Co-developed-by: Lex Siegel <usiegl00@gmail.com>
+Signed-off-by: Lex Siegel <usiegl00@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Neil Brown <neilb@suse.de>
+Cc: Trond Myklebust <trondmy@kernel.org>
+Cc: Anna Schumaker <anna@kernel.org>
+Link: https://github.com/cilium/cilium/issues/33395
+Link: https://lore.kernel.org/bpf/171374175513.12877.8993642908082014881@noble.neil.brown.name
+Link: https://patch.msgid.link/9069ec1d59e4b2129fc23433349fd5580ad43921.1720075070.git.daniel@iogearbox.net
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Hugo SIMELIERE <hsimeliere.opensource@witekio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/xprtsock.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2437,6 +2437,13 @@ static void xs_tcp_setup_socket(struct w
+ case -EALREADY:
+ xprt_unlock_connect(xprt, transport);
+ return;
++ case -EPERM:
++ /* Happens, for instance, if a BPF program is preventing
++ * the connect. Remap the error so upper layers can better
++ * deal with it.
++ */
++ status = -ECONNREFUSED;
++ fallthrough;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+ * local IPv6 address without a scope-id.
--- /dev/null
+From d33d26036a0274b472299d7dcdaa5fb34329f91b Mon Sep 17 00:00:00 2001
+From: Roland Xu <mu001999@outlook.com>
+Date: Thu, 15 Aug 2024 10:58:13 +0800
+Subject: rtmutex: Drop rt_mutex::wait_lock before scheduling
+
+From: Roland Xu <mu001999@outlook.com>
+
+commit d33d26036a0274b472299d7dcdaa5fb34329f91b upstream.
+
+rt_mutex_handle_deadlock() is called with rt_mutex::wait_lock held. In the
+good case it returns with the lock held, and in the deadlock case it emits a
+warning and goes into an endless scheduling loop with the lock held, which
+triggers the 'scheduling while atomic' warning.
+
+Unlock rt_mutex::wait_lock in the deadlock case before issuing the warning
+and dropping into the schedule-forever loop.
+
+[ tglx: Moved unlock before the WARN(), removed the pointless comment,
+ massaged changelog, added Fixes tag ]
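+
+To make the ordering concrete, here is a rough reconstruction (from the hunks
+below plus context that is not part of this diff, so treat it as an
+illustrative sketch rather than the exact source) of the helper as it reads
+with this change applied:
+
+  static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                       struct rt_mutex *lock,
+                                       struct rt_mutex_waiter *w)
+  {
+          /* Only act if a deadlock was detected implicitly, i.e. the
+           * caller did not ask for explicit deadlock detection.
+           */
+          if (res != -EDEADLOCK || detect_deadlock)
+                  return;
+
+          /* Drop the wait_lock taken by rt_mutex_slowlock() before
+           * parking the task, so that schedule() below is no longer
+           * called with a raw spinlock held ("scheduling while atomic").
+           */
+          raw_spin_unlock_irq(&lock->wait_lock);
+
+          WARN(1, "rtmutex deadlock detected\n");
+          while (1) {
+                  set_current_state(TASK_INTERRUPTIBLE);
+                  schedule();
+          }
+  }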
+
+Fixes: 3d5c9340d194 ("rtmutex: Handle deadlock detection smarter")
+Signed-off-by: Roland Xu <mu001999@outlook.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/ME0P300MB063599BEF0743B8FA339C2CECC802@ME0P300MB0635.AUSP300.PROD.OUTLOOK.COM
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/rtmutex.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1204,6 +1204,7 @@ __rt_mutex_slowlock(struct rt_mutex *loc
+ }
+
+ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++ struct rt_mutex *lock,
+ struct rt_mutex_waiter *w)
+ {
+ /*
+@@ -1213,6 +1214,7 @@ static void rt_mutex_handle_deadlock(int
+ if (res != -EDEADLOCK || detect_deadlock)
+ return;
+
++ raw_spin_unlock_irq(&lock->wait_lock);
+ /*
+ * Yell loudly and stop the task right here.
+ */
+@@ -1268,7 +1270,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ if (unlikely(ret)) {
+ __set_current_state(TASK_RUNNING);
+ remove_waiter(lock, &waiter);
+- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++ rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
+ }
+
+ /*
nvmet-tcp-fix-kernel-crash-if-commands-allocation-fa.patch
drm-i915-fence-mark-debug_fence_init_onstack-with-__.patch
drm-i915-fence-mark-debug_fence_free-with-__maybe_un.patch
+rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch
+net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch