From ba00587967f25c7130660a59538a4cced9ec1fbf Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 10 Sep 2024 09:36:39 +0200
Subject: [PATCH] 5.4-stable patches

added patches:
	net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
	rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch

---
 ...ction-failure-in-xs_tcp_setup_socket.patch | 59 +++++++++++++++++++
 ...rt_mutex-wait_lock-before-scheduling.patch | 57 ++++++++++++++++++
 queue-5.4/series                              |  2 +
 3 files changed, 118 insertions(+)
 create mode 100644 queue-5.4/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
 create mode 100644 queue-5.4/rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch

diff --git a/queue-5.4/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch b/queue-5.4/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
new file mode 100644
index 00000000000..869cda807aa
--- /dev/null
+++ b/queue-5.4/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
@@ -0,0 +1,59 @@
+From 626dfed5fa3bfb41e0dffd796032b555b69f9cde Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann
+Date: Thu, 4 Jul 2024 08:41:57 +0200
+Subject: net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket
+
+From: Daniel Borkmann
+
+commit 626dfed5fa3bfb41e0dffd796032b555b69f9cde upstream.
+
+When using a BPF program on kernel_connect(), the call can return -EPERM. This
+causes xs_tcp_setup_socket() to loop forever, filling up the syslog and causing
+the kernel to potentially freeze up.
+
+Neil suggested:
+
+  This will propagate -EPERM up into other layers which might not be ready
+  to handle it. It might be safer to map EPERM to an error we would be more
+  likely to expect from the network system - such as ECONNREFUSED or ENETDOWN.
+
+ECONNREFUSED as the error seems reasonable. For BPF programs, setting a different
+error can be out of reach (see the handling in 4fbac77d2d09), in particular on
+kernels which do not have f10d05966196 ("bpf: Make BPF_PROG_RUN_ARRAY return -err
+instead of allow boolean"); given that, it is better to simply remap for
+consistent behavior. UDP already handles EPERM in xs_udp_send_request().
+
+Fixes: d74bad4e74ee ("bpf: Hooks for sys_connect")
+Fixes: 4fbac77d2d09 ("bpf: Hooks for sys_bind")
+Co-developed-by: Lex Siegel
+Signed-off-by: Lex Siegel
+Signed-off-by: Daniel Borkmann
+Cc: Neil Brown
+Cc: Trond Myklebust
+Cc: Anna Schumaker
+Link: https://github.com/cilium/cilium/issues/33395
+Link: https://lore.kernel.org/bpf/171374175513.12877.8993642908082014881@noble.neil.brown.name
+Link: https://patch.msgid.link/9069ec1d59e4b2129fc23433349fd5580ad43921.1720075070.git.daniel@iogearbox.net
+Signed-off-by: Paolo Abeni
+Signed-off-by: Hugo SIMELIERE
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/sunrpc/xprtsock.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2437,6 +2437,13 @@ static void xs_tcp_setup_socket(struct w
+ 	case -EALREADY:
+ 		xprt_unlock_connect(xprt, transport);
+ 		return;
++	case -EPERM:
++		/* Happens, for instance, if a BPF program is preventing
++		 * the connect. Remap the error so upper layers can better
++		 * deal with it.
++		 */
++		status = -ECONNREFUSED;
++		fallthrough;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+ 		 * local IPv6 address without a scope-id.
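The hunk above boils down to one extra case in xs_tcp_setup_socket()'s status
switch: an -EPERM coming back from the connect attempt (for instance because a
BPF program attached at connect time denied it) is remapped to -ECONNREFUSED
and then falls through into the same handling the code already uses for -EINVAL
and the other expected connect errors, so the transport backs off and retries
instead of looping hard on -EPERM. A minimal standalone C sketch of that
remapping pattern, with a hypothetical helper name and no kernel dependencies,
purely for illustration:

#include <errno.h>
#include <stdio.h>

/*
 * Illustrative sketch only, not the kernel code: map a "permission
 * denied" connect result onto "connection refused" so callers fall
 * back to their normal retry/backoff path instead of spinning on an
 * errno they do not expect.
 */
static int remap_connect_status(int status)
{
	switch (status) {
	case -EPERM:
		/* e.g. a BPF program prevented the connect */
		return -ECONNREFUSED;
	default:
		return status;
	}
}

int main(void)
{
	printf("-EPERM (%d) is remapped to %d, -ECONNREFUSED is %d\n",
	       -EPERM, remap_connect_status(-EPERM), -ECONNREFUSED);
	return 0;
}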
diff --git a/queue-5.4/rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch b/queue-5.4/rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch
new file mode 100644
index 00000000000..4054a27ef9c
--- /dev/null
+++ b/queue-5.4/rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch
@@ -0,0 +1,57 @@
+From d33d26036a0274b472299d7dcdaa5fb34329f91b Mon Sep 17 00:00:00 2001
+From: Roland Xu
+Date: Thu, 15 Aug 2024 10:58:13 +0800
+Subject: rtmutex: Drop rt_mutex::wait_lock before scheduling
+
+From: Roland Xu
+
+commit d33d26036a0274b472299d7dcdaa5fb34329f91b upstream.
+
+rt_mutex_handle_deadlock() is called with rt_mutex::wait_lock held. In the
+good case it returns with the lock held and in the deadlock case it emits a
+warning and goes into an endless scheduling loop with the lock held, which
+triggers the 'scheduling in atomic' warning.
+
+Unlock rt_mutex::wait_lock in the deadlock case before issuing the warning
+and dropping into the schedule-forever loop.
+
+[ tglx: Moved unlock before the WARN(), removed the pointless comment,
+  massaged changelog, added Fixes tag ]
+
+Fixes: 3d5c9340d194 ("rtmutex: Handle deadlock detection smarter")
+Signed-off-by: Roland Xu
+Signed-off-by: Thomas Gleixner
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/ME0P300MB063599BEF0743B8FA339C2CECC802@ME0P300MB0635.AUSP300.PROD.OUTLOOK.COM
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/locking/rtmutex.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1204,6 +1204,7 @@ __rt_mutex_slowlock(struct rt_mutex *loc
+ }
+ 
+ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++				     struct rt_mutex *lock,
+ 				     struct rt_mutex_waiter *w)
+ {
+ 	/*
+@@ -1213,6 +1214,7 @@ static void rt_mutex_handle_deadlock(int
+ 	if (res != -EDEADLOCK || detect_deadlock)
+ 		return;
+ 
++	raw_spin_unlock_irq(&lock->wait_lock);
+ 	/*
+ 	 * Yell lowdly and stop the task right here.
+ 	 */
+@@ -1268,7 +1270,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ 	if (unlikely(ret)) {
+ 		__set_current_state(TASK_RUNNING);
+ 		remove_waiter(lock, &waiter);
+-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++		rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
+ 	}
+ 
+ 	/*
diff --git a/queue-5.4/series b/queue-5.4/series
index c8e5f6274d9..a1e62ab3eee 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -117,3 +117,5 @@ arm64-acpi-harden-get_cpu_for_acpi_id-against-missin.patch
 nvmet-tcp-fix-kernel-crash-if-commands-allocation-fa.patch
 drm-i915-fence-mark-debug_fence_init_onstack-with-__.patch
 drm-i915-fence-mark-debug_fence_free-with-__maybe_un.patch
+rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch
+net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
-- 
2.47.3
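The rtmutex change is about ordering: rt_mutex_handle_deadlock() must release
rt_mutex::wait_lock before it parks the task in its endless schedule() loop,
otherwise it sleeps while still holding a raw spinlock, which is what triggers
the 'scheduling in atomic' warning. A hedged userspace analogue of that
ordering, using pthreads and made-up names instead of raw_spinlock_t and
schedule(), purely to illustrate the rule "drop shared locks before blocking
forever":

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with wait_lock held, mirroring rt_mutex_handle_deadlock(). */
static void handle_deadlock(int deadlocked)
{
	if (!deadlocked) {
		/* Good case: return with the lock still held. */
		return;
	}

	/* The fix: give the lock back before this thread never runs again. */
	pthread_mutex_unlock(&wait_lock);

	fprintf(stderr, "deadlock detected, parking this thread\n");
	for (;;)
		pause();
}

int main(void)
{
	pthread_mutex_lock(&wait_lock);
	handle_deadlock(0);		/* good case: lock still held here */
	pthread_mutex_unlock(&wait_lock);
	return 0;
}

As in the kernel patch, only the never-returning deadlock path drops the lock;
the good case still returns with it held because the caller is responsible for
releasing it, which is exactly what the added raw_spin_unlock_irq() preserves.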