5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 10 Sep 2024 07:36:46 +0000 (09:36 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 10 Sep 2024 07:36:46 +0000 (09:36 +0200)
added patches:
mmc-cqhci-fix-checking-of-cqhci_halt-state.patch
net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch
x86-mm-fix-pti-for-i386-some-more.patch

queue-5.10/mmc-cqhci-fix-checking-of-cqhci_halt-state.patch [new file with mode: 0644]
queue-5.10/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch [new file with mode: 0644]
queue-5.10/rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-mm-fix-pti-for-i386-some-more.patch [new file with mode: 0644]

diff --git a/queue-5.10/mmc-cqhci-fix-checking-of-cqhci_halt-state.patch b/queue-5.10/mmc-cqhci-fix-checking-of-cqhci_halt-state.patch
new file mode 100644 (file)
index 0000000..6cdaf2c
--- /dev/null
@@ -0,0 +1,35 @@
+From aea62c744a9ae2a8247c54ec42138405216414da Mon Sep 17 00:00:00 2001
+From: Seunghwan Baek <sh8267.baek@samsung.com>
+Date: Thu, 29 Aug 2024 15:18:22 +0900
+Subject: mmc: cqhci: Fix checking of CQHCI_HALT state
+
+From: Seunghwan Baek <sh8267.baek@samsung.com>
+
+commit aea62c744a9ae2a8247c54ec42138405216414da upstream.
+
+To check whether the mmc cqe is in the halt state, we need to test the
+CQHCI_HALT bit with the bitwise AND operator (&), not the logical AND (&&).
+
+Fixes: a4080225f51d ("mmc: cqhci: support for command queue enabled host")
+Cc: stable@vger.kernel.org
+Signed-off-by: Seunghwan Baek <sh8267.baek@samsung.com>
+Reviewed-by: Ritesh Harjani <ritesh.list@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20240829061823.3718-2-sh8267.baek@samsung.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/cqhci.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/cqhci.c
++++ b/drivers/mmc/host/cqhci.c
+@@ -592,7 +592,7 @@ static int cqhci_request(struct mmc_host
+               cqhci_writel(cq_host, 0, CQHCI_CTL);
+               mmc->cqe_on = true;
+               pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+-              if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
++              if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+                       pr_err("%s: cqhci: CQE failed to exit halt state\n",
+                              mmc_hostname(mmc));
+               }
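
The one-character fix matters because && collapses both operands to booleans
before combining them, so the buggy test reported "halted" whenever CQHCI_CTL
read back as anything non-zero at all. A minimal userspace C sketch of the
difference, using an illustrative mask value rather than the driver's real
register layout:

  #include <stdio.h>

  #define CQHCI_HALT 0x1u  /* illustrative mask for this sketch only */

  int main(void)
  {
      unsigned int ctl = 0x2;  /* hypothetical register: HALT bit clear */

      /* Bitwise AND isolates the bit: 0x2 & 0x1 == 0 -> correctly clear. */
      printf("ctl & CQHCI_HALT  -> %u\n", ctl & CQHCI_HALT);

      /* Logical AND asks "are both operands non-zero?": 0x2 && 0x1 == 1,
       * so the buggy test claims HALT for any non-zero register value. */
      printf("ctl && CQHCI_HALT -> %d\n", ctl && CQHCI_HALT);
      return 0;
  }
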
diff --git a/queue-5.10/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch b/queue-5.10/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
new file mode 100644 (file)
index 0000000..f6084df
--- /dev/null
@@ -0,0 +1,59 @@
+From 626dfed5fa3bfb41e0dffd796032b555b69f9cde Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 4 Jul 2024 08:41:57 +0200
+Subject: net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 626dfed5fa3bfb41e0dffd796032b555b69f9cde upstream.
+
+When using a BPF program on kernel_connect(), the call can return -EPERM. This
+causes xs_tcp_setup_socket() to loop forever, filling up the syslog and causing
+the kernel to potentially freeze up.
+
+Neil suggested:
+
+  This will propagate -EPERM up into other layers which might not be ready
+  to handle it. It might be safer to map EPERM to an error we would be more
+  likely to expect from the network system - such as ECONNREFUSED or ENETDOWN.
+
+ECONNREFUSED seems a reasonable choice of error. For BPF programs, setting a
+different error can be out of reach (see the handling in 4fbac77d2d09), in
+particular on kernels which lack f10d05966196 ("bpf: Make BPF_PROG_RUN_ARRAY
+return -err instead of allow boolean"), so it is better to simply remap for
+consistent behavior. UDP already handles EPERM in xs_udp_send_request().
+
+Fixes: d74bad4e74ee ("bpf: Hooks for sys_connect")
+Fixes: 4fbac77d2d09 ("bpf: Hooks for sys_bind")
+Co-developed-by: Lex Siegel <usiegl00@gmail.com>
+Signed-off-by: Lex Siegel <usiegl00@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Neil Brown <neilb@suse.de>
+Cc: Trond Myklebust <trondmy@kernel.org>
+Cc: Anna Schumaker <anna@kernel.org>
+Link: https://github.com/cilium/cilium/issues/33395
+Link: https://lore.kernel.org/bpf/171374175513.12877.8993642908082014881@noble.neil.brown.name
+Link: https://patch.msgid.link/9069ec1d59e4b2129fc23433349fd5580ad43921.1720075070.git.daniel@iogearbox.net
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Hugo SIMELIERE <hsimeliere.opensource@witekio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/xprtsock.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2314,6 +2314,13 @@ static void xs_tcp_setup_socket(struct w
+       case -EALREADY:
+               xprt_unlock_connect(xprt, transport);
+               return;
++      case -EPERM:
++              /* Happens, for instance, if a BPF program is preventing
++               * the connect. Remap the error so upper layers can better
++               * deal with it.
++               */
++              status = -ECONNREFUSED;
++              fallthrough;
+       case -EINVAL:
+               /* Happens, for instance, if the user specified a link
+                * local IPv6 address without a scope-id.
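
The remap works because xs_tcp_setup_socket()'s status switch only finishes
cleanly for errors it expects; anything unrecognized falls through to another
reconnect attempt, which is what produced the endless loop. A minimal
userspace C sketch of that pattern, with a made-up try_connect() standing in
for a connect denied by a BPF program (this is not the xprtsock state
machine, just the shape of the fix):

  #include <errno.h>
  #include <stdio.h>

  /* Hypothetical connect step: always fails with -EPERM, the way
   * kernel_connect() does when a BPF program rejects the connection. */
  static int try_connect(void)
  {
      return -EPERM;
  }

  int main(void)
  {
      for (int attempt = 0; attempt < 5; attempt++) {
          int status = try_connect();

          switch (status) {
          case -EPERM:
              /* Remap so the error lands in a path the upper
               * layers already know how to handle. */
              status = -ECONNREFUSED;
              /* fall through */
          case -ECONNREFUSED:
          case -ENETUNREACH:
              printf("attempt %d: giving up (%d)\n", attempt, status);
              return 1;
          default:
              /* Before the fix, -EPERM landed here and was
               * retried forever, flooding the log. */
              printf("attempt %d: retrying (%d)\n", attempt, status);
          }
      }
      return 0;
  }
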
diff --git a/queue-5.10/rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch b/queue-5.10/rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch
new file mode 100644 (file)
index 0000000..149081d
--- /dev/null
@@ -0,0 +1,57 @@
+From d33d26036a0274b472299d7dcdaa5fb34329f91b Mon Sep 17 00:00:00 2001
+From: Roland Xu <mu001999@outlook.com>
+Date: Thu, 15 Aug 2024 10:58:13 +0800
+Subject: rtmutex: Drop rt_mutex::wait_lock before scheduling
+
+From: Roland Xu <mu001999@outlook.com>
+
+commit d33d26036a0274b472299d7dcdaa5fb34329f91b upstream.
+
+rt_mutex_handle_deadlock() is called with rt_mutex::wait_lock held.  In the
+good case it returns with the lock held and in the deadlock case it emits a
+warning and goes into an endless scheduling loop with the lock held, which
+triggers the 'scheduling in atomic' warning.
+
+Unlock rt_mutex::wait_lock in the deadlock case before issuing the warning
+and dropping into the schedule-forever loop.
+
+[ tglx: Moved unlock before the WARN(), removed the pointless comment,
+       massaged changelog, added Fixes tag ]
+
+Fixes: 3d5c9340d194 ("rtmutex: Handle deadlock detection smarter")
+Signed-off-by: Roland Xu <mu001999@outlook.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/ME0P300MB063599BEF0743B8FA339C2CECC802@ME0P300MB0635.AUSP300.PROD.OUTLOOK.COM
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/rtmutex.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1202,6 +1202,7 @@ __rt_mutex_slowlock(struct rt_mutex *loc
+ }
+ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++                                   struct rt_mutex *lock,
+                                    struct rt_mutex_waiter *w)
+ {
+       /*
+@@ -1211,6 +1212,7 @@ static void rt_mutex_handle_deadlock(int
+       if (res != -EDEADLOCK || detect_deadlock)
+               return;
++      raw_spin_unlock_irq(&lock->wait_lock);
+       /*
+        * Yell lowdly and stop the task right here.
+        */
+@@ -1266,7 +1268,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+       if (unlikely(ret)) {
+               __set_current_state(TASK_RUNNING);
+               remove_waiter(lock, &waiter);
+-              rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++              rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
+       }
+       /*
diff --git a/queue-5.10/series b/queue-5.10/series
index d16744edb19f06f8def2ed676ad37a2f803dbee0..cdf283774bd31bfb56bb5d7e5cc94185ed6ac1c2 100644 (file)
--- a/queue-5.10/series
@@ -180,3 +180,7 @@ arm64-acpi-harden-get_cpu_for_acpi_id-against-missin.patch
 nvmet-tcp-fix-kernel-crash-if-commands-allocation-fa.patch
 drm-i915-fence-mark-debug_fence_init_onstack-with-__.patch
 drm-i915-fence-mark-debug_fence_free-with-__maybe_un.patch
+mmc-cqhci-fix-checking-of-cqhci_halt-state.patch
+rtmutex-drop-rt_mutex-wait_lock-before-scheduling.patch
+x86-mm-fix-pti-for-i386-some-more.patch
+net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
diff --git a/queue-5.10/x86-mm-fix-pti-for-i386-some-more.patch b/queue-5.10/x86-mm-fix-pti-for-i386-some-more.patch
new file mode 100644 (file)
index 0000000..f81f101
--- /dev/null
@@ -0,0 +1,160 @@
+From c48b5a4cf3125adb679e28ef093f66ff81368d05 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 6 Aug 2024 20:48:43 +0200
+Subject: x86/mm: Fix PTI for i386 some more
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit c48b5a4cf3125adb679e28ef093f66ff81368d05 upstream.
+
+So it turns out that we have to do two passes of pti_clone_entry_text():
+once before initcalls, so that device and late initcalls can use
+user-mode-helper / modprobe, and once after free_initmem() /
+mark_readonly().
+
+Now obviously mark_readonly() can cause PMD splits, and
+pti_clone_pgtable() doesn't like that much.
+
+Allow the late clone to split PMDs so that pagetables stay in sync.
+
+[peterz: Changelog and comments]
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lkml.kernel.org/r/20240806184843.GX37996@noisy.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/pti.c |   45 +++++++++++++++++++++++++++++----------------
+ 1 file changed, 29 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pm
+  *
+  * Returns a pointer to a PTE on success, or NULL on failure.
+  */
+-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
++static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
+ {
+       gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+       pmd_t *pmd;
+@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pt
+       if (!pmd)
+               return NULL;
+-      /* We can't do anything sensible if we hit a large mapping. */
++      /* Large PMD mapping found */
+       if (pmd_large(*pmd)) {
+-              WARN_ON(1);
+-              return NULL;
++              /* Clear the PMD if we hit a large mapping from the first round */
++              if (late_text) {
++                      set_pmd(pmd, __pmd(0));
++              } else {
++                      WARN_ON_ONCE(1);
++                      return NULL;
++              }
+       }
+       if (pmd_none(*pmd)) {
+@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(vo
+       if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+               return;
+-      target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
++      target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
+       if (WARN_ON(!target_pte))
+               return;
+@@ -301,7 +306,7 @@ enum pti_clone_level {
+ static void
+ pti_clone_pgtable(unsigned long start, unsigned long end,
+-                enum pti_clone_level level)
++                enum pti_clone_level level, bool late_text)
+ {
+       unsigned long addr;
+@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, u
+                               return;
+                       /* Allocate PTE in the user page-table */
+-                      target_pte = pti_user_pagetable_walk_pte(addr);
++                      target_pte = pti_user_pagetable_walk_pte(addr, late_text);
+                       if (WARN_ON(!target_pte))
+                               return;
+@@ -453,7 +458,7 @@ static void __init pti_clone_user_shared
+               phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+               pte_t *target_pte;
+-              target_pte = pti_user_pagetable_walk_pte(va);
++              target_pte = pti_user_pagetable_walk_pte(va, false);
+               if (WARN_ON(!target_pte))
+                       return;
+@@ -476,7 +481,7 @@ static void __init pti_clone_user_shared
+       start = CPU_ENTRY_AREA_BASE;
+       end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+-      pti_clone_pgtable(start, end, PTI_CLONE_PMD);
++      pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
+ }
+ #endif /* CONFIG_X86_64 */
+@@ -493,11 +498,11 @@ static void __init pti_setup_espfix64(vo
+ /*
+  * Clone the populated PMDs of the entry text and force it RO.
+  */
+-static void pti_clone_entry_text(void)
++static void pti_clone_entry_text(bool late)
+ {
+       pti_clone_pgtable((unsigned long) __entry_text_start,
+                         (unsigned long) __entry_text_end,
+-                        PTI_LEVEL_KERNEL_IMAGE);
++                        PTI_LEVEL_KERNEL_IMAGE, late);
+ }
+ /*
+@@ -572,7 +577,7 @@ static void pti_clone_kernel_text(void)
+        * pti_set_kernel_image_nonglobal() did to clear the
+        * global bit.
+        */
+-      pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
++      pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
+       /*
+        * pti_clone_pgtable() will set the global bit in any PMDs
+@@ -639,8 +644,15 @@ void __init pti_init(void)
+       /* Undo all global bits from the init pagetables in head_64.S: */
+       pti_set_kernel_image_nonglobal();
++
+       /* Replace some of the global bits just for shared entry text: */
+-      pti_clone_entry_text();
++      /*
++       * This is very early in boot. Device and Late initcalls can do
++       * modprobe before free_initmem() and mark_readonly(). This
++       * pti_clone_entry_text() allows those user-mode-helpers to function,
++       * but notably the text is still RW.
++       */
++      pti_clone_entry_text(false);
+       pti_setup_espfix64();
+       pti_setup_vsyscall();
+ }
+@@ -657,10 +669,11 @@ void pti_finalize(void)
+       if (!boot_cpu_has(X86_FEATURE_PTI))
+               return;
+       /*
+-       * We need to clone everything (again) that maps parts of the
+-       * kernel image.
++       * This is after free_initmem() (all initcalls are done) and we've done
++       * mark_readonly(). Text is now NX which might've split some PMDs
++       * relative to the early clone.
+        */
+-      pti_clone_entry_text();
++      pti_clone_entry_text(true);
+       pti_clone_kernel_text();
+       debug_checkwx_user();
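
The resulting decision in pti_user_pagetable_walk_pte() is compact enough to
model directly: only the late pass is allowed to tear a leftover large PMD
down for re-cloning at 4K granularity, while the early pass still treats one
as a bug. A toy C model of that decision table (the strings stand in for the
real page-table operations):

  #include <stdbool.h>
  #include <stdio.h>

  /* Toy model of the late_text handling: a large (2M) PMD left over
   * from the early pass is only cleared and repopulated during the
   * late pass, after mark_readonly() may have split text mappings. */
  static const char *walk_pmd(bool pmd_is_large, bool late_text)
  {
      if (pmd_is_large) {
          if (late_text)
              return "clear large PMD, repopulate with 4K PTEs";
          return "warn once and bail out";  /* early pass: unexpected */
      }
      return "descend to the PTE level as before";
  }

  int main(void)
  {
      printf("early, large : %s\n", walk_pmd(true, false));
      printf("late,  large : %s\n", walk_pmd(true, true));
      printf("any,   small : %s\n", walk_pmd(false, true));
      return 0;
  }
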