6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 10 Sep 2024 07:37:07 +0000 (09:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 10 Sep 2024 07:37:07 +0000 (09:37 +0200)
added patches:
membarrier-riscv-add-full-memory-barrier-in-switch_mm.patch
x86-mm-fix-pti-for-i386-some-more.patch

queue-6.6/membarrier-riscv-add-full-memory-barrier-in-switch_mm.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/x86-mm-fix-pti-for-i386-some-more.patch [new file with mode: 0644]

diff --git a/queue-6.6/membarrier-riscv-add-full-memory-barrier-in-switch_mm.patch b/queue-6.6/membarrier-riscv-add-full-memory-barrier-in-switch_mm.patch
new file mode 100644 (file)
index 0000000..f9a4cc2
--- /dev/null
@@ -0,0 +1,115 @@
+From d6cfd1770f20392d7009ae1fdb04733794514fa9 Mon Sep 17 00:00:00 2001
+From: Andrea Parri <parri.andrea@gmail.com>
+Date: Wed, 31 Jan 2024 15:49:33 +0100
+Subject: membarrier: riscv: Add full memory barrier in switch_mm()
+
+From: Andrea Parri <parri.andrea@gmail.com>
+
+commit d6cfd1770f20392d7009ae1fdb04733794514fa9 upstream.
+
+The membarrier system call requires a full memory barrier after storing
+to rq->curr, before going back to user-space.  The barrier is only
+needed when switching between processes: the barrier is implied by
+mmdrop() when switching from kernel to userspace, and it's not needed
+when switching from userspace to kernel.
+
+Rely on the feature/mechanism ARCH_HAS_MEMBARRIER_CALLBACKS and on the
+primitive membarrier_arch_switch_mm(), already adopted by the PowerPC
+architecture, to insert the required barrier.
+
+Fixes: fab957c11efe2f ("RISC-V: Atomic and Locking Code")
+Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
+Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://lore.kernel.org/r/20240131144936.29190-2-parri.andrea@gmail.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: WangYuli <wangyuli@uniontech.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ MAINTAINERS                         |    2 +-
+ arch/riscv/Kconfig                  |    1 +
+ arch/riscv/include/asm/membarrier.h |   31 +++++++++++++++++++++++++++++++
+ arch/riscv/mm/context.c             |    2 ++
+ kernel/sched/core.c                 |    5 +++--
+ 5 files changed, 38 insertions(+), 3 deletions(-)
+ create mode 100644 arch/riscv/include/asm/membarrier.h
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -13702,7 +13702,7 @@ M:     Mathieu Desnoyers <mathieu.desnoyers@
+ M:    "Paul E. McKenney" <paulmck@kernel.org>
+ L:    linux-kernel@vger.kernel.org
+ S:    Supported
+-F:    arch/powerpc/include/asm/membarrier.h
++F:    arch/*/include/asm/membarrier.h
+ F:    include/uapi/linux/membarrier.h
+ F:    kernel/sched/membarrier.c
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -27,6 +27,7 @@ config RISCV
+       select ARCH_HAS_GCOV_PROFILE_ALL
+       select ARCH_HAS_GIGANTIC_PAGE
+       select ARCH_HAS_KCOV
++      select ARCH_HAS_MEMBARRIER_CALLBACKS
+       select ARCH_HAS_MMIOWB
+       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+       select ARCH_HAS_PMEM_API
+--- /dev/null
++++ b/arch/riscv/include/asm/membarrier.h
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++#ifndef _ASM_RISCV_MEMBARRIER_H
++#define _ASM_RISCV_MEMBARRIER_H
++
++static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
++                                           struct mm_struct *next,
++                                           struct task_struct *tsk)
++{
++      /*
++       * Only need the full barrier when switching between processes.
++       * Barrier when switching from kernel to userspace is not
++       * required here, given that it is implied by mmdrop(). Barrier
++       * when switching from userspace to kernel is not needed after
++       * store to rq->curr.
++       */
++      if (IS_ENABLED(CONFIG_SMP) &&
++          likely(!(atomic_read(&next->membarrier_state) &
++                   (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
++                    MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
++              return;
++
++      /*
++       * The membarrier system call requires a full memory barrier
++       * after storing to rq->curr, before going back to user-space.
++       * Matches a full barrier in the proximity of the membarrier
++       * system call entry.
++       */
++      smp_mb();
++}
++
++#endif /* _ASM_RISCV_MEMBARRIER_H */
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, s
+       if (unlikely(prev == next))
+               return;
++      membarrier_arch_switch_mm(prev, next, task);
++
+       /*
+        * Mark the current MM context as inactive, and the next as
+        * active.  This is at least used by the icache flushing
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6679,8 +6679,9 @@ static void __sched notrace __schedule(u
+                *
+                * Here are the schemes providing that barrier on the
+                * various architectures:
+-               * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
+-               *   switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
++               * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
++               *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
++               *   on PowerPC and on RISC-V.
+                * - finish_lock_switch() for weakly-ordered
+                *   architectures where spin_unlock is a full barrier,
+                * - switch_to() for arm64 (weakly-ordered, spin_unlock
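For reference, the ordering the patch above is concerned with is exercised by the expedited membarrier commands. Below is a minimal user-space sketch (illustrative only, not part of the queue; error handling reduced to perror) that registers for and issues MEMBARRIER_CMD_PRIVATE_EXPEDITED, whose pairing with the store to rq->curr is what membarrier_arch_switch_mm() now provides on RISC-V:

/*
 * Illustrative example: each expedited membarrier call below acts as a
 * full memory barrier on every CPU currently running a thread of this
 * process, which in turn relies on the kernel-side barrier after the
 * store to rq->curr.
 */
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static long membarrier(int cmd, unsigned int flags)
{
        return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
        /* Register once per process before using the private expedited command. */
        if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
                perror("membarrier register");

        /* Issue an expedited barrier across the process's running threads. */
        if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
                perror("membarrier");

        return 0;
}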
diff --git a/queue-6.6/series b/queue-6.6/series
index f01fd0e9715f96c11c680b8361a68ac90c5a105f..fa99db9943c0bd39022e51127fd85868c168e87c 100644 (file)
@@ -260,3 +260,5 @@ smb-client-fix-double-put-of-cfile-in-smb2_rename_pa.patch
 riscv-fix-toolchain-vector-detection.patch
 riscv-do-not-restrict-memory-size-because-of-linear-.patch
 ublk_drv-fix-null-pointer-dereference-in-ublk_ctrl_s.patch
+membarrier-riscv-add-full-memory-barrier-in-switch_mm.patch
+x86-mm-fix-pti-for-i386-some-more.patch
diff --git a/queue-6.6/x86-mm-fix-pti-for-i386-some-more.patch b/queue-6.6/x86-mm-fix-pti-for-i386-some-more.patch
new file mode 100644 (file)
index 0000000..8f14ca2
--- /dev/null
@@ -0,0 +1,160 @@
+From c48b5a4cf3125adb679e28ef093f66ff81368d05 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 6 Aug 2024 20:48:43 +0200
+Subject: x86/mm: Fix PTI for i386 some more
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit c48b5a4cf3125adb679e28ef093f66ff81368d05 upstream.
+
+So it turns out that we have to do two passes of
+pti_clone_entry_text(), once before initcalls, such that device and
+late initcalls can use user-mode-helper / modprobe and once after
+free_initmem() / mark_readonly().
+
+Now obviously mark_readonly() can cause PMD splits, and
+pti_clone_pgtable() doesn't like that much.
+
+Allow the late clone to split PMDs so that pagetables stay in sync.
+
+[peterz: Changelog and comments]
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lkml.kernel.org/r/20240806184843.GX37996@noisy.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/pti.c |   45 +++++++++++++++++++++++++++++----------------
+ 1 file changed, 29 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pm
+  *
+  * Returns a pointer to a PTE on success, or NULL on failure.
+  */
+-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
++static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
+ {
+       gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+       pmd_t *pmd;
+@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pt
+       if (!pmd)
+               return NULL;
+-      /* We can't do anything sensible if we hit a large mapping. */
++      /* Large PMD mapping found */
+       if (pmd_large(*pmd)) {
+-              WARN_ON(1);
+-              return NULL;
++              /* Clear the PMD if we hit a large mapping from the first round */
++              if (late_text) {
++                      set_pmd(pmd, __pmd(0));
++              } else {
++                      WARN_ON_ONCE(1);
++                      return NULL;
++              }
+       }
+       if (pmd_none(*pmd)) {
+@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(vo
+       if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+               return;
+-      target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
++      target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
+       if (WARN_ON(!target_pte))
+               return;
+@@ -301,7 +306,7 @@ enum pti_clone_level {
+ static void
+ pti_clone_pgtable(unsigned long start, unsigned long end,
+-                enum pti_clone_level level)
++                enum pti_clone_level level, bool late_text)
+ {
+       unsigned long addr;
+@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, u
+                               return;
+                       /* Allocate PTE in the user page-table */
+-                      target_pte = pti_user_pagetable_walk_pte(addr);
++                      target_pte = pti_user_pagetable_walk_pte(addr, late_text);
+                       if (WARN_ON(!target_pte))
+                               return;
+@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared
+               phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+               pte_t *target_pte;
+-              target_pte = pti_user_pagetable_walk_pte(va);
++              target_pte = pti_user_pagetable_walk_pte(va, false);
+               if (WARN_ON(!target_pte))
+                       return;
+@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared
+       start = CPU_ENTRY_AREA_BASE;
+       end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+-      pti_clone_pgtable(start, end, PTI_CLONE_PMD);
++      pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
+ }
+ #endif /* CONFIG_X86_64 */
+@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(vo
+ /*
+  * Clone the populated PMDs of the entry text and force it RO.
+  */
+-static void pti_clone_entry_text(void)
++static void pti_clone_entry_text(bool late)
+ {
+       pti_clone_pgtable((unsigned long) __entry_text_start,
+                         (unsigned long) __entry_text_end,
+-                        PTI_LEVEL_KERNEL_IMAGE);
++                        PTI_LEVEL_KERNEL_IMAGE, late);
+ }
+ /*
+@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
+        * pti_set_kernel_image_nonglobal() did to clear the
+        * global bit.
+        */
+-      pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
++      pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
+       /*
+        * pti_clone_pgtable() will set the global bit in any PMDs
+@@ -638,8 +643,15 @@ void __init pti_init(void)
+       /* Undo all global bits from the init pagetables in head_64.S: */
+       pti_set_kernel_image_nonglobal();
++
+       /* Replace some of the global bits just for shared entry text: */
+-      pti_clone_entry_text();
++      /*
++       * This is very early in boot. Device and Late initcalls can do
++       * modprobe before free_initmem() and mark_readonly(). This
++       * pti_clone_entry_text() allows those user-mode-helpers to function,
++       * but notably the text is still RW.
++       */
++      pti_clone_entry_text(false);
+       pti_setup_espfix64();
+       pti_setup_vsyscall();
+ }
+@@ -656,10 +668,11 @@ void pti_finalize(void)
+       if (!boot_cpu_has(X86_FEATURE_PTI))
+               return;
+       /*
+-       * We need to clone everything (again) that maps parts of the
+-       * kernel image.
++       * This is after free_initmem() (all initcalls are done) and we've done
++       * mark_readonly(). Text is now NX which might've split some PMDs
++       * relative to the early clone.
+        */
+-      pti_clone_entry_text();
++      pti_clone_entry_text(true);
+       pti_clone_kernel_text();
+       debug_checkwx_user();