5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 7 Feb 2023 09:02:11 +0000 (10:02 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 7 Feb 2023 09:02:11 +0000 (10:02 +0100)
added patches:
mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch

queue-5.10/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch [new file with mode: 0644]

diff --git a/queue-5.10/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch b/queue-5.10/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
new file mode 100644 (file)
index 0000000..94beba6
--- /dev/null
+++ b/queue-5.10/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
@@ -0,0 +1,95 @@
+From 3489dbb696d25602aea8c3e669a6d43b76bd5358 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 26 Jan 2023 14:27:20 -0800
+Subject: mm: hugetlb: proc: check for hugetlb shared PMD in /proc/PID/smaps
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 3489dbb696d25602aea8c3e669a6d43b76bd5358 upstream.
+
+Patch series "Fixes for hugetlb mapcount at most 1 for shared PMDs".
+
+This issue of mapcount in hugetlb pages referenced by shared PMDs was
+discussed in [1].  The following two patches address the user-visible
+behavior caused by this issue.
+
+[1] https://lore.kernel.org/linux-mm/Y9BF+OCdWnCSilEu@monkey/
+
+
+This patch (of 2):
+
+A hugetlb page will have a mapcount of 1 if mapped by multiple processes
+via a shared PMD.  This is because only the first process increases the
+map count, and subsequent processes just add the shared PMD page to their
+page table.
+
+page_mapcount is being used to decide if a hugetlb page is shared or
+private in /proc/PID/smaps.  Pages referenced via a shared PMD were
+incorrectly being counted as private.
+
+To fix, check for a shared PMD if the mapcount is 1.  If a shared PMD is
+found, count the hugetlb page as shared.  A new helper to check for a
+shared PMD is added.
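+
+As a hedged aside (not part of the upstream commit message): the
+user-visible effect can be sketched from userspace roughly as below.
+The 2 GiB size, the reserved hugetlb pool, and the exact smaps field
+names are assumptions of the sketch; PMD sharing only applies to shared
+mappings that fully cover a PUD_SIZE-aligned range (1 GiB on x86-64).
+
+  /*
+   * Hypothetical repro: parent and child share a hugetlb mapping large
+   * enough for PMD sharing; before the fix, the child's smaps counted
+   * the region under Private_Hugetlb instead of Shared_Hugetlb.
+   */
+  #include <stdio.h>
+  #include <string.h>
+  #include <sys/mman.h>
+  #include <sys/wait.h>
+  #include <unistd.h>
+
+  #define LEN (2UL << 30)  /* 2 GiB: guarantees a PUD-aligned 1 GiB range */
+
+  int main(void)
+  {
+          char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
+                         MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+          char line[256];
+
+          if (p == MAP_FAILED)
+                  return 1;          /* needs a reserved hugetlb pool */
+          memset(p, 1, LEN);         /* parent faults in the huge pages */
+
+          if (fork() == 0) {
+                  FILE *f;
+
+                  memset(p, 2, LEN); /* child faults -> shared PMDs */
+                  f = fopen("/proc/self/smaps", "r");
+                  while (f && fgets(line, sizeof(line), f))
+                          if (strstr(line, "Hugetlb"))
+                                  fputs(line, stdout);
+                  if (f)
+                          fclose(f);
+                  _exit(0);
+          }
+          wait(NULL);
+          return 0;
+  }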
+
+[akpm@linux-foundation.org: simplification, per David]
+[akpm@linux-foundation.org: hugetlb.h: include page_ref.h for page_count()]
+Link: https://lkml.kernel.org/r/20230126222721.222195-2-mike.kravetz@oracle.com
+Fixes: 25ee01a2fca0 ("mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Peter Xu <peterx@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: James Houghton <jthoughton@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
+Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/task_mmu.c      |    4 +---
+ include/linux/hugetlb.h |   13 +++++++++++++
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -714,9 +714,7 @@ static int smaps_hugetlb_range(pte_t *pt
+                       page = device_private_entry_to_page(swpent);
+       }
+       if (page) {
+-              int mapcount = page_mapcount(page);
+-
+-              if (mapcount >= 2)
++              if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
+                       mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+               else
+                       mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -7,6 +7,7 @@
+ #include <linux/fs.h>
+ #include <linux/hugetlb_inline.h>
+ #include <linux/cgroup.h>
++#include <linux/page_ref.h>
+ #include <linux/list.h>
+ #include <linux/kref.h>
+ #include <linux/pgtable.h>
+@@ -942,4 +943,16 @@ static inline __init void hugetlb_cma_ch
+ }
+ #endif
+
++#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
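++/*
++ * huge_pmd_share() takes a reference on the page table page holding a
++ * shared PMD, so a raised page_count() means the PMD is shared.
++ */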
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++      return page_count(virt_to_page(pte)) > 1;
++}
++#else
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++      return false;
++}
++#endif
++
+ #endif /* _LINUX_HUGETLB_H */
diff --git a/queue-5.10/series b/queue-5.10/series
index 65862a87829931a428cacb7b17c931dc698a4129..943f7256c1b3618c8afd846ef4e6455149164e84 100644 (file)
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -71,3 +71,5 @@ nvmem-qcom-spmi-sdam-fix-module-autoloading.patch
 parisc-fix-return-code-of-pdc_iodc_print.patch
 parisc-wire-up-ptrace_getregs-ptrace_setregs-for-compat-case.patch
 riscv-disable-generation-of-unwind-tables.patch
+mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
+x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
diff --git a/queue-5.10/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch b/queue-5.10/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
new file mode 100644 (file)
index 0000000..f062e27
--- /dev/null
+++ b/queue-5.10/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
@@ -0,0 +1,85 @@
+From 9d2c7203ffdb846399b82b0660563c89e918c751 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Tue, 31 Jan 2023 09:57:18 +0100
+Subject: x86/debug: Fix stack recursion caused by wrongly ordered DR7 accesses
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 9d2c7203ffdb846399b82b0660563c89e918c751 upstream.
+
+In kernels compiled with CONFIG_PARAVIRT=n, the compiler re-orders the
+DR7 read in exc_nmi() to happen before the call to sev_es_ist_enter().
+
+This is problematic when running as an SEV-ES guest because in this
+environment the DR7 read might cause a #VC exception, and taking #VC
+exceptions is not safe in exc_nmi() before sev_es_ist_enter() has run.
+
+The result is stack recursion if the NMI was caused on the #VC IST
+stack, because a subsequent #VC exception in the NMI handler will
+overwrite the stack frame of the interrupted #VC handler.
+
+As there are no compiler barriers affecting the ordering of DR7
+reads/writes, make the accesses to this register volatile, preventing
+the compiler from re-ordering them.
+
+  [ bp: Massage text, make them volatile too, to make sure some
+  aggressive compiler optimization pass doesn't discard them. ]
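+
+As a hedged aside (not part of the upstream commit message): in the
+kernel, __FORCE_ORDER is effectively a dummy memory input operand, and
+the pattern can be illustrated from userspace roughly as below, with
+RDTSC standing in for the privileged DR7 access (the names here are
+made up for the sketch):
+
+  /*
+   * Plain asm with only output operands looks pure to the compiler, so
+   * it may be hoisted across calls it cannot see into; "volatile" plus
+   * a dummy memory input pins it in program order.
+   */
+  static unsigned int force_order;  /* never written; ordering token */
+
+  static inline unsigned long read_tsc_ordered(void)
+  {
+          unsigned long lo, hi;
+
+          asm volatile("rdtsc" : "=a" (lo), "=d" (hi)
+                               : "m" (force_order));
+          return (hi << 32) | lo;
+  }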
+
+Fixes: 315562c9af3d ("x86/sev-es: Adjust #VC IST Stack on entering NMI handler")
+Reported-by: Alexey Kardashevskiy <aik@amd.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230127035616.508966-1-aik@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/debugreg.h | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
+index b049d950612f..ca97442e8d49 100644
+--- a/arch/x86/include/asm/debugreg.h
++++ b/arch/x86/include/asm/debugreg.h
+@@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno)
+               asm("mov %%db6, %0" :"=r" (val));
+               break;
+       case 7:
+-              asm("mov %%db7, %0" :"=r" (val));
++              /*
++               * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
++               * with other code.
++               *
++               * This is needed because a DR7 access can cause a #VC exception
++               * when running under SEV-ES. Taking a #VC exception is not a
++               * safe thing to do just anywhere in the entry code and
++               * re-ordering might place the access into an unsafe location.
++               *
++               * This happened in the NMI handler, where the DR7 read was
++               * re-ordered to happen before the call to sev_es_ist_enter(),
++               * causing stack recursion.
++               */
++              asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
+               break;
+       default:
+               BUG();
+@@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
+               asm("mov %0, %%db6"     ::"r" (value));
+               break;
+       case 7:
+-              asm("mov %0, %%db7"     ::"r" (value));
++              /*
++               * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
++               * with other code.
++               *
++               * While it didn't happen with a DR7 write (see the DR7 read
++               * comment above which explains where it happened), add the
++               * __FORCE_ORDER here too to avoid similar problems in the
++               * future.
++               */
++              asm volatile("mov %0, %%db7"    ::"r" (value), __FORCE_ORDER);
+               break;
+       default:
+               BUG();
+-- 
+2.39.1
+