From 51fd9b74619e892fcc9a00920ec21b7abbcbba14 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 7 Feb 2023 10:02:23 +0100
Subject: [PATCH] 5.15-stable patches

added patches:
      kernel-irq-irqdomain.c-fix-memory-leak-with-using-debugfs_lookup.patch
      mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
      usb-gadget-f_uac2-fix-incorrect-increment-of-bnumendpoints.patch
      x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
---
 ...emory-leak-with-using-debugfs_lookup.patch | 34 +++++++
 ...hugetlb-shared-pmd-in-proc-pid-smaps.patch | 97 +++++++++++++++++++
 queue-5.15/series                             |  4 +
 ...incorrect-increment-of-bnumendpoints.patch | 38 ++++++++
 ...used-by-wrongly-ordered-dr7-accesses.patch | 80 +++++++++++++++
 5 files changed, 253 insertions(+)
 create mode 100644 queue-5.15/kernel-irq-irqdomain.c-fix-memory-leak-with-using-debugfs_lookup.patch
 create mode 100644 queue-5.15/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
 create mode 100644 queue-5.15/usb-gadget-f_uac2-fix-incorrect-increment-of-bnumendpoints.patch
 create mode 100644 queue-5.15/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch

diff --git a/queue-5.15/kernel-irq-irqdomain.c-fix-memory-leak-with-using-debugfs_lookup.patch b/queue-5.15/kernel-irq-irqdomain.c-fix-memory-leak-with-using-debugfs_lookup.patch
new file mode 100644
index 00000000000..0f6173672a5
--- /dev/null
+++ b/queue-5.15/kernel-irq-irqdomain.c-fix-memory-leak-with-using-debugfs_lookup.patch
@@ -0,0 +1,34 @@
+From d83d7ed260283560700d4034a80baad46620481b Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman
+Date: Thu, 2 Feb 2023 16:15:54 +0100
+Subject: kernel/irq/irqdomain.c: fix memory leak with using debugfs_lookup()
+
+From: Greg Kroah-Hartman
+
+commit d83d7ed260283560700d4034a80baad46620481b upstream.
+
+When calling debugfs_lookup() the result must have dput() called on it,
+otherwise the memory will leak over time. To make things simpler, just
+call debugfs_lookup_and_remove() instead which handles all of the logic
+at once.
+
+Cc: Thomas Gleixner
+Cc: stable
+Reviewed-by: Marc Zyngier
+Link: https://lore.kernel.org/r/20230202151554.2310273-1-gregkh@linuxfoundation.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/irq/irqdomain.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -1913,7 +1913,7 @@ static void debugfs_add_domain_dir(struc
+ 
+ static void debugfs_remove_domain_dir(struct irq_domain *d)
+ {
+-	debugfs_remove(debugfs_lookup(d->name, domain_dir));
++	debugfs_lookup_and_remove(d->name, domain_dir);
+ }
+ 
+ void __init irq_domain_debugfs_init(struct dentry *root)
diff --git a/queue-5.15/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch b/queue-5.15/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
new file mode 100644
index 00000000000..a32d45b790b
--- /dev/null
+++ b/queue-5.15/mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
@@ -0,0 +1,97 @@
+From 3489dbb696d25602aea8c3e669a6d43b76bd5358 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz
+Date: Thu, 26 Jan 2023 14:27:20 -0800
+Subject: mm: hugetlb: proc: check for hugetlb shared PMD in /proc/PID/smaps
+
+From: Mike Kravetz
+
+commit 3489dbb696d25602aea8c3e669a6d43b76bd5358 upstream.
+
+Patch series "Fixes for hugetlb mapcount at most 1 for shared PMDs".
+
+This issue of mapcount in hugetlb pages referenced by shared PMDs was
+discussed in [1]. The following two patches address user visible behavior
+caused by this issue.
+
+[1] https://lore.kernel.org/linux-mm/Y9BF+OCdWnCSilEu@monkey/
+
+
+This patch (of 2):
+
+A hugetlb page will have a mapcount of 1 if mapped by multiple processes
+via a shared PMD. This is because only the first process increases the
+map count, and subsequent processes just add the shared PMD page to their
+page table.
+
+page_mapcount is being used to decide if a hugetlb page is shared or
+private in /proc/PID/smaps. Pages referenced via a shared PMD were
+incorrectly being counted as private.
+
+To fix, check for a shared PMD if mapcount is 1. If a shared PMD is found
+count the hugetlb page as shared. A new helper to check for a shared PMD
+is added.
+
+[akpm@linux-foundation.org: simplification, per David]
+[akpm@linux-foundation.org: hugetlb.h: include page_ref.h for page_count()]
+Link: https://lkml.kernel.org/r/20230126222721.222195-2-mike.kravetz@oracle.com
+Fixes: 25ee01a2fca0 ("mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps")
+Signed-off-by: Mike Kravetz
+Acked-by: Peter Xu
+Cc: David Hildenbrand
+Cc: James Houghton
+Cc: Matthew Wilcox
+Cc: Michal Hocko
+Cc: Muchun Song
+Cc: Naoya Horiguchi
+Cc: Vishal Moola (Oracle)
+Cc: Yang Shi
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/proc/task_mmu.c      |  4 +---
+ include/linux/hugetlb.h | 13 +++++++++++++
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -714,9 +714,7 @@ static int smaps_hugetlb_range(pte_t *pt
+ 		page = pfn_swap_entry_to_page(swpent);
+ 	}
+ 	if (page) {
+-		int mapcount = page_mapcount(page);
+-
+-		if (mapcount >= 2)
++		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
+ 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+ 		else
+ 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -7,6 +7,7 @@
+ #include <linux/fs.h>
+ #include <linux/hugetlb_inline.h>
+ #include <linux/cgroup.h>
++#include <linux/page_ref.h>
+ #include <linux/list.h>
+ #include <linux/kref.h>
+ #include <linux/pgtable.h>
+@@ -1099,6 +1100,18 @@ static inline __init void hugetlb_cma_ch
+ }
+ #endif
+ 
++#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++	return page_count(virt_to_page(pte)) > 1;
++}
++#else
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++	return false;
++}
++#endif
++
+ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+ 
+ #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
diff --git a/queue-5.15/series b/queue-5.15/series
index fb253cffab1..28e907bcad1 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -87,3 +87,7 @@ nvmem-qcom-spmi-sdam-fix-module-autoloading.patch
 parisc-fix-return-code-of-pdc_iodc_print.patch
 parisc-wire-up-ptrace_getregs-ptrace_setregs-for-compat-case.patch
 riscv-disable-generation-of-unwind-tables.patch
+mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
+usb-gadget-f_uac2-fix-incorrect-increment-of-bnumendpoints.patch
+kernel-irq-irqdomain.c-fix-memory-leak-with-using-debugfs_lookup.patch
+x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
diff --git a/queue-5.15/usb-gadget-f_uac2-fix-incorrect-increment-of-bnumendpoints.patch b/queue-5.15/usb-gadget-f_uac2-fix-incorrect-increment-of-bnumendpoints.patch
new file mode 100644
index 00000000000..32a6c193bbf
--- /dev/null
+++ b/queue-5.15/usb-gadget-f_uac2-fix-incorrect-increment-of-bnumendpoints.patch
@@ -0,0 +1,38 @@
+From 2fa89458af9993fab8054daf827f38881e2ad473 Mon Sep 17 00:00:00 2001
+From: Pratham Pratap
+Date: Wed, 25 Jan 2023 12:57:25 +0530
+Subject: usb: gadget: f_uac2: Fix incorrect increment of bNumEndpoints
+
+From: Pratham Pratap
+
+commit 2fa89458af9993fab8054daf827f38881e2ad473 upstream.
+
+Currently connect/disconnect of USB cable calls afunc_bind and
+eventually increments the bNumEndpoints. Performing multiple
+plugin/plugout will increment bNumEndpoints incorrectly, and on
+the next plug-in it leads to invalid configuration of descriptor
+and hence enumeration fails.
+
+Fix this by resetting the value of bNumEndpoints to 1 on every
+afunc_bind call.
+
+Fixes: 40c73b30546e ("usb: gadget: f_uac2: add adaptive sync support for capture")
+Cc: stable
+Signed-off-by: Pratham Pratap
+Signed-off-by: Prashanth K
+Link: https://lore.kernel.org/r/1674631645-28888-1-git-send-email-quic_prashk@quicinc.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/usb/gadget/function/f_uac2.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -1069,6 +1069,7 @@ afunc_bind(struct usb_configuration *cfg
+ 	}
+ 	std_as_out_if0_desc.bInterfaceNumber = ret;
+ 	std_as_out_if1_desc.bInterfaceNumber = ret;
++	std_as_out_if1_desc.bNumEndpoints = 1;
+ 	uac2->as_out_intf = ret;
+ 	uac2->as_out_alt = 0;
+ 
diff --git a/queue-5.15/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch b/queue-5.15/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
new file mode 100644
index 00000000000..7bb87a58cc1
--- /dev/null
+++ b/queue-5.15/x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
@@ -0,0 +1,80 @@
+From 9d2c7203ffdb846399b82b0660563c89e918c751 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel
+Date: Tue, 31 Jan 2023 09:57:18 +0100
+Subject: x86/debug: Fix stack recursion caused by wrongly ordered DR7 accesses
+
+From: Joerg Roedel
+
+commit 9d2c7203ffdb846399b82b0660563c89e918c751 upstream.
+
+In kernels compiled with CONFIG_PARAVIRT=n, the compiler re-orders the
+DR7 read in exc_nmi() to happen before the call to sev_es_ist_enter().
+
+This is problematic when running as an SEV-ES guest because in this
+environment the DR7 read might cause a #VC exception, and taking #VC
+exceptions is not safe in exc_nmi() before sev_es_ist_enter() has run.
+
+The result is stack recursion if the NMI was caused on the #VC IST
+stack, because a subsequent #VC exception in the NMI handler will
+overwrite the stack frame of the interrupted #VC handler.
+
+As there are no compiler barriers affecting the ordering of DR7
+reads/writes, make the accesses to this register volatile, forbidding
+the compiler to re-order them.
+
+  [ bp: Massage text, make them volatile too, to make sure some
+    aggressive compiler optimization pass doesn't discard them. ]
+
+Fixes: 315562c9af3d ("x86/sev-es: Adjust #VC IST Stack on entering NMI handler")
+Reported-by: Alexey Kardashevskiy
+Signed-off-by: Joerg Roedel
+Signed-off-by: Borislav Petkov (AMD)
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230127035616.508966-1-aik@amd.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/debugreg.h | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/debugreg.h
++++ b/arch/x86/include/asm/debugreg.h
+@@ -39,7 +39,20 @@ static __always_inline unsigned long nat
+ 		asm("mov %%db6, %0" :"=r" (val));
+ 		break;
+ 	case 7:
+-		asm("mov %%db7, %0" :"=r" (val));
++		/*
++		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
++		 * with other code.
++		 *
++		 * This is needed because a DR7 access can cause a #VC exception
++		 * when running under SEV-ES. Taking a #VC exception is not a
++		 * safe thing to do just anywhere in the entry code and
++		 * re-ordering might place the access into an unsafe location.
++		 *
++		 * This happened in the NMI handler, where the DR7 read was
++		 * re-ordered to happen before the call to sev_es_ist_enter(),
++		 * causing stack recursion.
++		 */
++		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
+ 		break;
+ 	default:
+ 		BUG();
+@@ -66,7 +79,16 @@ static __always_inline void native_set_d
+ 		asm("mov %0, %%db6" ::"r" (value));
+ 		break;
+ 	case 7:
+-		asm("mov %0, %%db7" ::"r" (value));
++		/*
++		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
++		 * with other code.
++		 *
++		 * While it didn't happen with a DR7 write (see the DR7 read
++		 * comment above which explains where it happened), add the
++		 * __FORCE_ORDER here too to avoid similar problems in the
++		 * future.
++		 */
++		asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
+ 		break;
+ 	default:
+ 		BUG();
-- 
2.47.3