--- /dev/null
+From d83d7ed260283560700d4034a80baad46620481b Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 2 Feb 2023 16:15:54 +0100
+Subject: kernel/irq/irqdomain.c: fix memory leak with using debugfs_lookup()
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit d83d7ed260283560700d4034a80baad46620481b upstream.
+
+When calling debugfs_lookup() the result must have dput() called on it,
+otherwise the memory will leak over time. To make things simpler, just
+call debugfs_lookup_and_remove() instead which handles all of the logic
+at once.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable <stable@kernel.org>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20230202151554.2310273-1-gregkh@linuxfoundation.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/irq/irqdomain.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -1913,7 +1913,7 @@ static void debugfs_add_domain_dir(struc
+
+ static void debugfs_remove_domain_dir(struct irq_domain *d)
+ {
+- debugfs_remove(debugfs_lookup(d->name, domain_dir));
++ debugfs_lookup_and_remove(d->name, domain_dir);
+ }
+
+ void __init irq_domain_debugfs_init(struct dentry *root)
--- /dev/null
+From 3489dbb696d25602aea8c3e669a6d43b76bd5358 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 26 Jan 2023 14:27:20 -0800
+Subject: mm: hugetlb: proc: check for hugetlb shared PMD in /proc/PID/smaps
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 3489dbb696d25602aea8c3e669a6d43b76bd5358 upstream.
+
+Patch series "Fixes for hugetlb mapcount at most 1 for shared PMDs".
+
+This issue of mapcount in hugetlb pages referenced by shared PMDs was
+discussed in [1]. The following two patches address user visible behavior
+caused by this issue.
+
+[1] https://lore.kernel.org/linux-mm/Y9BF+OCdWnCSilEu@monkey/
+
+
+This patch (of 2):
+
+A hugetlb page will have a mapcount of 1 if mapped by multiple processes
+via a shared PMD. This is because only the first process increases the
+map count, and subsequent processes just add the shared PMD page to their
+page table.
+
+page_mapcount is being used to decide if a hugetlb page is shared or
+private in /proc/PID/smaps. Pages referenced via a shared PMD were
+incorrectly being counted as private.
+
+To fix, check for a shared PMD if mapcount is 1. If a shared PMD is found
+count the hugetlb page as shared. A new helper to check for a shared PMD
+is added.
+
+[akpm@linux-foundation.org: simplification, per David]
+[akpm@linux-foundation.org: hugetlb.h: include page_ref.h for page_count()]
+Link: https://lkml.kernel.org/r/20230126222721.222195-2-mike.kravetz@oracle.com
+Fixes: 25ee01a2fca0 ("mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Peter Xu <peterx@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: James Houghton <jthoughton@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
+Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/task_mmu.c | 4 +---
+ include/linux/hugetlb.h | 13 +++++++++++++
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -714,9 +714,7 @@ static int smaps_hugetlb_range(pte_t *pt
+ page = pfn_swap_entry_to_page(swpent);
+ }
+ if (page) {
+- int mapcount = page_mapcount(page);
+-
+- if (mapcount >= 2)
++ if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
+ mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+ else
+ mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -7,6 +7,7 @@
+ #include <linux/fs.h>
+ #include <linux/hugetlb_inline.h>
+ #include <linux/cgroup.h>
++#include <linux/page_ref.h>
+ #include <linux/list.h>
+ #include <linux/kref.h>
+ #include <linux/pgtable.h>
+@@ -1099,6 +1100,18 @@ static inline __init void hugetlb_cma_ch
+ }
+ #endif
+
++#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++ return page_count(virt_to_page(pte)) > 1;
++}
++#else
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++ return false;
++}
++#endif
++
+ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+
+ #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
parisc-fix-return-code-of-pdc_iodc_print.patch
parisc-wire-up-ptrace_getregs-ptrace_setregs-for-compat-case.patch
riscv-disable-generation-of-unwind-tables.patch
+mm-hugetlb-proc-check-for-hugetlb-shared-pmd-in-proc-pid-smaps.patch
+usb-gadget-f_uac2-fix-incorrect-increment-of-bnumendpoints.patch
+kernel-irq-irqdomain.c-fix-memory-leak-with-using-debugfs_lookup.patch
+x86-debug-fix-stack-recursion-caused-by-wrongly-ordered-dr7-accesses.patch
--- /dev/null
+From 2fa89458af9993fab8054daf827f38881e2ad473 Mon Sep 17 00:00:00 2001
+From: Pratham Pratap <quic_ppratap@quicinc.com>
+Date: Wed, 25 Jan 2023 12:57:25 +0530
+Subject: usb: gadget: f_uac2: Fix incorrect increment of bNumEndpoints
+
+From: Pratham Pratap <quic_ppratap@quicinc.com>
+
+commit 2fa89458af9993fab8054daf827f38881e2ad473 upstream.
+
+Currently connect/disconnect of USB cable calls afunc_bind and
+eventually increments the bNumEndpoints. Performing multiple
+plugin/plugout will increment bNumEndpoints incorrectly, and on
+the next plug-in it leads to invalid configuration of descriptor
+and hence enumeration fails.
+
+Fix this by resetting the value of bNumEndpoints to 1 on every
+afunc_bind call.
+
+Fixes: 40c73b30546e ("usb: gadget: f_uac2: add adaptive sync support for capture")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Pratham Pratap <quic_ppratap@quicinc.com>
+Signed-off-by: Prashanth K <quic_prashk@quicinc.com>
+Link: https://lore.kernel.org/r/1674631645-28888-1-git-send-email-quic_prashk@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_uac2.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -1069,6 +1069,7 @@ afunc_bind(struct usb_configuration *cfg
+ }
+ std_as_out_if0_desc.bInterfaceNumber = ret;
+ std_as_out_if1_desc.bInterfaceNumber = ret;
++ std_as_out_if1_desc.bNumEndpoints = 1;
+ uac2->as_out_intf = ret;
+ uac2->as_out_alt = 0;
+
--- /dev/null
+From 9d2c7203ffdb846399b82b0660563c89e918c751 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Tue, 31 Jan 2023 09:57:18 +0100
+Subject: x86/debug: Fix stack recursion caused by wrongly ordered DR7 accesses
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 9d2c7203ffdb846399b82b0660563c89e918c751 upstream.
+
+In kernels compiled with CONFIG_PARAVIRT=n, the compiler re-orders the
+DR7 read in exc_nmi() to happen before the call to sev_es_ist_enter().
+
+This is problematic when running as an SEV-ES guest because in this
+environment the DR7 read might cause a #VC exception, and taking #VC
+exceptions is not safe in exc_nmi() before sev_es_ist_enter() has run.
+
+The result is stack recursion if the NMI was caused on the #VC IST
+stack, because a subsequent #VC exception in the NMI handler will
+overwrite the stack frame of the interrupted #VC handler.
+
+As there are no compiler barriers affecting the ordering of DR7
+reads/writes, make the accesses to this register volatile, forbidding
+the compiler to re-order them.
+
+ [ bp: Massage text, make them volatile too, to make sure some
+ aggressive compiler optimization pass doesn't discard them. ]
+
+Fixes: 315562c9af3d ("x86/sev-es: Adjust #VC IST Stack on entering NMI handler")
+Reported-by: Alexey Kardashevskiy <aik@amd.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230127035616.508966-1-aik@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/debugreg.h | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/debugreg.h
++++ b/arch/x86/include/asm/debugreg.h
+@@ -39,7 +39,20 @@ static __always_inline unsigned long nat
+ asm("mov %%db6, %0" :"=r" (val));
+ break;
+ case 7:
+- asm("mov %%db7, %0" :"=r" (val));
++ /*
++ * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
++ * with other code.
++ *
++ * This is needed because a DR7 access can cause a #VC exception
++ * when running under SEV-ES. Taking a #VC exception is not a
++ * safe thing to do just anywhere in the entry code and
++ * re-ordering might place the access into an unsafe location.
++ *
++ * This happened in the NMI handler, where the DR7 read was
++ * re-ordered to happen before the call to sev_es_ist_enter(),
++ * causing stack recursion.
++ */
++ asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
+ break;
+ default:
+ BUG();
+@@ -66,7 +79,16 @@ static __always_inline void native_set_d
+ asm("mov %0, %%db6" ::"r" (value));
+ break;
+ case 7:
+- asm("mov %0, %%db7" ::"r" (value));
++ /*
++ * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
++ * with other code.
++ *
++ * While it didn't happen with a DR7 write (see the DR7 read
++ * comment above which explains where it happened), add the
++ * __FORCE_ORDER here too to avoid similar problems in the
++ * future.
++ */
++ asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
+ break;
+ default:
+ BUG();