--- /dev/null
+From a4a118f2eead1d6c49e00765de89878288d4b890 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Sun, 21 Nov 2021 12:40:07 -0800
+Subject: hugetlbfs: flush TLBs correctly after huge_pmd_unshare
+
+From: Nadav Amit <namit@vmware.com>
+
+commit a4a118f2eead1d6c49e00765de89878288d4b890 upstream.
+
+When a call from __unmap_hugepage_range() to huge_pmd_unshare() succeeds,
+a TLB flush is missing. This TLB flush must be performed before releasing
+the i_mmap_rwsem, in order to prevent the unshared PMDs page from being
+released and reused before the TLB flush takes place.
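+
+As a rough sketch of the window (hypothetical interleaving; the actual
+paths run through __unmap_hugepage_range() and the final put_page() of
+the PMDs page):
+
+	/*
+	 * unmapping CPU			racing CPU
+	 * -------------			---------
+	 * huge_pmd_unshare()
+	 *   pud_clear(pud);
+	 *   put_page(virt_to_page(ptep));
+	 * i_mmap_rwsem released
+	 *					last reference dropped:
+	 *					PMDs page freed and reused
+	 * tlb_flush_mmu();  <-- too late: stale TLB entries still
+	 *                       translate through the reused PMDs page
+	 */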
+
+Arguably, a comprehensive solution would use mmu_gather interface to
+batch the TLB flushes and the PMDs page release, however it is not an
+easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
+huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
+deferring the release of the page reference for the PMDs page until
+after i_mmap_rwsem is dropped can confuse huge_pmd_unshare() into
+thinking PMDs are shared when they are not.
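+
+For reference on point (2): huge_pmd_unshare() decides whether a PMD is
+shared from the reference count of the PMDs page, so a deferred
+put_page() would leave the count elevated. A sketch, abridged from
+huge_pmd_unshare() (details vary between stable trees):
+
+	if (page_count(virt_to_page(ptep)) == 1)
+		return 0;		/* count of 1 means "not shared" */
+
+	pud_clear(pud);
+	put_page(virt_to_page(ptep));	/* deferring this put keeps the count
+					 * elevated, so later callers would
+					 * wrongly see the PMD as shared */
+	return 1;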
+
+Fix __unmap_hugepage_range() by adding the missing TLB flush, and by
+forcing a flush when unsharing is successful.
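+
+The force_flush flag is consumed by logic that already exists in
+__unmap_hugepage_range() in the trees this applies to; roughly (abridged,
+and the exact code differs between stable trees):
+
+	if (force_flush) {
+		spin_unlock(ptl);
+		tlb_flush_mmu(tlb);	/* flushed while i_mmap_rwsem
+					 * is still held */
+		goto again;
+	}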
+
+Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/tlb.h | 8 ++++++++
+ arch/ia64/include/asm/tlb.h | 10 ++++++++++
+ arch/s390/include/asm/tlb.h | 13 +++++++++++++
+ arch/sh/include/asm/tlb.h | 10 ++++++++++
+ arch/um/include/asm/tlb.h | 12 ++++++++++++
+ include/asm-generic/tlb.h | 7 +++++++
+ mm/hugetlb.c | 5 ++++-
+ 7 files changed, 64 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/tlb.h
++++ b/arch/arm/include/asm/tlb.h
+@@ -257,6 +257,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gath
+ tlb_add_flush(tlb, addr);
+ }
+
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++ unsigned long size)
++{
++ tlb_add_flush(tlb, address);
++ tlb_add_flush(tlb, address + size - PMD_SIZE);
++}
++
+ #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
+ #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
+ #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
+--- a/arch/ia64/include/asm/tlb.h
++++ b/arch/ia64/include/asm/tlb.h
+@@ -251,6 +251,16 @@ __tlb_remove_tlb_entry (struct mmu_gathe
+ tlb->end_addr = address + PAGE_SIZE;
+ }
+
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++ unsigned long size)
++{
++ if (tlb->start_addr > address)
++ tlb->start_addr = address;
++ if (tlb->end_addr < address + size)
++ tlb->end_addr = address + size;
++}
++
+ #define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
+
+ #define tlb_start_vma(tlb, vma) do { } while (0)
+--- a/arch/s390/include/asm/tlb.h
++++ b/arch/s390/include/asm/tlb.h
+@@ -97,6 +97,19 @@ static inline void tlb_remove_page(struc
+ {
+ free_page_and_swap_cache(page);
+ }
++static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
++ unsigned long address, unsigned long size)
++{
++ /*
++ * The range might exceed the original range that was provided to
++ * tlb_gather_mmu(), so we need to update it even though it is
++ * usually not updated.
++ */
++ if (tlb->start > address)
++ tlb->start = address;
++ if (tlb->end < address + size)
++ tlb->end = address + size;
++}
+
+ /*
+ * pte_free_tlb frees a pte table and clears the CRSTE for the
+--- a/arch/sh/include/asm/tlb.h
++++ b/arch/sh/include/asm/tlb.h
+@@ -65,6 +65,16 @@ tlb_remove_tlb_entry(struct mmu_gather *
+ tlb->end = address + PAGE_SIZE;
+ }
+
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++ unsigned long size)
++{
++ if (tlb->start > address)
++ tlb->start = address;
++ if (tlb->end < address + size)
++ tlb->end = address + size;
++}
++
+ /*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+--- a/arch/um/include/asm/tlb.h
++++ b/arch/um/include/asm/tlb.h
+@@ -110,6 +110,18 @@ static inline void tlb_remove_page(struc
+ __tlb_remove_page(tlb, page);
+ }
+
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++ unsigned long size)
++{
++ tlb->need_flush = 1;
++
++ if (tlb->start > address)
++ tlb->start = address;
++ if (tlb->end < address + size)
++ tlb->end = address + size;
++}
++
+ /**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -165,6 +165,13 @@ static inline void __tlb_reset_range(str
+ #define tlb_end_vma __tlb_end_vma
+ #endif
+
++static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
++ unsigned long address, unsigned long size)
++{
++ tlb->start = min(tlb->start, address);
++ tlb->end = max(tlb->end, address + size);
++}
++
+ #ifndef __tlb_remove_tlb_entry
+ #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+ #endif
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3290,8 +3290,11 @@ again:
+ continue;
+
+ ptl = huge_pte_lock(h, mm, ptep);
+- if (huge_pmd_unshare(mm, &address, ptep))
++ if (huge_pmd_unshare(mm, &address, ptep)) {
++ tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
++ force_flush = 1;
+ goto unlock;
++ }
+
+ pte = huge_ptep_get(ptep);
+ if (huge_pte_none(pte))
--- /dev/null
+From 6cb206508b621a9a0a2c35b60540e399225c8243 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 26 Nov 2021 13:35:26 -0500
+Subject: tracing: Check pid filtering when creating events
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 6cb206508b621a9a0a2c35b60540e399225c8243 upstream.
+
+When pid filtering is activated in an instance, all of the event trace
+files for that instance have the PID_FILTER flag set. This flag determines
+whether or not pid filtering needs to be done on the event; if it is not
+set, the event is executed as normal.
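+
+When filtering is activated, every trace file that already exists in the
+instance gets the flag, roughly like this (abridged from the pid-filter
+write path in kernel/trace/trace_events.c):
+
+	list_for_each_entry(file, &tr->events, list)
+		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+
+Only files created after this point can miss the flag.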
+
+If pid filtering is already enabled when an event is created (via a
+dynamic event or a loaded module), the new event's flag is not updated to
+reflect the current state, and such events are not filtered properly.
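+
+For context, this flag is what makes the per-event fast path consult the
+filtered pid list at all; the check looks roughly like this (abridged
+from trace_event_buffer_reserve()):
+
+	if ((trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
+				  EVENT_FILE_FL_PID_FILTER)) &&
+	    trace_event_ignore_this_pid(trace_file))
+		return NULL;	/* drop the event for filtered pids */
+
+Without EVENT_FILE_FL_PID_FILTER set at creation time, the new event
+bypasses this check and traces all pids regardless of the filter.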
+
+Cc: stable@vger.kernel.org
+Fixes: 3fdaf80f4a836 ("tracing: Implement event pid filtering")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2341,12 +2341,19 @@ static struct trace_event_file *
+ trace_create_new_event(struct trace_event_call *call,
+ struct trace_array *tr)
+ {
++ struct trace_pid_list *pid_list;
+ struct trace_event_file *file;
+
+ file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+ if (!file)
+ return NULL;
+
++ pid_list = rcu_dereference_protected(tr->filtered_pids,
++ lockdep_is_held(&event_mutex));
++
++ if (pid_list)
++ file->flags |= EVENT_FILE_FL_PID_FILTER;
++
+ file->event_call = call;
+ file->tr = tr;
+ atomic_set(&file->sm_ref, 0);