git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
fix up queue-5.15/mm-fix-race-between-__split_huge_pmd_locked-and-gup-.patch
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 16 Jun 2024 14:31:02 +0000 (16:31 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 16 Jun 2024 14:31:02 +0000 (16:31 +0200)
queue-5.15/mm-fix-race-between-__split_huge_pmd_locked-and-gup-.patch

index d682d9ad3bf2bb57aa1dea0759d7353ba45cc98a..9523fa6f08e53151504372142ebb49d9d58ed514 100644 (file)
@@ -61,17 +61,15 @@ Cc: <stable@vger.kernel.org>
 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 Signed-off-by: Sasha Levin <sashal@kernel.org>
 ---
- Documentation/vm/arch_pgtable_helpers.rst |  6 ++-
- arch/powerpc/mm/book3s64/pgtable.c        |  1 +
- arch/s390/include/asm/pgtable.h           |  4 +-
- arch/sparc/mm/tlb.c                       |  1 +
- arch/x86/mm/pgtable.c                     |  2 +
- mm/huge_memory.c                          | 49 ++++++++++++-----------
- mm/pgtable-generic.c                      |  2 +
+ Documentation/vm/arch_pgtable_helpers.rst |    6 ++-
+ arch/powerpc/mm/book3s64/pgtable.c        |    1 
+ arch/s390/include/asm/pgtable.h           |    4 +-
+ arch/sparc/mm/tlb.c                       |    1 
+ arch/x86/mm/pgtable.c                     |    2 +
+ mm/huge_memory.c                          |   49 +++++++++++++++---------------
+ mm/pgtable-generic.c                      |    2 +
  7 files changed, 39 insertions(+), 26 deletions(-)
 
-diff --git a/Documentation/vm/arch_pgtable_helpers.rst b/Documentation/vm/arch_pgtable_helpers.rst
-index 552567d863b86..b8ae5d040b998 100644
 --- a/Documentation/vm/arch_pgtable_helpers.rst
 +++ b/Documentation/vm/arch_pgtable_helpers.rst
 @@ -134,7 +134,8 @@ PMD Page Table Helpers
@@ -94,11 +92,9 @@ index 552567d863b86..b8ae5d040b998 100644
  +---------------------------+--------------------------------------------------+
  | pud_set_huge              | Creates a PUD huge mapping                       |
  +---------------------------+--------------------------------------------------+
-diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
-index da15f28c7b13a..3a22e7d970f33 100644
 --- a/arch/powerpc/mm/book3s64/pgtable.c
 +++ b/arch/powerpc/mm/book3s64/pgtable.c
-@@ -115,6 +115,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+@@ -115,6 +115,7 @@ pmd_t pmdp_invalidate(struct vm_area_str
  {
        unsigned long old_pmd;
  
@@ -106,11 +102,9 @@ index da15f28c7b13a..3a22e7d970f33 100644
        old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return __pmd(old_pmd);
-diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
-index b61426c9ef178..b65ce0c90dd0e 100644
 --- a/arch/s390/include/asm/pgtable.h
 +++ b/arch/s390/include/asm/pgtable.h
-@@ -1625,8 +1625,10 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+@@ -1625,8 +1625,10 @@ static inline pmd_t pmdp_huge_clear_flus
  static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmdp)
  {
@@ -122,11 +116,9 @@ index b61426c9ef178..b65ce0c90dd0e 100644
        return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
  }
  
-diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
-index 9a725547578e8..946f33c1b032f 100644
 --- a/arch/sparc/mm/tlb.c
 +++ b/arch/sparc/mm/tlb.c
-@@ -245,6 +245,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+@@ -245,6 +245,7 @@ pmd_t pmdp_invalidate(struct vm_area_str
  {
        pmd_t old, entry;
  
@@ -134,11 +126,9 @@ index 9a725547578e8..946f33c1b032f 100644
        entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
        old = pmdp_establish(vma, address, pmdp, entry);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
-diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index f16059e9a85e7..5c2be867a2ed9 100644
 --- a/arch/x86/mm/pgtable.c
 +++ b/arch/x86/mm/pgtable.c
-@@ -612,6 +612,8 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
+@@ -612,6 +612,8 @@ int pmdp_clear_flush_young(struct vm_are
  pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
                         pmd_t *pmdp)
  {
@@ -147,11 +137,9 @@ index f16059e9a85e7..5c2be867a2ed9 100644
        /*
         * No flush is necessary. Once an invalid PTE is established, the PTE's
         * access and dirty bits cannot be updated.
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index 265ef8d1393c5..99d38f712863b 100644
 --- a/mm/huge_memory.c
 +++ b/mm/huge_memory.c
-@@ -2024,32 +2024,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+@@ -2024,32 +2024,11 @@ static void __split_huge_pmd_locked(stru
                return __split_huge_zero_page_pmd(vma, haddr, pmd);
        }
  
@@ -186,7 +174,7 @@ index 265ef8d1393c5..99d38f712863b 100644
                entry = pmd_to_swp_entry(old_pmd);
                page = pfn_swap_entry_to_page(entry);
                write = is_writable_migration_entry(entry);
-@@ -2057,6 +2036,30 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+@@ -2057,6 +2036,30 @@ static void __split_huge_pmd_locked(stru
                soft_dirty = pmd_swp_soft_dirty(old_pmd);
                uffd_wp = pmd_swp_uffd_wp(old_pmd);
        } else {
@@ -217,19 +205,17 @@ index 265ef8d1393c5..99d38f712863b 100644
                page = pmd_page(old_pmd);
                if (pmd_dirty(old_pmd))
                        SetPageDirty(page);
-diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
-index b0ce6c7391bf4..cc8b11724cf5a 100644
 --- a/mm/pgtable-generic.c
 +++ b/mm/pgtable-generic.c
-@@ -194,6 +194,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
- pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+@@ -195,6 +195,7 @@ pmd_t pmdp_invalidate(struct vm_area_str
                     pmd_t *pmdp)
  {
-+      VM_WARN_ON_ONCE(!pmd_present(*pmdp));
        pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
++      VM_WARN_ON_ONCE(!pmd_present(*pmdp));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return old;
-@@ -204,6 +205,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ }
+@@ -204,6 +205,7 @@ pmd_t pmdp_invalidate(struct vm_area_str
  pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
                         pmd_t *pmdp)
  {
@@ -237,6 +223,3 @@ index b0ce6c7391bf4..cc8b11724cf5a 100644
        return pmdp_invalidate(vma, address, pmdp);
  }
  #endif
--- 
-2.43.0
-
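
For orientation (not part of the commit itself): the inner patch being fixed up makes __split_huge_pmd_locked() call pmdp_invalidate() only for a PMD that is actually present, and adds a VM_WARN_ON_ONCE(!pmd_present(*pmdp)) sanity check to the pmdp_invalidate() implementations it touches. A minimal sketch of the generic helper with the mm/pgtable-generic.c hunk above applied -- reconstructed from the context lines visible in this diff, not copied from the 5.15 tree, with illustrative includes and comments added -- would read roughly:

  /* Sketch only; reconstructed from the mm/pgtable-generic.c hunk above. */
  #include <linux/mm.h>       /* VM_WARN_ON_ONCE(), struct vm_area_struct */
  #include <linux/pgtable.h>  /* pmd helpers, flush_pmd_tlb_range() */

  pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                        pmd_t *pmdp)
  {
          /* Exchange in an invalidated copy of the huge PMD. */
          pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));

          /*
           * Callers are now expected to hand in a present huge PMD;
           * pmd_mkinvalid() is required to keep pmd_present() true, so the
           * check still catches a caller that passed a non-present entry.
           */
          VM_WARN_ON_ONCE(!pmd_present(*pmdp));
          flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
          return old;
  }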