git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 28 Jun 2021 11:35:58 +0000 (13:35 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 28 Jun 2021 11:35:58 +0000 (13:35 +0200)
added patches:
mm-add-vm_warn_on_once_page-macro.patch
mm-rmap-remove-unneeded-semicolon-in-page_not_mapped.patch
mm-rmap-use-page_not_mapped-in-try_to_unmap.patch
mm-thp-fix-__split_huge_pmd_locked-on-shmem-migration-entry.patch
mm-thp-replace-debug_vm-bug-with-vm_warn-when-unmap-fails-for-split.patch
mm-thp-unmap_mapping_page-to-fix-thp-truncate_cleanup_page.patch

queue-5.10/mm-add-vm_warn_on_once_page-macro.patch [new file with mode: 0644]
queue-5.10/mm-rmap-remove-unneeded-semicolon-in-page_not_mapped.patch [new file with mode: 0644]
queue-5.10/mm-rmap-use-page_not_mapped-in-try_to_unmap.patch [new file with mode: 0644]
queue-5.10/mm-thp-fix-__split_huge_pmd_locked-on-shmem-migration-entry.patch [new file with mode: 0644]
queue-5.10/mm-thp-replace-debug_vm-bug-with-vm_warn-when-unmap-fails-for-split.patch [new file with mode: 0644]
queue-5.10/mm-thp-unmap_mapping_page-to-fix-thp-truncate_cleanup_page.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/mm-add-vm_warn_on_once_page-macro.patch b/queue-5.10/mm-add-vm_warn_on_once_page-macro.patch
new file mode 100644 (file)
index 0000000..002fd02
--- /dev/null
@@ -0,0 +1,59 @@
+From foo@baz Mon Jun 28 01:34:29 PM CEST 2021
+From: Alex Shi <alex.shi@linux.alibaba.com>
+Date: Fri, 18 Dec 2020 14:01:31 -0800
+Subject: mm: add VM_WARN_ON_ONCE_PAGE() macro
+
+From: Alex Shi <alex.shi@linux.alibaba.com>
+
+[ Upstream commit a4055888629bc0467d12d912cd7c90acdf3d9b12 part ]
+
+Add VM_WARN_ON_ONCE_PAGE() macro.
+
+Link: https://lkml.kernel.org/r/1604283436-18880-3-git-send-email-alex.shi@linux.alibaba.com
+Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Note on stable backport: original commit was titled
+mm/memcg: warning on !memcg after readahead page charged
+which included uses of this macro in mm/memcontrol.c: here omitted.
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mmdebug.h |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/include/linux/mmdebug.h
++++ b/include/linux/mmdebug.h
+@@ -37,6 +37,18 @@ void dump_mm(const struct mm_struct *mm)
+                       BUG();                                          \
+               }                                                       \
+       } while (0)
++#define VM_WARN_ON_ONCE_PAGE(cond, page)      ({                      \
++      static bool __section(".data.once") __warned;                   \
++      int __ret_warn_once = !!(cond);                                 \
++                                                                      \
++      if (unlikely(__ret_warn_once && !__warned)) {                   \
++              dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
++              __warned = true;                                        \
++              WARN_ON(1);                                             \
++      }                                                               \
++      unlikely(__ret_warn_once);                                      \
++})
++
+ #define VM_WARN_ON(cond) (void)WARN_ON(cond)
+ #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+ #define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+@@ -48,6 +60,7 @@ void dump_mm(const struct mm_struct *mm)
+ #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
+ #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
+ #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
++#define VM_WARN_ON_ONCE_PAGE(cond, page)  BUILD_BUG_ON_INVALID(cond)
+ #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+ #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
+ #endif
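
For illustration, a minimal caller-side sketch of the macro added above. The function name example_expect_unmapped() and its condition are hypothetical, not part of this patch; the condition mirrors the way the unmap-fails-for-split patch later in this series uses the macro.

/* Hypothetical caller: warn once, with dump_page(), then bail out. */
static void example_expect_unmapped(struct page *page)
{
	/*
	 * VM_WARN_ON_ONCE_PAGE() evaluates to the (unlikely) condition,
	 * so it can gate a recovery path as well as emit the warning.
	 */
	if (VM_WARN_ON_ONCE_PAGE(page_mapped(page), page))
		return;

	/* ... proceed knowing the page has no mappings ... */
}
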
diff --git a/queue-5.10/mm-rmap-remove-unneeded-semicolon-in-page_not_mapped.patch b/queue-5.10/mm-rmap-remove-unneeded-semicolon-in-page_not_mapped.patch
new file mode 100644 (file)
index 0000000..a8e70bf
--- /dev/null
@@ -0,0 +1,31 @@
+From foo@baz Mon Jun 28 01:34:29 PM CEST 2021
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Thu, 25 Feb 2021 17:17:56 -0800
+Subject: mm/rmap: remove unneeded semicolon in page_not_mapped()
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+[ Upstream commit e0af87ff7afcde2660be44302836d2d5618185af ]
+
+Remove extra semicolon without any functional change intended.
+
+Link: https://lkml.kernel.org/r/20210127093425.39640-1-linmiaohe@huawei.com
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/rmap.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1763,7 +1763,7 @@ bool try_to_unmap(struct page *page, enu
+ static int page_not_mapped(struct page *page)
+ {
+       return !page_mapped(page);
+-};
++}
+ /**
+  * try_to_munlock - try to munlock a page
diff --git a/queue-5.10/mm-rmap-use-page_not_mapped-in-try_to_unmap.patch b/queue-5.10/mm-rmap-use-page_not_mapped-in-try_to_unmap.patch
new file mode 100644 (file)
index 0000000..fca4d51
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Mon Jun 28 01:34:29 PM CEST 2021
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Thu, 25 Feb 2021 17:18:03 -0800
+Subject: mm/rmap: use page_not_mapped in try_to_unmap()
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+[ Upstream commit b7e188ec98b1644ff70a6d3624ea16aadc39f5e0 ]
+
+page_mapcount_is_zero() calculates exactly how many mappings a hugepage
+has, only to check the result against 0.  This is a waste of cpu time: we
+can use page_not_mapped() instead and save some atomic_read cycles.
+Remove page_mapcount_is_zero() as it is no longer used, and move
+page_not_mapped() above try_to_unmap() to avoid an "identifier
+undeclared" compilation error.
+
+Link: https://lkml.kernel.org/r/20210130084904.35307-1-linmiaohe@huawei.com
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/rmap.c |   11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1716,9 +1716,9 @@ static bool invalid_migration_vma(struct
+       return vma_is_temporary_stack(vma);
+ }
+-static int page_mapcount_is_zero(struct page *page)
++static int page_not_mapped(struct page *page)
+ {
+-      return !total_mapcount(page);
++      return !page_mapped(page);
+ }
+ /**
+@@ -1736,7 +1736,7 @@ bool try_to_unmap(struct page *page, enu
+       struct rmap_walk_control rwc = {
+               .rmap_one = try_to_unmap_one,
+               .arg = (void *)flags,
+-              .done = page_mapcount_is_zero,
++              .done = page_not_mapped,
+               .anon_lock = page_lock_anon_vma_read,
+       };
+@@ -1760,11 +1760,6 @@ bool try_to_unmap(struct page *page, enu
+       return !page_mapcount(page) ? true : false;
+ }
+-static int page_not_mapped(struct page *page)
+-{
+-      return !page_mapped(page);
+-}
+-
+ /**
+  * try_to_munlock - try to munlock a page
+  * @page: the page to be munlocked
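
For context, a hedged illustration of the trade-off described above; example_mapcount_is_zero() and example_not_mapped() are made-up names standing in for the removed page_mapcount_is_zero() and the reused page_not_mapped(). total_mapcount() has to sum the per-subpage _mapcount counters of a compound page, while page_mapped() only answers a yes/no question and can stop at the first mapping it finds, which is all the rmap walk's .done callback needs.

/* What the removed callback effectively cost on every .done check ... */
static int example_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);	/* sums every subpage's _mapcount */
}

/* ... versus the plain yes/no check the patch switches to. */
static int example_not_mapped(struct page *page)
{
	return !page_mapped(page);	/* may return early on the first hit */
}
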
diff --git a/queue-5.10/mm-thp-fix-__split_huge_pmd_locked-on-shmem-migration-entry.patch b/queue-5.10/mm-thp-fix-__split_huge_pmd_locked-on-shmem-migration-entry.patch
new file mode 100644 (file)
index 0000000..d69995a
--- /dev/null
@@ -0,0 +1,130 @@
+From foo@baz Mon Jun 28 01:34:29 PM CEST 2021
+From: Hugh Dickins <hughd@google.com>
+Date: Tue, 15 Jun 2021 18:23:45 -0700
+Subject: mm/thp: fix __split_huge_pmd_locked() on shmem migration entry
+
+From: Hugh Dickins <hughd@google.com>
+
+[ Upstream commit 99fa8a48203d62b3743d866fc48ef6abaee682be ]
+
+Patch series "mm/thp: fix THP splitting unmap BUGs and related", v10.
+
+Here is the v2 batch of long-standing THP bug fixes that I had not got
+around to sending before, now prompted by Wang Yugui's report
+https://lore.kernel.org/linux-mm/20210412180659.B9E3.409509F4@e16-tech.com/
+
+Wang Yugui has tested a rollup of these fixes applied to 5.10.39, and
+they have done no harm, but have *not* fixed that issue: something more
+is needed and I have no idea of what.
+
+This patch (of 7):
+
+Stressing huge tmpfs page migration racing hole punch often crashed on
+the VM_BUG_ON(!pmd_present) in pmdp_huge_clear_flush(), with DEBUG_VM=y
+kernel; or shortly afterwards, on a bad dereference in
+__split_huge_pmd_locked() when DEBUG_VM=n.  They forgot to allow for pmd
+migration entries in the non-anonymous case.
+
+Full disclosure: those particular experiments were on a kernel with more
+relaxed mmap_lock and i_mmap_rwsem locking, and were not repeated on the
+vanilla kernel: it is conceivable that stricter locking happens to avoid
+those cases, or makes them less likely; but __split_huge_pmd_locked()
+already allowed for pmd migration entries when handling anonymous THPs,
+so this commit brings the shmem and file THP handling into line.
+
+And while there: use old_pmd rather than _pmd, as in the following
+blocks; and make it clearer to the eye that the !vma_is_anonymous()
+block is self-contained, making an early return after accounting for
+unmapping.
+
+Link: https://lkml.kernel.org/r/af88612-1473-2eaa-903-8d1a448b26@google.com
+Link: https://lkml.kernel.org/r/dd221a99-efb3-cd1d-6256-7e646af29314@google.com
+Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Jue Wang <juew@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Note on stable backport: this commit made intervening cleanups in
+pmdp_huge_clear_flush() redundant: here it's rediffed to skip them.
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c     |   27 ++++++++++++++++++---------
+ mm/pgtable-generic.c |    4 ++--
+ 2 files changed, 20 insertions(+), 11 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2031,7 +2031,7 @@ static void __split_huge_pmd_locked(stru
+       count_vm_event(THP_SPLIT_PMD);
+       if (!vma_is_anonymous(vma)) {
+-              _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
++              old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+               /*
+                * We are going to unmap this huge page. So
+                * just go ahead and zap it
+@@ -2040,16 +2040,25 @@ static void __split_huge_pmd_locked(stru
+                       zap_deposited_table(mm, pmd);
+               if (vma_is_special_huge(vma))
+                       return;
+-              page = pmd_page(_pmd);
+-              if (!PageDirty(page) && pmd_dirty(_pmd))
+-                      set_page_dirty(page);
+-              if (!PageReferenced(page) && pmd_young(_pmd))
+-                      SetPageReferenced(page);
+-              page_remove_rmap(page, true);
+-              put_page(page);
++              if (unlikely(is_pmd_migration_entry(old_pmd))) {
++                      swp_entry_t entry;
++
++                      entry = pmd_to_swp_entry(old_pmd);
++                      page = migration_entry_to_page(entry);
++              } else {
++                      page = pmd_page(old_pmd);
++                      if (!PageDirty(page) && pmd_dirty(old_pmd))
++                              set_page_dirty(page);
++                      if (!PageReferenced(page) && pmd_young(old_pmd))
++                              SetPageReferenced(page);
++                      page_remove_rmap(page, true);
++                      put_page(page);
++              }
+               add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+               return;
+-      } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
++      }
++
++      if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+               /*
+                * FIXME: Do we want to invalidate secondary mmu by calling
+                * mmu_notifier_invalidate_range() see comments below inside
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -135,8 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_ar
+ {
+       pmd_t pmd;
+       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+-      VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+-                         !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
++      VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
++                         !pmd_devmap(*pmdp));
+       pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+       return pmd;
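
A hedged distillation of the pattern the huge_memory.c hunk above applies; example_page_of_cleared_pmd() is a made-up helper, and the dirty/referenced/rmap/refcount handling of the real code is omitted. The point is that a huge pmd which has just been cleared may hold either a normal present entry or a pmd migration entry, and the struct page must be recovered accordingly before the file-THP accounting runs.

/* Simplified sketch only; see the real __split_huge_pmd_locked() hunk. */
static struct page *example_page_of_cleared_pmd(pmd_t old_pmd)
{
	if (unlikely(is_pmd_migration_entry(old_pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(old_pmd);

		return migration_entry_to_page(entry);
	}
	return pmd_page(old_pmd);	/* the previously handled, present case */
}
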
diff --git a/queue-5.10/mm-thp-replace-debug_vm-bug-with-vm_warn-when-unmap-fails-for-split.patch b/queue-5.10/mm-thp-replace-debug_vm-bug-with-vm_warn-when-unmap-fails-for-split.patch
new file mode 100644 (file)
index 0000000..d666eea
--- /dev/null
@@ -0,0 +1,117 @@
+From foo@baz Mon Jun 28 01:34:29 PM CEST 2021
+From: Yang Shi <shy828301@gmail.com>
+Date: Tue, 15 Jun 2021 18:24:07 -0700
+Subject: mm: thp: replace DEBUG_VM BUG with VM_WARN when unmap fails for split
+
+From: Yang Shi <shy828301@gmail.com>
+
+[ Upstream commit 504e070dc08f757bccaed6d05c0f53ecbfac8a23 ]
+
+When debugging the bug reported by Wang Yugui [1], try_to_unmap() may
+fail, but the first VM_BUG_ON_PAGE() only checks page_mapcount(), so it
+may miss the failure when the head page is unmapped but another subpage
+is still mapped.  The second DEBUG_VM BUG(), which checks the total
+mapcount, would then catch it.  This can be confusing.
+
+As this is not a fatal issue, consolidate the two DEBUG_VM checks
+into one VM_WARN_ON_ONCE_PAGE().
+
+[1] https://lore.kernel.org/linux-mm/20210412180659.B9E3.409509F4@e16-tech.com/
+
+Link: https://lkml.kernel.org/r/d0f0db68-98b8-ebfb-16dc-f29df24cf012@google.com
+Signed-off-by: Yang Shi <shy828301@gmail.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jue Wang <juew@google.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Note on stable backport: fixed up variables in split_huge_page_to_list().
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |   24 +++++++-----------------
+ 1 file changed, 7 insertions(+), 17 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2341,15 +2341,15 @@ static void unmap_page(struct page *page
+ {
+       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
+               TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
+-      bool unmap_success;
+       VM_BUG_ON_PAGE(!PageHead(page), page);
+       if (PageAnon(page))
+               ttu_flags |= TTU_SPLIT_FREEZE;
+-      unmap_success = try_to_unmap(page, ttu_flags);
+-      VM_BUG_ON_PAGE(!unmap_success, page);
++      try_to_unmap(page, ttu_flags);
++
++      VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
+ }
+ static void remap_page(struct page *page, unsigned int nr)
+@@ -2639,7 +2639,7 @@ int split_huge_page_to_list(struct page
+       struct deferred_split *ds_queue = get_deferred_split_queue(head);
+       struct anon_vma *anon_vma = NULL;
+       struct address_space *mapping = NULL;
+-      int count, mapcount, extra_pins, ret;
++      int extra_pins, ret;
+       unsigned long flags;
+       pgoff_t end;
+@@ -2699,7 +2699,6 @@ int split_huge_page_to_list(struct page
+       }
+       unmap_page(head);
+-      VM_BUG_ON_PAGE(compound_mapcount(head), head);
+       /* prevent PageLRU to go away from under us, and freeze lru stats */
+       spin_lock_irqsave(&pgdata->lru_lock, flags);
+@@ -2718,9 +2717,7 @@ int split_huge_page_to_list(struct page
+       /* Prevent deferred_split_scan() touching ->_refcount */
+       spin_lock(&ds_queue->split_queue_lock);
+-      count = page_count(head);
+-      mapcount = total_mapcount(head);
+-      if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
++      if (page_ref_freeze(head, 1 + extra_pins)) {
+               if (!list_empty(page_deferred_list(head))) {
+                       ds_queue->split_queue_len--;
+                       list_del(page_deferred_list(head));
+@@ -2736,16 +2733,9 @@ int split_huge_page_to_list(struct page
+               __split_huge_page(page, list, end, flags);
+               ret = 0;
+       } else {
+-              if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
+-                      pr_alert("total_mapcount: %u, page_count(): %u\n",
+-                                      mapcount, count);
+-                      if (PageTail(page))
+-                              dump_page(head, NULL);
+-                      dump_page(page, "total_mapcount(head) > 0");
+-                      BUG();
+-              }
+               spin_unlock(&ds_queue->split_queue_lock);
+-fail:         if (mapping)
++fail:
++              if (mapping)
+                       xa_unlock(&mapping->i_pages);
+               spin_unlock_irqrestore(&pgdata->lru_lock, flags);
+               remap_page(head, thp_nr_pages(head));
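
For context, a hedged sketch of the control flow that results from the hunks above; example_try_freeze_and_split() is a made-up helper that heavily condenses split_huge_page_to_list(), with all locking omitted. If try_to_unmap() left a mapping behind, the VM_WARN_ON_ONCE_PAGE() above fires once, page_ref_freeze() fails because the leftover mapping still holds a page reference, and the caller simply sees a failed split instead of a DEBUG_VM BUG().

/* Condensed, assumption-laden sketch of the failure path. */
static int example_try_freeze_and_split(struct page *head, int extra_pins)
{
	if (page_ref_freeze(head, 1 + extra_pins)) {
		/* ... __split_huge_page(), return 0 ... */
		return 0;
	}
	/* ... unlock, remap_page(head, thp_nr_pages(head)) ... */
	return -EBUSY;		/* split failed; the caller may retry later */
}
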
diff --git a/queue-5.10/mm-thp-unmap_mapping_page-to-fix-thp-truncate_cleanup_page.patch b/queue-5.10/mm-thp-unmap_mapping_page-to-fix-thp-truncate_cleanup_page.patch
new file mode 100644 (file)
index 0000000..0821992
--- /dev/null
@@ -0,0 +1,254 @@
+From foo@baz Mon Jun 28 01:34:29 PM CEST 2021
+From: Hugh Dickins <hughd@google.com>
+Date: Tue, 15 Jun 2021 18:24:03 -0700
+Subject: mm/thp: unmap_mapping_page() to fix THP truncate_cleanup_page()
+
+From: Hugh Dickins <hughd@google.com>
+
+[ Upstream commit 22061a1ffabdb9c3385de159c5db7aac3a4df1cc ]
+
+There is a race between THP unmapping and truncation, when truncate sees
+pmd_none() and skips the entry, after munmap's zap_huge_pmd() cleared
+it, but before its page_remove_rmap() gets to decrement
+compound_mapcount: generating false "BUG: Bad page cache" reports that
+the page is still mapped when deleted.  This commit fixes that, but not
+in the way I hoped.
+
+The first attempt used try_to_unmap(page, TTU_SYNC|TTU_IGNORE_MLOCK)
+instead of unmap_mapping_range() in truncate_cleanup_page(): it has
+often been an annoyance that we usually call unmap_mapping_range() with
+no pages locked, but there apply it to a single locked page.
+try_to_unmap() looks more suitable for a single locked page.
+
+However, try_to_unmap_one() contains a VM_BUG_ON_PAGE(!pvmw.pte,page):
+it is used to insert THP migration entries, but not used to unmap THPs.
+Copy zap_huge_pmd() and add THP handling now? Perhaps, but their TLB
+needs are different, I'm too ignorant of the DAX cases, and couldn't
+decide how far to go for anon+swap.  Set that aside.
+
+The second attempt took a different tack: make no change in truncate.c,
+but modify zap_huge_pmd() to insert an invalidated huge pmd instead of
+clearing it initially, then pmd_clear() between page_remove_rmap() and
+unlocking at the end.  Nice.  But powerpc blows that approach out of the
+water, with its serialize_against_pte_lookup(), and interesting pgtable
+usage.  It would need serious help to get working on powerpc (with a
+minor optimization issue on s390 too).  Set that aside.
+
+Just add an "if (page_mapped(page)) synchronize_rcu();" or other such
+delay, after unmapping in truncate_cleanup_page()? Perhaps, but though
+that's likely to reduce or eliminate the number of incidents, it would
+give less assurance of whether we had identified the problem correctly.
+
+This successful iteration introduces "unmap_mapping_page(page)" instead
+of try_to_unmap(), and goes the usual unmap_mapping_range_tree() route,
+with an addition to details.  Then zap_pmd_range() watches for this
+case, and does spin_unlock(pmd_lock) if so - just like
+page_vma_mapped_walk() now does in the PVMW_SYNC case.  Not pretty, but
+safe.
+
+Note that unmap_mapping_page() is doing a VM_BUG_ON(!PageLocked) to
+assert its interface; but currently that's only used to make sure that
+page->mapping is stable, and zap_pmd_range() doesn't care if the page is
+locked or not.  Along these lines, in invalidate_inode_pages2_range()
+move the initial unmap_mapping_range() out from under page lock, before
+then calling unmap_mapping_page() under page lock if still mapped.
+
+Link: https://lkml.kernel.org/r/a2a4a148-cdd8-942c-4ef8-51b77f643dbe@google.com
+Fixes: fc127da085c2 ("truncate: handle file thp")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jue Wang <juew@google.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Note on stable backport: fixed up call to truncate_cleanup_page()
+in truncate_inode_pages_range().
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h |    3 +++
+ mm/memory.c        |   41 +++++++++++++++++++++++++++++++++++++++++
+ mm/truncate.c      |   43 +++++++++++++++++++------------------------
+ 3 files changed, 63 insertions(+), 24 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1648,6 +1648,7 @@ struct zap_details {
+       struct address_space *check_mapping;    /* Check page->mapping if set */
+       pgoff_t first_index;                    /* Lowest page->index to unmap */
+       pgoff_t last_index;                     /* Highest page->index to unmap */
++      struct page *single_page;               /* Locked page to be unmapped */
+ };
+ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1695,6 +1696,7 @@ extern vm_fault_t handle_mm_fault(struct
+ extern int fixup_user_fault(struct mm_struct *mm,
+                           unsigned long address, unsigned int fault_flags,
+                           bool *unlocked);
++void unmap_mapping_page(struct page *page);
+ void unmap_mapping_pages(struct address_space *mapping,
+               pgoff_t start, pgoff_t nr, bool even_cows);
+ void unmap_mapping_range(struct address_space *mapping,
+@@ -1715,6 +1717,7 @@ static inline int fixup_user_fault(struc
+       BUG();
+       return -EFAULT;
+ }
++static inline void unmap_mapping_page(struct page *page) { }
+ static inline void unmap_mapping_pages(struct address_space *mapping,
+               pgoff_t start, pgoff_t nr, bool even_cows) { }
+ static inline void unmap_mapping_range(struct address_space *mapping,
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1355,7 +1355,18 @@ static inline unsigned long zap_pmd_rang
+                       else if (zap_huge_pmd(tlb, vma, pmd, addr))
+                               goto next;
+                       /* fall through */
++              } else if (details && details->single_page &&
++                         PageTransCompound(details->single_page) &&
++                         next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
++                      spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
++                      /*
++                       * Take and drop THP pmd lock so that we cannot return
++                       * prematurely, while zap_huge_pmd() has cleared *pmd,
++                       * but not yet decremented compound_mapcount().
++                       */
++                      spin_unlock(ptl);
+               }
++
+               /*
+                * Here there can be other concurrent MADV_DONTNEED or
+                * trans huge page faults running, and if the pmd is
+@@ -3186,6 +3197,36 @@ static inline void unmap_mapping_range_t
+ }
+ /**
++ * unmap_mapping_page() - Unmap single page from processes.
++ * @page: The locked page to be unmapped.
++ *
++ * Unmap this page from any userspace process which still has it mmaped.
++ * Typically, for efficiency, the range of nearby pages has already been
++ * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
++ * truncation or invalidation holds the lock on a page, it may find that
++ * the page has been remapped again: and then uses unmap_mapping_page()
++ * to unmap it finally.
++ */
++void unmap_mapping_page(struct page *page)
++{
++      struct address_space *mapping = page->mapping;
++      struct zap_details details = { };
++
++      VM_BUG_ON(!PageLocked(page));
++      VM_BUG_ON(PageTail(page));
++
++      details.check_mapping = mapping;
++      details.first_index = page->index;
++      details.last_index = page->index + thp_nr_pages(page) - 1;
++      details.single_page = page;
++
++      i_mmap_lock_write(mapping);
++      if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
++              unmap_mapping_range_tree(&mapping->i_mmap, &details);
++      i_mmap_unlock_write(mapping);
++}
++
++/**
+  * unmap_mapping_pages() - Unmap pages from processes.
+  * @mapping: The address space containing pages to be unmapped.
+  * @start: Index of first page to be unmapped.
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -173,13 +173,10 @@ void do_invalidatepage(struct page *page
+  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
+  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
+  */
+-static void
+-truncate_cleanup_page(struct address_space *mapping, struct page *page)
++static void truncate_cleanup_page(struct page *page)
+ {
+-      if (page_mapped(page)) {
+-              unsigned int nr = thp_nr_pages(page);
+-              unmap_mapping_pages(mapping, page->index, nr, false);
+-      }
++      if (page_mapped(page))
++              unmap_mapping_page(page);
+       if (page_has_private(page))
+               do_invalidatepage(page, 0, thp_size(page));
+@@ -224,7 +221,7 @@ int truncate_inode_page(struct address_s
+       if (page->mapping != mapping)
+               return -EIO;
+-      truncate_cleanup_page(mapping, page);
++      truncate_cleanup_page(page);
+       delete_from_page_cache(page);
+       return 0;
+ }
+@@ -362,7 +359,7 @@ void truncate_inode_pages_range(struct a
+                       pagevec_add(&locked_pvec, page);
+               }
+               for (i = 0; i < pagevec_count(&locked_pvec); i++)
+-                      truncate_cleanup_page(mapping, locked_pvec.pages[i]);
++                      truncate_cleanup_page(locked_pvec.pages[i]);
+               delete_from_page_cache_batch(mapping, &locked_pvec);
+               for (i = 0; i < pagevec_count(&locked_pvec); i++)
+                       unlock_page(locked_pvec.pages[i]);
+@@ -737,6 +734,16 @@ int invalidate_inode_pages2_range(struct
+                               continue;
+                       }
++                      if (!did_range_unmap && page_mapped(page)) {
++                              /*
++                               * If page is mapped, before taking its lock,
++                               * zap the rest of the file in one hit.
++                               */
++                              unmap_mapping_pages(mapping, index,
++                                              (1 + end - index), false);
++                              did_range_unmap = 1;
++                      }
++
+                       lock_page(page);
+                       WARN_ON(page_to_index(page) != index);
+                       if (page->mapping != mapping) {
+@@ -744,23 +751,11 @@ int invalidate_inode_pages2_range(struct
+                               continue;
+                       }
+                       wait_on_page_writeback(page);
+-                      if (page_mapped(page)) {
+-                              if (!did_range_unmap) {
+-                                      /*
+-                                       * Zap the rest of the file in one hit.
+-                                       */
+-                                      unmap_mapping_pages(mapping, index,
+-                                              (1 + end - index), false);
+-                                      did_range_unmap = 1;
+-                              } else {
+-                                      /*
+-                                       * Just zap this page
+-                                       */
+-                                      unmap_mapping_pages(mapping, index,
+-                                                              1, false);
+-                              }
+-                      }
++
++                      if (page_mapped(page))
++                              unmap_mapping_page(page);
+                       BUG_ON(page_mapped(page));
++
+                       ret2 = do_launder_page(mapping, page);
+                       if (ret2 == 0) {
+                               if (!invalidate_complete_page2(mapping, page))
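
For illustration, a hedged caller-side sketch of the new interface; example_unmap_if_remapped() is a made-up function that mirrors the truncate.c hunks above. unmap_mapping_page() expects the page to be locked, and is intended as the per-page fallback once the earlier bulk unmap_mapping_pages()/unmap_mapping_range() pass has lost a race and the page shows up mapped again under its lock.

/* Hypothetical caller following the locking pattern used above. */
static void example_unmap_if_remapped(struct address_space *mapping,
				      struct page *page)
{
	lock_page(page);
	if (page->mapping == mapping && page_mapped(page))
		unmap_mapping_page(page);	/* requires the page lock */
	unlock_page(page);
}
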
diff --git a/queue-5.10/series b/queue-5.10/series
index d42c45e46dfb14b65b2bbebcf46349cbe154ac65..bc888a2567425333628c3beefd1c7d28439da4ac 100644 (file)
--- a/queue-5.10/series
@@ -68,3 +68,9 @@ kthread_worker-split-code-for-canceling-the-delayed-work-timer.patch
 kthread-prevent-deadlock-when-kthread_mod_delayed_work-races-with-kthread_cancel_delayed_work_sync.patch
 x86-fpu-preserve-supervisor-states-in-sanitize_restored_user_xstate.patch
 x86-fpu-make-init_fpstate-correct-with-optimized-xsave.patch
+mm-add-vm_warn_on_once_page-macro.patch
+mm-rmap-remove-unneeded-semicolon-in-page_not_mapped.patch
+mm-rmap-use-page_not_mapped-in-try_to_unmap.patch
+mm-thp-fix-__split_huge_pmd_locked-on-shmem-migration-entry.patch
+mm-thp-unmap_mapping_page-to-fix-thp-truncate_cleanup_page.patch
+mm-thp-replace-debug_vm-bug-with-vm_warn-when-unmap-fails-for-split.patch