--- /dev/null
+From fe19bd3dae3d15d2fbfdb3de8839a6ea0fe94264 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:52 -0700
+Subject: mm, futex: fix shared futex pgoff on shmem huge page
+
+From: Hugh Dickins <hughd@google.com>
+
+commit fe19bd3dae3d15d2fbfdb3de8839a6ea0fe94264 upstream.
+
+If more than one futex is placed on a shmem huge page, it can happen
+that waking the second wakes the first instead, and leaves the second
+waiting: the key's shared.pgoff is wrong.
+
+When 3.11 commit 13d60f4b6ab5 ("futex: Take hugepages into account when
+generating futex_key") went in, the only shared huge pages came from
+hugetlbfs, and the code added to deal with its exceptional page->index
+was put into hugetlb source. Then that was missed when 4.8 added shmem
+huge pages.
+
+page_to_pgoff() is what others use for this nowadays: except that, as
+currently written, it gives the right answer on hugetlbfs head, but
+nonsense on hugetlbfs tails. Fix that by calling hugetlbfs-specific
+hugetlb_basepage_index() on PageHuge tails as well as on head.
+
+Yes, it's unconventional to declare hugetlb_basepage_index() there in
+pagemap.h, rather than in hugetlb.h; but I do not expect anything but
+page_to_pgoff() ever to need it.
+
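+For reference, the helper used by futex after this change (see the
+pagemap.h hunk below) ends up as:
+
+    static inline pgoff_t page_to_pgoff(struct page *page)
+    {
+        if (unlikely(PageHuge(page)))
+            return hugetlb_basepage_index(page);
+        return page_to_index(page);
+    }
+
+so hugetlbfs heads and tails both go through hugetlb_basepage_index(),
+while shmem huge pages get their offset in PAGE_SIZE units from
+page_to_index().
+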
+[akpm@linux-foundation.org: give hugetlb_basepage_index() prototype the correct scope]
+
+Link: https://lkml.kernel.org/r/b17d946b-d09-326e-b42a-52884c36df32@google.com
+Fixes: 800d8c63b2e9 ("shmem: add huge pages support")
+Reported-by: Neel Natu <neelnatu@google.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Zhang Yi <wetpzy@gmail.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Darren Hart <dvhart@infradead.org>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/hugetlb.h | 16 ----------------
+ include/linux/pagemap.h | 13 +++++++------
+ kernel/futex.c | 3 +--
+ mm/hugetlb.c | 5 +----
+ 4 files changed, 9 insertions(+), 28 deletions(-)
+
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -628,17 +628,6 @@ static inline int hstate_index(struct hs
+ return h - hstates;
+ }
+
+-pgoff_t __basepage_index(struct page *page);
+-
+-/* Return page->index in PAGE_SIZE units */
+-static inline pgoff_t basepage_index(struct page *page)
+-{
+- if (!PageCompound(page))
+- return page->index;
+-
+- return __basepage_index(page);
+-}
+-
+ extern int dissolve_free_huge_page(struct page *page);
+ extern int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
+@@ -871,11 +860,6 @@ static inline int hstate_index(struct hs
+ return 0;
+ }
+
+-static inline pgoff_t basepage_index(struct page *page)
+-{
+- return page->index;
+-}
+-
+ static inline int dissolve_free_huge_page(struct page *page)
+ {
+ return 0;
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -501,7 +501,7 @@ static inline struct page *read_mapping_
+ }
+
+ /*
+- * Get index of the page with in radix-tree
++ * Get index of the page within radix-tree (but not for hugetlb pages).
+ * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
+ */
+ static inline pgoff_t page_to_index(struct page *page)
+@@ -520,15 +520,16 @@ static inline pgoff_t page_to_index(stru
+ return pgoff;
+ }
+
++extern pgoff_t hugetlb_basepage_index(struct page *page);
++
+ /*
+- * Get the offset in PAGE_SIZE.
+- * (TODO: hugepage should have ->index in PAGE_SIZE)
++ * Get the offset in PAGE_SIZE (even for hugetlb pages).
++ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
+ */
+ static inline pgoff_t page_to_pgoff(struct page *page)
+ {
+- if (unlikely(PageHeadHuge(page)))
+- return page->index << compound_order(page);
+-
++ if (unlikely(PageHuge(page)))
++ return hugetlb_basepage_index(page);
+ return page_to_index(page);
+ }
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -35,7 +35,6 @@
+ #include <linux/jhash.h>
+ #include <linux/pagemap.h>
+ #include <linux/syscalls.h>
+-#include <linux/hugetlb.h>
+ #include <linux/freezer.h>
+ #include <linux/memblock.h>
+ #include <linux/fault-inject.h>
+@@ -652,7 +651,7 @@ again:
+
+ key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ key->shared.i_seq = get_inode_sequence_number(inode);
+- key->shared.pgoff = basepage_index(tail);
++ key->shared.pgoff = page_to_pgoff(tail);
+ rcu_read_unlock();
+ }
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1635,15 +1635,12 @@ struct address_space *hugetlb_page_mappi
+ return NULL;
+ }
+
+-pgoff_t __basepage_index(struct page *page)
++pgoff_t hugetlb_basepage_index(struct page *page)
+ {
+ struct page *page_head = compound_head(page);
+ pgoff_t index = page_index(page_head);
+ unsigned long compound_idx;
+
+- if (!PageHuge(page_head))
+- return page_index(page);
+-
+ if (compound_order(page_head) >= MAX_ORDER)
+ compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+ else
--- /dev/null
+From b3807a91aca7d21c05d5790612e49969117a72b9 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:17 -0700
+Subject: mm: page_vma_mapped_walk(): add a level of indentation
+
+From: Hugh Dickins <hughd@google.com>
+
+commit b3807a91aca7d21c05d5790612e49969117a72b9 upstream.
+
+page_vma_mapped_walk() cleanup: add a level of indentation to much of
+the body, making no functional change in this commit, but reducing the
+later diff when this is all converted to a loop.
+
+[hughd@google.com: page_vma_mapped_walk(): add a level of indentation fix]
+ Link: https://lkml.kernel.org/r/7f817555-3ce1-c785-e438-87d8efdcaf26@google.com
+
+Link: https://lkml.kernel.org/r/efde211-f3e2-fe54-977-ef481419e7f3@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 105 ++++++++++++++++++++++++++-------------------------
+ 1 file changed, 55 insertions(+), 50 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -172,62 +172,67 @@ bool page_vma_mapped_walk(struct page_vm
+ if (pvmw->pte)
+ goto next_pte;
+ restart:
+- pgd = pgd_offset(mm, pvmw->address);
+- if (!pgd_present(*pgd))
+- return false;
+- p4d = p4d_offset(pgd, pvmw->address);
+- if (!p4d_present(*p4d))
+- return false;
+- pud = pud_offset(p4d, pvmw->address);
+- if (!pud_present(*pud))
+- return false;
+- pvmw->pmd = pmd_offset(pud, pvmw->address);
+- /*
+- * Make sure the pmd value isn't cached in a register by the
+- * compiler and used as a stale value after we've observed a
+- * subsequent update.
+- */
+- pmde = READ_ONCE(*pvmw->pmd);
+- if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+- pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+- pmde = *pvmw->pmd;
+- if (likely(pmd_trans_huge(pmde))) {
+- if (pvmw->flags & PVMW_MIGRATION)
+- return not_found(pvmw);
+- if (pmd_page(pmde) != page)
+- return not_found(pvmw);
+- return true;
+- }
+- if (!pmd_present(pmde)) {
+- swp_entry_t entry;
++ {
++ pgd = pgd_offset(mm, pvmw->address);
++ if (!pgd_present(*pgd))
++ return false;
++ p4d = p4d_offset(pgd, pvmw->address);
++ if (!p4d_present(*p4d))
++ return false;
++ pud = pud_offset(p4d, pvmw->address);
++ if (!pud_present(*pud))
++ return false;
+
+- if (!thp_migration_supported() ||
+- !(pvmw->flags & PVMW_MIGRATION))
+- return not_found(pvmw);
+- entry = pmd_to_swp_entry(pmde);
+- if (!is_migration_entry(entry) ||
+- migration_entry_to_page(entry) != page)
+- return not_found(pvmw);
+- return true;
+- }
+- /* THP pmd was split under us: handle on pte level */
+- spin_unlock(pvmw->ptl);
+- pvmw->ptl = NULL;
+- } else if (!pmd_present(pmde)) {
++ pvmw->pmd = pmd_offset(pud, pvmw->address);
+ /*
+- * If PVMW_SYNC, take and drop THP pmd lock so that we
+- * cannot return prematurely, while zap_huge_pmd() has
+- * cleared *pmd but not decremented compound_mapcount().
++ * Make sure the pmd value isn't cached in a register by the
++ * compiler and used as a stale value after we've observed a
++ * subsequent update.
+ */
+- if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
+- spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
++ pmde = READ_ONCE(*pvmw->pmd);
++
++ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
++ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
++ pmde = *pvmw->pmd;
++ if (likely(pmd_trans_huge(pmde))) {
++ if (pvmw->flags & PVMW_MIGRATION)
++ return not_found(pvmw);
++ if (pmd_page(pmde) != page)
++ return not_found(pvmw);
++ return true;
++ }
++ if (!pmd_present(pmde)) {
++ swp_entry_t entry;
+
+- spin_unlock(ptl);
++ if (!thp_migration_supported() ||
++ !(pvmw->flags & PVMW_MIGRATION))
++ return not_found(pvmw);
++ entry = pmd_to_swp_entry(pmde);
++ if (!is_migration_entry(entry) ||
++ migration_entry_to_page(entry) != page)
++ return not_found(pvmw);
++ return true;
++ }
++ /* THP pmd was split under us: handle on pte level */
++ spin_unlock(pvmw->ptl);
++ pvmw->ptl = NULL;
++ } else if (!pmd_present(pmde)) {
++ /*
++ * If PVMW_SYNC, take and drop THP pmd lock so that we
++ * cannot return prematurely, while zap_huge_pmd() has
++ * cleared *pmd but not decremented compound_mapcount().
++ */
++ if ((pvmw->flags & PVMW_SYNC) &&
++ PageTransCompound(page)) {
++ spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
++
++ spin_unlock(ptl);
++ }
++ return false;
+ }
+- return false;
++ if (!map_pte(pvmw))
++ goto next_pte;
+ }
+- if (!map_pte(pvmw))
+- goto next_pte;
+ while (1) {
+ unsigned long end;
+
--- /dev/null
+From 448282487483d6fa5b2eeeafaa0acc681e544a9c Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:14 -0700
+Subject: mm: page_vma_mapped_walk(): crossing page table boundary
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 448282487483d6fa5b2eeeafaa0acc681e544a9c upstream.
+
+page_vma_mapped_walk() cleanup: adjust the test for crossing page table
+boundary - I believe pvmw->address is always page-aligned, but nothing
+else here assumed that; and remember to reset pvmw->pte to NULL after
+unmapping the page table, though I never saw any bug from that.
+
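+To illustrate the new test (numbers assume x86_64's 4kB pages and 2MB
+PMDs, so PMD_SIZE - PAGE_SIZE == 0x1ff000): a page-aligned address has
+its low 12 bits clear, so
+
+    (pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0
+
+is true exactly when the address is a multiple of PMD_SIZE, the same as
+the old "% PMD_SIZE == 0" test; but unlike that test, any address within
+the first page of a PMD range now counts as a crossing, so a stray
+sub-page offset cannot make the walker miss the boundary.
+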
+Link: https://lkml.kernel.org/r/799b3f9c-2a9e-dfef-5d89-26e9f76fd97@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -243,16 +243,16 @@ next_pte:
+ if (pvmw->address >= end)
+ return not_found(pvmw);
+ /* Did we cross page table boundary? */
+- if (pvmw->address % PMD_SIZE == 0) {
+- pte_unmap(pvmw->pte);
++ if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
+ if (pvmw->ptl) {
+ spin_unlock(pvmw->ptl);
+ pvmw->ptl = NULL;
+ }
++ pte_unmap(pvmw->pte);
++ pvmw->pte = NULL;
+ goto restart;
+- } else {
+- pvmw->pte++;
+ }
++ pvmw->pte++;
+ } while (pte_none(*pvmw->pte));
+
+ if (!pvmw->ptl) {
--- /dev/null
+From a765c417d876cc635f628365ec9aa6f09470069a Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:23 -0700
+Subject: mm: page_vma_mapped_walk(): get vma_address_end() earlier
+
+From: Hugh Dickins <hughd@google.com>
+
+commit a765c417d876cc635f628365ec9aa6f09470069a upstream.
+
+page_vma_mapped_walk() cleanup: get THP's vma_address_end() at the
+start, rather than later at next_pte.
+
+It's a little unnecessary overhead on the first call, but makes for a
+simpler loop in the following commit.
+
+Link: https://lkml.kernel.org/r/4542b34d-862f-7cb4-bb22-e0df6ce830a2@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -170,6 +170,15 @@ bool page_vma_mapped_walk(struct page_vm
+ return true;
+ }
+
++ /*
++ * Seek to next pte only makes sense for THP.
++ * But more important than that optimization, is to filter out
++ * any PageKsm page: whose page->index misleads vma_address()
++ * and vma_address_end() to disaster.
++ */
++ end = PageTransCompound(page) ?
++ vma_address_end(page, pvmw->vma) :
++ pvmw->address + PAGE_SIZE;
+ if (pvmw->pte)
+ goto next_pte;
+ restart:
+@@ -237,10 +246,6 @@ this_pte:
+ if (check_pte(pvmw))
+ return true;
+ next_pte:
+- /* Seek to next pte only makes sense for THP */
+- if (!PageTransHuge(page))
+- return not_found(pvmw);
+- end = vma_address_end(page, pvmw->vma);
+ do {
+ pvmw->address += PAGE_SIZE;
+ if (pvmw->address >= end)
--- /dev/null
+From e2e1d4076c77b3671cf8ce702535ae7dee3acf89 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:10 -0700
+Subject: mm: page_vma_mapped_walk(): prettify PVMW_MIGRATION block
+
+From: Hugh Dickins <hughd@google.com>
+
+commit e2e1d4076c77b3671cf8ce702535ae7dee3acf89 upstream.
+
+page_vma_mapped_walk() cleanup: rearrange the !pmd_present() block to
+follow the same "return not_found, return not_found, return true"
+pattern as the block above it (note: returning not_found there is never
+premature, since existence or prior existence of huge pmd guarantees
+good alignment).
+
+Link: https://lkml.kernel.org/r/378c8650-1488-2edf-9647-32a53cf2e21@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 30 ++++++++++++++----------------
+ 1 file changed, 14 insertions(+), 16 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -197,24 +197,22 @@ restart:
+ if (pmd_page(pmde) != page)
+ return not_found(pvmw);
+ return true;
+- } else if (!pmd_present(pmde)) {
+- if (thp_migration_supported()) {
+- if (!(pvmw->flags & PVMW_MIGRATION))
+- return not_found(pvmw);
+- if (is_migration_entry(pmd_to_swp_entry(pmde))) {
+- swp_entry_t entry = pmd_to_swp_entry(pmde);
++ }
++ if (!pmd_present(pmde)) {
++ swp_entry_t entry;
+
+- if (migration_entry_to_page(entry) != page)
+- return not_found(pvmw);
+- return true;
+- }
+- }
+- return not_found(pvmw);
+- } else {
+- /* THP pmd was split under us: handle on pte level */
+- spin_unlock(pvmw->ptl);
+- pvmw->ptl = NULL;
++ if (!thp_migration_supported() ||
++ !(pvmw->flags & PVMW_MIGRATION))
++ return not_found(pvmw);
++ entry = pmd_to_swp_entry(pmde);
++ if (!is_migration_entry(entry) ||
++ migration_entry_to_page(entry) != page)
++ return not_found(pvmw);
++ return true;
+ }
++ /* THP pmd was split under us: handle on pte level */
++ spin_unlock(pvmw->ptl);
++ pvmw->ptl = NULL;
+ } else if (!pmd_present(pmde)) {
+ /*
+ * If PVMW_SYNC, take and drop THP pmd lock so that we
--- /dev/null
+From 6d0fd5987657cb0c9756ce684e3a74c0f6351728 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:04 -0700
+Subject: mm: page_vma_mapped_walk(): settle PageHuge on entry
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 6d0fd5987657cb0c9756ce684e3a74c0f6351728 upstream.
+
+page_vma_mapped_walk() cleanup: get the hugetlbfs PageHuge case out of
+the way at the start, so no need to worry about it later.
+
+Link: https://lkml.kernel.org/r/e31a483c-6d73-a6bb-26c5-43c3b880a2@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -152,10 +152,11 @@ bool page_vma_mapped_walk(struct page_vm
+ if (pvmw->pmd && !pvmw->pte)
+ return not_found(pvmw);
+
+- if (pvmw->pte)
+- goto next_pte;
+-
+ if (unlikely(PageHuge(page))) {
++ /* The only possible mapping was handled on last iteration */
++ if (pvmw->pte)
++ return not_found(pvmw);
++
+ /* when pud is not present, pte will be NULL */
+ pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
+ if (!pvmw->pte)
+@@ -167,6 +168,9 @@ bool page_vma_mapped_walk(struct page_vm
+ return not_found(pvmw);
+ return true;
+ }
++
++ if (pvmw->pte)
++ goto next_pte;
+ restart:
+ pgd = pgd_offset(mm, pvmw->address);
+ if (!pgd_present(*pgd))
+@@ -232,7 +236,7 @@ restart:
+ return true;
+ next_pte:
+ /* Seek to next pte only makes sense for THP */
+- if (!PageTransHuge(page) || PageHuge(page))
++ if (!PageTransHuge(page))
+ return not_found(pvmw);
+ end = vma_address_end(page, pvmw->vma);
+ do {
--- /dev/null
+From 474466301dfd8b39a10c01db740645f3f7ae9a28 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:20 -0700
+Subject: mm: page_vma_mapped_walk(): use goto instead of while (1)
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 474466301dfd8b39a10c01db740645f3f7ae9a28 upstream.
+
+page_vma_mapped_walk() cleanup: add a label this_pte, matching next_pte,
+and use "goto this_pte", in place of the "while (1)" loop at the end.
+
+Link: https://lkml.kernel.org/r/a52b234a-851-3616-2525-f42736e8934@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -143,6 +143,7 @@ bool page_vma_mapped_walk(struct page_vm
+ {
+ struct mm_struct *mm = pvmw->vma->vm_mm;
+ struct page *page = pvmw->page;
++ unsigned long end;
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+@@ -232,10 +233,7 @@ restart:
+ }
+ if (!map_pte(pvmw))
+ goto next_pte;
+- }
+- while (1) {
+- unsigned long end;
+-
++this_pte:
+ if (check_pte(pvmw))
+ return true;
+ next_pte:
+@@ -264,6 +262,7 @@ next_pte:
+ pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+ spin_lock(pvmw->ptl);
+ }
++ goto this_pte;
+ }
+ }
+
--- /dev/null
+From f003c03bd29e6f46fef1b9a8e8d636ac732286d5 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:01 -0700
+Subject: mm: page_vma_mapped_walk(): use page for pvmw->page
+
+From: Hugh Dickins <hughd@google.com>
+
+commit f003c03bd29e6f46fef1b9a8e8d636ac732286d5 upstream.
+
+Patch series "mm: page_vma_mapped_walk() cleanup and THP fixes".
+
+I've marked all of these for stable: many are merely cleanups, but I
+think they are much better before the main fix than after.
+
+This patch (of 11):
+
+page_vma_mapped_walk() cleanup: sometimes the local copy of pvmw->page
+was used, sometimes pvmw->page itself: use the local copy "page"
+throughout.
+
+Link: https://lkml.kernel.org/r/589b358c-febc-c88e-d4c2-7834b37fa7bf@google.com
+Link: https://lkml.kernel.org/r/88e67645-f467-c279-bf5e-af4b5c6b13eb@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reviewed-by: Alistair Popple <apopple@nvidia.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -155,7 +155,7 @@ bool page_vma_mapped_walk(struct page_vm
+ if (pvmw->pte)
+ goto next_pte;
+
+- if (unlikely(PageHuge(pvmw->page))) {
++ if (unlikely(PageHuge(page))) {
+ /* when pud is not present, pte will be NULL */
+ pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
+ if (!pvmw->pte)
+@@ -216,8 +216,7 @@ restart:
+ * cannot return prematurely, while zap_huge_pmd() has
+ * cleared *pmd but not decremented compound_mapcount().
+ */
+- if ((pvmw->flags & PVMW_SYNC) &&
+- PageTransCompound(pvmw->page)) {
++ if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
+ spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+ spin_unlock(ptl);
+@@ -233,9 +232,9 @@ restart:
+ return true;
+ next_pte:
+ /* Seek to next pte only makes sense for THP */
+- if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
++ if (!PageTransHuge(page) || PageHuge(page))
+ return not_found(pvmw);
+- end = vma_address_end(pvmw->page, pvmw->vma);
++ end = vma_address_end(page, pvmw->vma);
+ do {
+ pvmw->address += PAGE_SIZE;
+ if (pvmw->address >= end)
--- /dev/null
+From 3306d3119ceacc43ea8b141a73e21fea68eec30c Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:07 -0700
+Subject: mm: page_vma_mapped_walk(): use pmde for *pvmw->pmd
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 3306d3119ceacc43ea8b141a73e21fea68eec30c upstream.
+
+page_vma_mapped_walk() cleanup: re-evaluate pmde after taking lock, then
+use it in subsequent tests, instead of repeatedly dereferencing pointer.
+
+Link: https://lkml.kernel.org/r/53fbc9d-891e-46b2-cb4b-468c3b19238e@google.com
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -190,18 +190,19 @@ restart:
+ pmde = READ_ONCE(*pvmw->pmd);
+ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+- if (likely(pmd_trans_huge(*pvmw->pmd))) {
++ pmde = *pvmw->pmd;
++ if (likely(pmd_trans_huge(pmde))) {
+ if (pvmw->flags & PVMW_MIGRATION)
+ return not_found(pvmw);
+- if (pmd_page(*pvmw->pmd) != page)
++ if (pmd_page(pmde) != page)
+ return not_found(pvmw);
+ return true;
+- } else if (!pmd_present(*pvmw->pmd)) {
++ } else if (!pmd_present(pmde)) {
+ if (thp_migration_supported()) {
+ if (!(pvmw->flags & PVMW_MIGRATION))
+ return not_found(pvmw);
+- if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
+- swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
++ if (is_migration_entry(pmd_to_swp_entry(pmde))) {
++ swp_entry_t entry = pmd_to_swp_entry(pmde);
+
+ if (migration_entry_to_page(entry) != page)
+ return not_found(pvmw);
--- /dev/null
+From a7a69d8ba88d8dcee7ef00e91d413a4bd003a814 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:30 -0700
+Subject: mm/thp: another PVMW_SYNC fix in page_vma_mapped_walk()
+
+From: Hugh Dickins <hughd@google.com>
+
+commit a7a69d8ba88d8dcee7ef00e91d413a4bd003a814 upstream.
+
+Aha! Shouldn't that quick scan over pte_none()s make sure that it holds
+ptlock in the PVMW_SYNC case? That too might have been responsible for
+BUGs or WARNs in split_huge_page_to_list() or its unmap_page(), though
+I've never seen any.
+
+Link: https://lkml.kernel.org/r/1bdf384c-8137-a149-2a1e-475a4791c3c@google.com
+Link: https://lore.kernel.org/linux-mm/20210412180659.B9E3.409509F4@e16-tech.com/
+Fixes: ace71a19cec5 ("mm: introduce page_vma_mapped_walk()")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Tested-by: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -275,6 +275,10 @@ next_pte:
+ goto restart;
+ }
+ pvmw->pte++;
++ if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
++ pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
++ spin_lock(pvmw->ptl);
++ }
+ } while (pte_none(*pvmw->pte));
+
+ if (!pvmw->ptl) {
--- /dev/null
+From 31657170deaf1d8d2f6a1955fbc6fa9d228be036 Mon Sep 17 00:00:00 2001
+From: Jue Wang <juew@google.com>
+Date: Tue, 15 Jun 2021 18:24:00 -0700
+Subject: mm/thp: fix page_address_in_vma() on file THP tails
+
+From: Jue Wang <juew@google.com>
+
+commit 31657170deaf1d8d2f6a1955fbc6fa9d228be036 upstream.
+
+Anon THP tails were already supported, but memory-failure may need to
+use page_address_in_vma() on file THP tails, which its page->mapping
+check did not permit: fix it.
+
+hughd adds: no current usage is known to hit the issue, but this does
+fix a subtle trap in a general helper: best fixed in stable sooner than
+later.
+
+Link: https://lkml.kernel.org/r/a0d9b53-bf5d-8bab-ac5-759dc61819c1@google.com
+Fixes: 800d8c63b2e9 ("shmem: add huge pages support")
+Signed-off-by: Jue Wang <juew@google.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/rmap.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -709,11 +709,11 @@ unsigned long page_address_in_vma(struct
+ if (!vma->anon_vma || !page__anon_vma ||
+ vma->anon_vma->root != page__anon_vma->root)
+ return -EFAULT;
+- } else if (page->mapping) {
+- if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
+- return -EFAULT;
+- } else
++ } else if (!vma->vm_file) {
++ return -EFAULT;
++ } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
+ return -EFAULT;
++ }
+
+ return vma_address(page, vma);
+ }
--- /dev/null
+From a9a7504d9beaf395481faa91e70e2fd08f7a3dde Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 24 Jun 2021 18:39:26 -0700
+Subject: mm/thp: fix page_vma_mapped_walk() if THP mapped by ptes
+
+From: Hugh Dickins <hughd@google.com>
+
+commit a9a7504d9beaf395481faa91e70e2fd08f7a3dde upstream.
+
+Running certain tests with a DEBUG_VM kernel would crash within hours,
+on the total_mapcount BUG() in split_huge_page_to_list(), while trying
+to free up some memory by punching a hole in a shmem huge page: split's
+try_to_unmap() was unable to find all the mappings of the page (which,
+on a !DEBUG_VM kernel, would then keep the huge page pinned in memory).
+
+Crash dumps showed two tail pages of a shmem huge page remained mapped
+by pte: ptes in a non-huge-aligned vma of a gVisor process, at the end
+of a long unmapped range; and no page table had yet been allocated for
+the head of the huge page to be mapped into.
+
+Although designed to handle these odd misaligned huge-page-mapped-by-pte
+cases, page_vma_mapped_walk() falls short by returning false prematurely
+when !pmd_present or !pud_present or !p4d_present or !pgd_present: there
+are cases when a huge page may span the boundary, with ptes present in
+the next.
+
+Restructure page_vma_mapped_walk() as a loop to continue in these cases,
+while keeping its layout much as before. Add a step_forward() helper to
+advance pvmw->address across those boundaries: originally I tried to use
+mm's standard p?d_addr_end() macros, but hit the same crash 512 times
+less often: because of the way redundant levels are folded together, but
+folded differently in different configurations, it was just too
+difficult to use them correctly; and step_forward() is simpler anyway.
+
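+For example (assuming 2MB PMDs), when the walk finds !pmd_present at
+address 0x7f12345ff000, step_forward(pvmw, PMD_SIZE) computes
+
+    (0x7f12345ff000 + 0x200000) & ~0x1fffff == 0x7f1234600000
+
+so the loop resumes at the start of the next PMD-sized region instead of
+returning false; and if that rounding ever wraps to 0, the address is set
+to ULONG_MAX so the enclosing "while (pvmw->address < end)" loop
+terminates.
+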
+Link: https://lkml.kernel.org/r/fedb8632-1798-de42-f39e-873551d5bc81@google.com
+Fixes: ace71a19cec5 ("mm: introduce page_vma_mapped_walk()")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_vma_mapped.c | 34 +++++++++++++++++++++++++---------
+ 1 file changed, 25 insertions(+), 9 deletions(-)
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -115,6 +115,13 @@ static bool check_pte(struct page_vma_ma
+ return pfn_is_match(pvmw->page, pfn);
+ }
+
++static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
++{
++ pvmw->address = (pvmw->address + size) & ~(size - 1);
++ if (!pvmw->address)
++ pvmw->address = ULONG_MAX;
++}
++
+ /**
+ * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
+ * @pvmw->address
+@@ -182,16 +189,22 @@ bool page_vma_mapped_walk(struct page_vm
+ if (pvmw->pte)
+ goto next_pte;
+ restart:
+- {
++ do {
+ pgd = pgd_offset(mm, pvmw->address);
+- if (!pgd_present(*pgd))
+- return false;
++ if (!pgd_present(*pgd)) {
++ step_forward(pvmw, PGDIR_SIZE);
++ continue;
++ }
+ p4d = p4d_offset(pgd, pvmw->address);
+- if (!p4d_present(*p4d))
+- return false;
++ if (!p4d_present(*p4d)) {
++ step_forward(pvmw, P4D_SIZE);
++ continue;
++ }
+ pud = pud_offset(p4d, pvmw->address);
+- if (!pud_present(*pud))
+- return false;
++ if (!pud_present(*pud)) {
++ step_forward(pvmw, PUD_SIZE);
++ continue;
++ }
+
+ pvmw->pmd = pmd_offset(pud, pvmw->address);
+ /*
+@@ -238,7 +251,8 @@ restart:
+
+ spin_unlock(ptl);
+ }
+- return false;
++ step_forward(pvmw, PMD_SIZE);
++ continue;
+ }
+ if (!map_pte(pvmw))
+ goto next_pte;
+@@ -268,7 +282,9 @@ next_pte:
+ spin_lock(pvmw->ptl);
+ }
+ goto this_pte;
+- }
++ } while (pvmw->address < end);
++
++ return false;
+ }
+
+ /**
--- /dev/null
+From 494334e43c16d63b878536a26505397fce6ff3a2 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Tue, 15 Jun 2021 18:23:56 -0700
+Subject: mm/thp: fix vma_address() if virtual address below file offset
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 494334e43c16d63b878536a26505397fce6ff3a2 upstream.
+
+Running certain tests with a DEBUG_VM kernel would crash within hours,
+on the total_mapcount BUG() in split_huge_page_to_list(), while trying
+to free up some memory by punching a hole in a shmem huge page: split's
+try_to_unmap() was unable to find all the mappings of the page (which,
+on a !DEBUG_VM kernel, would then keep the huge page pinned in memory).
+
+When that BUG() was changed to a WARN(), it would later crash on the
+VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma) in
+mm/internal.h:vma_address(), used by rmap_walk_file() for
+try_to_unmap().
+
+vma_address() is usually correct, but there's a wraparound case when the
+vm_start address is unusually low, but vm_pgoff not so low:
+vma_address() chooses max(start, vma->vm_start), but that decides on the
+wrong address, because start has become almost ULONG_MAX.
+
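+A made-up example, assuming 4kB pages and a 2MB THP: take a vma with
+vm_start 0x4000 and vm_pgoff 0x200, walked for a THP head page with
+pgoff 0x100 (whose 512 subpages span file offsets 0x100-0x2ff, so the
+page does overlap the vma).  The old __vma_address() computes
+
+    0x4000 + ((0x100 - 0x200) << 12) == 0xfffffffffff04000
+
+in unsigned arithmetic, and max(start, vma->vm_start) then picks that
+wrapped value instead of the correct answer, vm_start itself.
+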
+Rewrite vma_address() to be more careful about vm_pgoff; move the
+VM_BUG_ON_VMA() out of it, returning -EFAULT for errors, so that it can
+be safely used from page_mapped_in_vma() and page_address_in_vma() too.
+
+Add vma_address_end() to apply similar care to end address calculation,
+in page_vma_mapped_walk() and page_mkclean_one() and try_to_unmap_one();
+though it raises a question of whether callers would do better to supply
+pvmw->end to page_vma_mapped_walk() - I chose not, for a smaller patch.
+
+An irritation is that their apparent generality breaks down on KSM
+pages, which cannot be located by the page->index that page_to_pgoff()
+uses: as commit 4b0ece6fa016 ("mm: migrate: fix remove_migration_pte()
+for ksm pages") once discovered. I dithered over the best thing to do
+about that, and have ended up with a VM_BUG_ON_PAGE(PageKsm) in both
+vma_address() and vma_address_end(); though the only place in danger of
+using it on them was try_to_unmap_one().
+
+Sidenote: vma_address() and vma_address_end() now use compound_nr() on a
+head page, instead of thp_size(): to make the right calculation on a
+hugetlbfs page, whether or not THPs are configured. try_to_unmap() is
+used on hugetlbfs pages, but perhaps the wrong calculation never
+mattered.
+
+Link: https://lkml.kernel.org/r/caf1c1a3-7cfb-7f8f-1beb-ba816e932825@google.com
+Fixes: a8fa41ad2f6f ("mm, rmap: check all VMAs that PTE-mapped THP can be part of")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jue Wang <juew@google.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/internal.h | 51 ++++++++++++++++++++++++++++++++++++++-------------
+ mm/page_vma_mapped.c | 16 ++++++----------
+ mm/rmap.c | 16 ++++++++--------
+ 3 files changed, 52 insertions(+), 31 deletions(-)
+
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -379,27 +379,52 @@ static inline void mlock_migrate_page(st
+ extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
+
+ /*
+- * At what user virtual address is page expected in @vma?
++ * At what user virtual address is page expected in vma?
++ * Returns -EFAULT if all of the page is outside the range of vma.
++ * If page is a compound head, the entire compound page is considered.
+ */
+ static inline unsigned long
+-__vma_address(struct page *page, struct vm_area_struct *vma)
++vma_address(struct page *page, struct vm_area_struct *vma)
+ {
+- pgoff_t pgoff = page_to_pgoff(page);
+- return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++ pgoff_t pgoff;
++ unsigned long address;
++
++ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
++ pgoff = page_to_pgoff(page);
++ if (pgoff >= vma->vm_pgoff) {
++ address = vma->vm_start +
++ ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++ /* Check for address beyond vma (or wrapped through 0?) */
++ if (address < vma->vm_start || address >= vma->vm_end)
++ address = -EFAULT;
++ } else if (PageHead(page) &&
++ pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
++ /* Test above avoids possibility of wrap to 0 on 32-bit */
++ address = vma->vm_start;
++ } else {
++ address = -EFAULT;
++ }
++ return address;
+ }
+
++/*
++ * Then at what user virtual address will none of the page be found in vma?
++ * Assumes that vma_address() already returned a good starting address.
++ * If page is a compound head, the entire compound page is considered.
++ */
+ static inline unsigned long
+-vma_address(struct page *page, struct vm_area_struct *vma)
++vma_address_end(struct page *page, struct vm_area_struct *vma)
+ {
+- unsigned long start, end;
+-
+- start = __vma_address(page, vma);
+- end = start + thp_size(page) - PAGE_SIZE;
+-
+- /* page should be within @vma mapping range */
+- VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
++ pgoff_t pgoff;
++ unsigned long address;
+
+- return max(start, vma->vm_start);
++ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
++ pgoff = page_to_pgoff(page) + compound_nr(page);
++ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++ /* Check for address beyond vma (or wrapped through 0?) */
++ if (address < vma->vm_start || address > vma->vm_end)
++ address = vma->vm_end;
++ return address;
+ }
+
+ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -227,18 +227,18 @@ restart:
+ if (!map_pte(pvmw))
+ goto next_pte;
+ while (1) {
++ unsigned long end;
++
+ if (check_pte(pvmw))
+ return true;
+ next_pte:
+ /* Seek to next pte only makes sense for THP */
+ if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+ return not_found(pvmw);
++ end = vma_address_end(pvmw->page, pvmw->vma);
+ do {
+ pvmw->address += PAGE_SIZE;
+- if (pvmw->address >= pvmw->vma->vm_end ||
+- pvmw->address >=
+- __vma_address(pvmw->page, pvmw->vma) +
+- thp_size(pvmw->page))
++ if (pvmw->address >= end)
+ return not_found(pvmw);
+ /* Did we cross page table boundary? */
+ if (pvmw->address % PMD_SIZE == 0) {
+@@ -276,14 +276,10 @@ int page_mapped_in_vma(struct page *page
+ .vma = vma,
+ .flags = PVMW_SYNC,
+ };
+- unsigned long start, end;
+-
+- start = __vma_address(page, vma);
+- end = start + thp_size(page) - PAGE_SIZE;
+
+- if (unlikely(end < vma->vm_start || start >= vma->vm_end))
++ pvmw.address = vma_address(page, vma);
++ if (pvmw.address == -EFAULT)
+ return 0;
+- pvmw.address = max(start, vma->vm_start);
+ if (!page_vma_mapped_walk(&pvmw))
+ return 0;
+ page_vma_mapped_walk_done(&pvmw);
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -700,7 +700,6 @@ static bool should_defer_flush(struct mm
+ */
+ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+ {
+- unsigned long address;
+ if (PageAnon(page)) {
+ struct anon_vma *page__anon_vma = page_anon_vma(page);
+ /*
+@@ -715,10 +714,8 @@ unsigned long page_address_in_vma(struct
+ return -EFAULT;
+ } else
+ return -EFAULT;
+- address = __vma_address(page, vma);
+- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+- return -EFAULT;
+- return address;
++
++ return vma_address(page, vma);
+ }
+
+ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
+@@ -912,7 +909,7 @@ static bool page_mkclean_one(struct page
+ */
+ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
+ 0, vma, vma->vm_mm, address,
+- min(vma->vm_end, address + page_size(page)));
++ vma_address_end(page, vma));
+ mmu_notifier_invalidate_range_start(&range);
+
+ while (page_vma_mapped_walk(&pvmw)) {
+@@ -1415,9 +1412,10 @@ static bool try_to_unmap_one(struct page
+ * Note that the page can not be free in this function as call of
+ * try_to_unmap() must hold a reference on the page.
+ */
++ range.end = PageKsm(page) ?
++ address + PAGE_SIZE : vma_address_end(page, vma);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+- address,
+- min(vma->vm_end, address + page_size(page)));
++ address, range.end);
+ if (PageHuge(page)) {
+ /*
+ * If sharing is possible, start and end will be adjusted
+@@ -1869,6 +1867,7 @@ static void rmap_walk_anon(struct page *
+ struct vm_area_struct *vma = avc->vma;
+ unsigned long address = vma_address(page, vma);
+
++ VM_BUG_ON_VMA(address == -EFAULT, vma);
+ cond_resched();
+
+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+@@ -1923,6 +1922,7 @@ static void rmap_walk_file(struct page *
+ pgoff_start, pgoff_end) {
+ unsigned long address = vma_address(page, vma);
+
++ VM_BUG_ON_VMA(address == -EFAULT, vma);
+ cond_resched();
+
+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
--- /dev/null
+From 3b77e8c8cde581dadab9a0f1543a347e24315f11 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Tue, 15 Jun 2021 18:23:49 -0700
+Subject: mm/thp: make is_huge_zero_pmd() safe and quicker
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 3b77e8c8cde581dadab9a0f1543a347e24315f11 upstream.
+
+Most callers of is_huge_zero_pmd() supply a pmd already verified
+present; but a few (notably zap_huge_pmd()) do not - it might be a pmd
+migration entry, in which the pfn is encoded differently from a present
+pmd: which might pass the is_huge_zero_pmd() test (though not on x86,
+since L1TF forced us to protect against that); or perhaps even crash in
+pmd_page() applied to a swap-like entry.
+
+Make it safe by adding pmd_present() check into is_huge_zero_pmd()
+itself; and make it quicker by saving huge_zero_pfn, so that
+is_huge_zero_pmd() will not need to do that pmd_page() lookup each time.
+
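+With this patch the check becomes a pfn compare against the cached
+value, plus a presence check (see the huge_mm.h hunk below):
+
+    static inline bool is_huge_zero_pmd(pmd_t pmd)
+    {
+        return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
+    }
+
+so a non-present (e.g. migration) entry can never match, and no
+pmd_page() dereference is needed at all.
+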
+__split_huge_pmd_locked() checked pmd_trans_huge() before: that worked,
+but is unnecessary now that is_huge_zero_pmd() checks present.
+
+Link: https://lkml.kernel.org/r/21ea9ca-a1f5-8b90-5e88-95fb1c49bbfa@google.com
+Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jue Wang <juew@google.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/huge_mm.h | 8 +++++++-
+ mm/huge_memory.c | 5 ++++-
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -297,6 +297,7 @@ struct page *follow_devmap_pud(struct vm
+ extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+
+ extern struct page *huge_zero_page;
++extern unsigned long huge_zero_pfn;
+
+ static inline bool is_huge_zero_page(struct page *page)
+ {
+@@ -305,7 +306,7 @@ static inline bool is_huge_zero_page(str
+
+ static inline bool is_huge_zero_pmd(pmd_t pmd)
+ {
+- return is_huge_zero_page(pmd_page(pmd));
++ return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
+ }
+
+ static inline bool is_huge_zero_pud(pud_t pud)
+@@ -450,6 +451,11 @@ static inline bool is_huge_zero_page(str
+ {
+ return false;
+ }
++
++static inline bool is_huge_zero_pmd(pmd_t pmd)
++{
++ return false;
++}
+
+ static inline bool is_huge_zero_pud(pud_t pud)
+ {
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -61,6 +61,7 @@ static struct shrinker deferred_split_sh
+
+ static atomic_t huge_zero_refcount;
+ struct page *huge_zero_page __read_mostly;
++unsigned long huge_zero_pfn __read_mostly = ~0UL;
+
+ bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+ {
+@@ -97,6 +98,7 @@ retry:
+ __free_pages(zero_page, compound_order(zero_page));
+ goto retry;
+ }
++ WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
+
+ /* We take additional reference here. It will be put back by shrinker */
+ atomic_set(&huge_zero_refcount, 2);
+@@ -146,6 +148,7 @@ static unsigned long shrink_huge_zero_pa
+ if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ struct page *zero_page = xchg(&huge_zero_page, NULL);
+ BUG_ON(zero_page == NULL);
++ WRITE_ONCE(huge_zero_pfn, ~0UL);
+ __free_pages(zero_page, compound_order(zero_page));
+ return HPAGE_PMD_NR;
+ }
+@@ -2058,7 +2061,7 @@ static void __split_huge_pmd_locked(stru
+ return;
+ }
+
+- if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
++ if (is_huge_zero_pmd(*pmd)) {
+ /*
+ * FIXME: Do we want to invalidate secondary mmu by calling
+ * mmu_notifier_invalidate_range() see comments below inside
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
-@@ -2341,15 +2341,15 @@ static void unmap_page(struct page *page
+@@ -2344,15 +2344,15 @@ static void unmap_page(struct page *page
{
- enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
+ enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
- bool unmap_success;
}
static void remap_page(struct page *page, unsigned int nr)
-@@ -2639,7 +2639,7 @@ int split_huge_page_to_list(struct page
+@@ -2642,7 +2642,7 @@ int split_huge_page_to_list(struct page
struct deferred_split *ds_queue = get_deferred_split_queue(head);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
unsigned long flags;
pgoff_t end;
-@@ -2699,7 +2699,6 @@ int split_huge_page_to_list(struct page
+@@ -2702,7 +2702,6 @@ int split_huge_page_to_list(struct page
}
unmap_page(head);
/* prevent PageLRU to go away from under us, and freeze lru stats */
spin_lock_irqsave(&pgdata->lru_lock, flags);
-@@ -2718,9 +2717,7 @@ int split_huge_page_to_list(struct page
+@@ -2721,9 +2720,7 @@ int split_huge_page_to_list(struct page
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
if (!list_empty(page_deferred_list(head))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
-@@ -2736,16 +2733,9 @@ int split_huge_page_to_list(struct page
+@@ -2739,16 +2736,9 @@ int split_huge_page_to_list(struct page
__split_huge_page(page, list, end, flags);
ret = 0;
} else {
--- /dev/null
+From 732ed55823fc3ad998d43b86bf771887bcc5ec67 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Tue, 15 Jun 2021 18:23:53 -0700
+Subject: mm/thp: try_to_unmap() use TTU_SYNC for safe splitting
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 732ed55823fc3ad998d43b86bf771887bcc5ec67 upstream.
+
+Stressing huge tmpfs often crashed on unmap_page()'s VM_BUG_ON_PAGE
+(!unmap_success): with dump_page() showing mapcount:1, but then its raw
+struct page output showing _mapcount ffffffff i.e. mapcount 0.
+
+And even if that particular VM_BUG_ON_PAGE(!unmap_success) is removed,
+it is immediately followed by a VM_BUG_ON_PAGE(compound_mapcount(head)),
+and further down an IS_ENABLED(CONFIG_DEBUG_VM) total_mapcount BUG():
+all indicative of some mapcount difficulty in development here perhaps.
+But the !CONFIG_DEBUG_VM path handles the failures correctly and
+silently.
+
+I believe the problem is that once a racing unmap has cleared pte or
+pmd, try_to_unmap_one() may skip taking the page table lock, and emerge
+from try_to_unmap() before the racing task has reached decrementing
+mapcount.
+
+Instead of abandoning the unsafe VM_BUG_ON_PAGE(), and the ones that
+follow, use PVMW_SYNC in try_to_unmap_one() in this case: adding
+TTU_SYNC to the options, and passing that from unmap_page().
+
+When CONFIG_DEBUG_VM, or for non-debug too? Consensus is to do the same
+for both: the slight overhead added should rarely matter, except perhaps
+if splitting sparsely-populated multiply-mapped shmem. Once confident
+that bugs are fixed, TTU_SYNC here can be removed, and the race
+tolerated.
+
+Link: https://lkml.kernel.org/r/c1e95853-8bcd-d8fd-55fa-e7f2488e78f@google.com
+Fixes: fec89c109f3a ("thp: rewrite freeze_page()/unfreeze_page() with generic rmap walkers")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jue Wang <juew@google.com>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Wang Yugui <wangyugui@e16-tech.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rmap.h | 1 +
+ mm/huge_memory.c | 2 +-
+ mm/page_vma_mapped.c | 11 +++++++++++
+ mm/rmap.c | 17 ++++++++++++++++-
+ 4 files changed, 29 insertions(+), 2 deletions(-)
+
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -91,6 +91,7 @@ enum ttu_flags {
+
+ TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
+ TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
++ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
+ TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
+ TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
+ * and caller guarantees they will
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2342,7 +2342,7 @@ void vma_adjust_trans_huge(struct vm_are
+
+ static void unmap_page(struct page *page)
+ {
+- enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
++ enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
+ TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
+ bool unmap_success;
+
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -211,6 +211,17 @@ restart:
+ pvmw->ptl = NULL;
+ }
+ } else if (!pmd_present(pmde)) {
++ /*
++ * If PVMW_SYNC, take and drop THP pmd lock so that we
++ * cannot return prematurely, while zap_huge_pmd() has
++ * cleared *pmd but not decremented compound_mapcount().
++ */
++ if ((pvmw->flags & PVMW_SYNC) &&
++ PageTransCompound(pvmw->page)) {
++ spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
++
++ spin_unlock(ptl);
++ }
+ return false;
+ }
+ if (!map_pte(pvmw))
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1385,6 +1385,15 @@ static bool try_to_unmap_one(struct page
+ struct mmu_notifier_range range;
+ enum ttu_flags flags = (enum ttu_flags)(long)arg;
+
++ /*
++ * When racing against e.g. zap_pte_range() on another cpu,
++ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
++ * try_to_unmap() may return false when it is about to become true,
++ * if page table locking is skipped: use TTU_SYNC to wait for that.
++ */
++ if (flags & TTU_SYNC)
++ pvmw.flags = PVMW_SYNC;
++
+ /* munlock has nothing to gain from examining un-locked vmas */
+ if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
+ return true;
+@@ -1757,7 +1766,13 @@ bool try_to_unmap(struct page *page, enu
+ else
+ rmap_walk(page, &rwc);
+
+- return !page_mapcount(page) ? true : false;
++ /*
++ * When racing against e.g. zap_pte_range() on another cpu,
++ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
++ * try_to_unmap() may return false when it is about to become true,
++ * if page table locking is skipped: use TTU_SYNC to wait for that.
++ */
++ return !page_mapcount(page);
+ }
+
+ /**
--- /dev/null
+From ffc90cbb2970ab88b66ea51dd580469eede57b67 Mon Sep 17 00:00:00 2001
+From: Xu Yu <xuyu@linux.alibaba.com>
+Date: Tue, 15 Jun 2021 18:23:42 -0700
+Subject: mm, thp: use head page in __migration_entry_wait()
+
+From: Xu Yu <xuyu@linux.alibaba.com>
+
+commit ffc90cbb2970ab88b66ea51dd580469eede57b67 upstream.
+
+We noticed that a hung task can happen in a corner-case but practical
+scenario when CONFIG_PREEMPT_NONE is enabled, as follows.
+
+Process 0 Process 1 Process 2..Inf
+split_huge_page_to_list
+ unmap_page
+ split_huge_pmd_address
+ __migration_entry_wait(head)
+ __migration_entry_wait(tail)
+ remap_page (roll back)
+ remove_migration_ptes
+ rmap_walk_anon
+ cond_resched
+
+Here __migration_entry_wait(tail) occurs in kernel space, e.g.
+copy_to_user() in fstat(), which will immediately fault again without
+rescheduling, and thus occupy the cpu fully.
+
+When there are too many processes performing __migration_entry_wait on
+the tail page, remap_page() will never complete after cond_resched().
+
+This patch makes __migration_entry_wait() operate on the compound head
+page, thus waiting for remap_page() to complete, whether the THP is
+split successfully or rolled back.
+
+Note that put_and_wait_on_page_locked helps to drop the page reference
+acquired with get_page_unless_zero, as soon as the page is on the wait
+queue, before actually waiting. So splitting the THP is only prevented
+for a brief interval.
+
+Link: https://lkml.kernel.org/r/b9836c1dd522e903891760af9f0c86a2cce987eb.1623144009.git.xuyu@linux.alibaba.com
+Fixes: ba98828088ad ("thp: add option to setup migration entries during PMD split")
+Suggested-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Gang Deng <gavin.dg@linux.alibaba.com>
+Signed-off-by: Xu Yu <xuyu@linux.alibaba.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -326,6 +326,7 @@ void __migration_entry_wait(struct mm_st
+ goto out;
+
+ page = migration_entry_to_page(entry);
++ page = compound_head(page);
+
+ /*
+ * Once page cache replacement of page migration started, page_count
mm-add-vm_warn_on_once_page-macro.patch
mm-rmap-remove-unneeded-semicolon-in-page_not_mapped.patch
mm-rmap-use-page_not_mapped-in-try_to_unmap.patch
+mm-thp-use-head-page-in-__migration_entry_wait.patch
mm-thp-fix-__split_huge_pmd_locked-on-shmem-migration-entry.patch
+mm-thp-make-is_huge_zero_pmd-safe-and-quicker.patch
+mm-thp-try_to_unmap-use-ttu_sync-for-safe-splitting.patch
+mm-thp-fix-vma_address-if-virtual-address-below-file-offset.patch
+mm-thp-fix-page_address_in_vma-on-file-thp-tails.patch
mm-thp-unmap_mapping_page-to-fix-thp-truncate_cleanup_page.patch
mm-thp-replace-debug_vm-bug-with-vm_warn-when-unmap-fails-for-split.patch
+mm-page_vma_mapped_walk-use-page-for-pvmw-page.patch
+mm-page_vma_mapped_walk-settle-pagehuge-on-entry.patch
+mm-page_vma_mapped_walk-use-pmde-for-pvmw-pmd.patch
+mm-page_vma_mapped_walk-prettify-pvmw_migration-block.patch
+mm-page_vma_mapped_walk-crossing-page-table-boundary.patch
+mm-page_vma_mapped_walk-add-a-level-of-indentation.patch
+mm-page_vma_mapped_walk-use-goto-instead-of-while-1.patch
+mm-page_vma_mapped_walk-get-vma_address_end-earlier.patch
+mm-thp-fix-page_vma_mapped_walk-if-thp-mapped-by-ptes.patch
+mm-thp-another-pvmw_sync-fix-in-page_vma_mapped_walk.patch
+mm-futex-fix-shared-futex-pgoff-on-shmem-huge-page.patch