--- /dev/null
+From 3c226c637b69104f6b9f1c6ec5b08d7b741b3229 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 16 Jun 2017 14:02:34 -0700
+Subject: mm: numa: avoid waiting on freed migrated pages
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 3c226c637b69104f6b9f1c6ec5b08d7b741b3229 upstream.
+
+In do_huge_pmd_numa_page(), we attempt to handle a migrating thp pmd by
+waiting until the pmd is unlocked before we return and retry. However,
+we can race with migrate_misplaced_transhuge_page():
+
+ // do_huge_pmd_numa_page // migrate_misplaced_transhuge_page()
+ // Holds 0 refs on page // Holds 2 refs on page
+
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ /* ... */
+ if (pmd_trans_migrating(*vmf->pmd)) {
+ page = pmd_page(*vmf->pmd);
+ spin_unlock(vmf->ptl);
+ ptl = pmd_lock(mm, pmd);
+ if (page_count(page) != 2) {
+ /* roll back */
+ }
+ /* ... */
+ mlock_migrate_page(new_page, page);
+ /* ... */
+ spin_unlock(ptl);
+ put_page(page);
+ put_page(page); // page freed here
+ wait_on_page_locked(page);
+ goto out;
+ }
+
+This can result in the freed page having its waiters flag set
+unexpectedly, which trips the PAGE_FLAGS_CHECK_AT_PREP checks in the
+page alloc/free functions. This has been observed on arm64 KVM guests.
+
+We can avoid this by having do_huge_pmd_numa_page() take a reference on
+the page before dropping the pmd lock, mirroring what we do in
+__migration_entry_wait().
+
+When we hit the race, migrate_misplaced_transhuge_page() will see the
+reference and abort the migration, as it may do today in other cases.
+
+Fixes: b8916634b77bffb2 ("mm: Prevent parallel splits during THP migration")
+Link: http://lkml.kernel.org/r/1497349722-6731-2-git-send-email-will.deacon@arm.com
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Steve Capper <steve.capper@arm.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ mm/huge_memory.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1363,8 +1363,11 @@ int do_huge_pmd_numa_page(struct mm_stru
+ */
+ if (unlikely(pmd_trans_migrating(*pmdp))) {
+ page = pmd_page(*pmdp);
++ if (!get_page_unless_zero(page))
++ goto out_unlock;
+ spin_unlock(ptl);
+ wait_on_page_locked(page);
++ put_page(page);
+ goto out;
+ }
+
+@@ -1396,8 +1399,11 @@ int do_huge_pmd_numa_page(struct mm_stru
+
+ /* Migration could have started since the pmd_trans_migrating check */
+ if (!page_locked) {
++ if (!get_page_unless_zero(page))
++ goto out_unlock;
+ spin_unlock(ptl);
+ wait_on_page_locked(page);
++ put_page(page);
+ page_nid = -1;
+ goto out;
+ }