3.12-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 7 Jan 2014 18:24:27 +0000 (10:24 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 7 Jan 2014 18:24:27 +0000 (10:24 -0800)
added patches:
mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch

queue-3.12/mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch [new file with mode: 0644]
queue-3.12/series

diff --git a/queue-3.12/mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch b/queue-3.12/mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch
new file mode 100644
index 0000000..a4cdf56
--- /dev/null
+++ b/queue-3.12/mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch
@@ -0,0 +1,117 @@
+From mgorman@suse.de  Tue Jan  7 10:23:21 2014
+From: Mel Gorman <mgorman@suse.de>
+Date: Tue,  7 Jan 2014 14:00:45 +0000
+Subject: mm: numa: avoid unnecessary disruption of NUMA hinting during migration
+To: gregkh@linuxfoundation.org
+Cc: athorlton@sgi.com, riel@redhat.com, chegu_vinod@hp.com, Mel Gorman <mgorman@suse.de>, stable@vger.kernel.org
+Message-ID: <1389103248-17617-11-git-send-email-mgorman@suse.de>
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit de466bd628e8d663fdf3f791bc8db318ee85c714 upstream.
+
+do_huge_pmd_numa_page() handles the case where there is parallel THP
+migration.  However, by the time it is checked the NUMA hinting
+information has already been disrupted.  This patch adds an earlier
+check with some helpers.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Alex Thorlton <athorlton@sgi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/migrate.h |   10 +++++++++-
+ mm/huge_memory.c        |   22 ++++++++++++++++------
+ mm/migrate.c            |   12 ++++++++++++
+ 3 files changed, 37 insertions(+), 7 deletions(-)
+
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -90,10 +90,18 @@ static inline int migrate_huge_page_move
+ #endif /* CONFIG_MIGRATION */
+
+ #ifdef CONFIG_NUMA_BALANCING
+-extern int migrate_misplaced_page(struct page *page, int node);
++extern bool pmd_trans_migrating(pmd_t pmd);
++extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
+ extern int migrate_misplaced_page(struct page *page, int node);
+ extern bool migrate_ratelimited(int node);
+ #else
++static inline bool pmd_trans_migrating(pmd_t pmd)
++{
++      return false;
++}
++static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
++{
++}
+ static inline int migrate_misplaced_page(struct page *page, int node)
+ {
+       return -EAGAIN; /* can't migrate now */
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -884,6 +884,10 @@ int copy_huge_pmd(struct mm_struct *dst_
+               ret = 0;
+               goto out_unlock;
+       }
++
++      /* mmap_sem prevents this happening but warn if that changes */
++      WARN_ON(pmd_trans_migrating(pmd));
++
+       if (unlikely(pmd_trans_splitting(pmd))) {
+               /* split huge page running from under us */
+               spin_unlock(&src_mm->page_table_lock);
+@@ -1294,6 +1298,17 @@ int do_huge_pmd_numa_page(struct mm_stru
+       if (unlikely(!pmd_same(pmd, *pmdp)))
+               goto out_unlock;
+
++      /*
++       * If there are potential migrations, wait for completion and retry
++       * without disrupting NUMA hinting information. Do not relock and
++       * check_same as the page may no longer be mapped.
++       */
++      if (unlikely(pmd_trans_migrating(*pmdp))) {
++              spin_unlock(&mm->page_table_lock);
++              wait_migrate_huge_page(vma->anon_vma, pmdp);
++              goto out;
++      }
++
+       page = pmd_page(pmd);
+       page_nid = page_to_nid(page);
+       count_vm_numa_event(NUMA_HINT_FAULTS);
+@@ -1312,12 +1327,7 @@ int do_huge_pmd_numa_page(struct mm_stru
+                       goto clear_pmdnuma;
+       }
+
+-      /*
+-       * If there are potential migrations, wait for completion and retry. We
+-       * do not relock and check_same as the page may no longer be mapped.
+-       * Furtermore, even if the page is currently misplaced, there is no
+-       * guarantee it is still misplaced after the migration completes.
+-       */
++      /* Migration could have started since the pmd_trans_migrating check */
+       if (!page_locked) {
+               spin_unlock(&mm->page_table_lock);
+               wait_on_page_locked(page);
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1597,6 +1597,18 @@ int numamigrate_isolate_page(pg_data_t *
+       return 1;
+ }
+
++bool pmd_trans_migrating(pmd_t pmd)
++{
++      struct page *page = pmd_page(pmd);
++      return PageLocked(page);
++}
++
++void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
++{
++      struct page *page = pmd_page(*pmd);
++      wait_on_page_locked(page);
++}
++
+ /*
+  * Attempt to migrate a misplaced page to the specified destination
+  * node. Caller is expected to have an elevated reference count on
diff --git a/queue-3.12/series b/queue-3.12/series
index ac7d17a6ab66e3c6abe096cde79453b2101b478d..da5613e4753cb525d5ce74aeb914ec43e0b46457 100644
--- a/queue-3.12/series
+++ b/queue-3.12/series
@@ -115,6 +115,7 @@ mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch
 mm-numa-avoid-unnecessary-work-on-the-failure-path.patch
 sched-numa-skip-inaccessible-vmas.patch
 mm-numa-clear-numa-hinting-information-on-mprotect.patch
+mm-numa-avoid-unnecessary-disruption-of-numa-hinting-during-migration.patch
 mm-mempolicy-correct-putback-method-for-isolate-pages-if-failed.patch
 mm-compaction-respect-ignore_skip_hint-in-update_pageblock_skip.patch
 mm-memory-failure.c-recheck-pagehuge-after-hugetlb-page-migrate-successfully.patch
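
The two helpers added by this patch encode a single idea: the migration path holds the huge page's page lock for the duration of the move, so pmd_trans_migrating() is just PageLocked() on the pmd's page and wait_migrate_huge_page() is wait_on_page_locked(). The fix is to perform that check before anything touches the NUMA hinting state, and to wait and retry the fault if a migration is in flight. The sketch below is a minimal userspace analogue of that check-then-wait-then-retry ordering, not kernel code: a flag and condition variable stand in for the page lock and a plain int stands in for the hinting bit, and page_trans_migrating(), wait_migrate_page(), migrator(), fault_path() and numa_hint are hypothetical names used only for illustration (builds with cc -pthread).

/*
 * Toy analogue of the patch's ordering: check for an in-flight
 * "migration" BEFORE touching the hinting state; if one is running,
 * wait for it and retry instead of disrupting the state.
 * All names here are hypothetical, not kernel APIs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static bool migrating;      /* stands in for PageLocked(pmd_page(pmd)) */
static int  numa_hint = 1;  /* stands in for the NUMA hinting bit */

static bool page_trans_migrating(void)    /* cf. pmd_trans_migrating() */
{
        return migrating;                 /* unlocked peek, as PageLocked() is */
}

static void wait_migrate_page(void)       /* cf. wait_migrate_huge_page() */
{
        pthread_mutex_lock(&lock);
        while (migrating)
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);
}

static void *migrator(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        migrating = true;                 /* cf. lock_page() before the move */
        pthread_mutex_unlock(&lock);

        usleep(100 * 1000);               /* the move is in flight */

        pthread_mutex_lock(&lock);
        migrating = false;                /* cf. unlock_page() after the move */
        pthread_cond_broadcast(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void fault_path(void)
{
        /* The ordering the patch establishes: check and bail out
         * BEFORE the hinting state is disturbed. */
        if (page_trans_migrating()) {
                wait_migrate_page();
                return;                   /* caller re-faults if still needed */
        }
        numa_hint = 0;                    /* safe: no migration in flight */
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, migrator, NULL);
        usleep(10 * 1000);                /* give the migration time to start */
        fault_path();
        pthread_join(t, NULL);
        printf("numa_hint=%d (left intact during migration)\n", numa_hint);
        return 0;
}

Since the fault path simply retries, a false positive (the page held locked for some unrelated reason) costs only an extra wait, which is what makes the existing page lock a cheap stand-in for a dedicated "migrating" bit.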