3.12-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Tue, 7 Jan 2014 17:53:48 +0000 (09:53 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Tue, 7 Jan 2014 17:53:48 +0000 (09:53 -0800)
added patches:
mm-numa-avoid-unnecessary-work-on-the-failure-path.patch
mm-numa-do-not-clear-pmd-during-pte-update-scan.patch
mm-numa-do-not-clear-pte-for-pte_numa-update.patch
mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch

queue-3.12/mm-numa-avoid-unnecessary-work-on-the-failure-path.patch [new file with mode: 0644]
queue-3.12/mm-numa-do-not-clear-pmd-during-pte-update-scan.patch [new file with mode: 0644]
queue-3.12/mm-numa-do-not-clear-pte-for-pte_numa-update.patch [new file with mode: 0644]
queue-3.12/mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch [new file with mode: 0644]
queue-3.12/series

diff --git a/queue-3.12/mm-numa-avoid-unnecessary-work-on-the-failure-path.patch b/queue-3.12/mm-numa-avoid-unnecessary-work-on-the-failure-path.patch
new file mode 100644
index 0000000..fdc625b
--- /dev/null
+++ b/queue-3.12/mm-numa-avoid-unnecessary-work-on-the-failure-path.patch
@@ -0,0 +1,45 @@
+From mgorman@suse.de  Tue Jan  7 09:51:39 2014
+From: Mel Gorman <mgorman@suse.de>
+Date: Tue,  7 Jan 2014 14:00:42 +0000
+Subject: mm: numa: avoid unnecessary work on the failure path
+To: gregkh@linuxfoundation.org
+Cc: athorlton@sgi.com, riel@redhat.com, chegu_vinod@hp.com, Mel Gorman <mgorman@suse.de>, stable@vger.kernel.org
+Message-ID: <1389103248-17617-8-git-send-email-mgorman@suse.de>
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit eb4489f69f224356193364dc2762aa009738ca7f upstream.
+
+If a PMD changes during a THP migration then the migration aborts, but
+the failure path does more work than is necessary.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Alex Thorlton <athorlton@sgi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1726,7 +1726,8 @@ fail_putback:
+               putback_lru_page(page);
+               mod_zone_page_state(page_zone(page),
+                        NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+-              goto out_fail;
++
++              goto out_unlock;
+       }
+
+       /*
+@@ -1800,6 +1801,7 @@ out_dropref:
+       }
+       spin_unlock(&mm->page_table_lock);
+
++out_unlock:
+       unlock_page(page);
+       put_page(page);
+       return 0;
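
The fix narrows the failure path: once the putback has run, only the page lock
and the page reference still need releasing, so the code jumps to the new
out_unlock label instead of repeating the full out_fail teardown. Below is a
minimal userspace sketch of the same goto-unwind idiom; the function, the
pthread mutex, and the malloc stand-in are illustrative assumptions, not code
from mm/migrate.c.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* A late failure jumps to a label that releases only what is still
     * held, instead of running the whole error path again. */
    static int do_work(int fail_late)
    {
        char *buf;
        int ret = -1;

        pthread_mutex_lock(&lock);

        buf = malloc(128);
        if (!buf)
            goto out_unlock;        /* nothing else to undo yet */

        if (fail_late) {
            free(buf);              /* partial state already rolled back */
            goto out_unlock;        /* skip the full failure path */
        }

        printf("work done\n");
        free(buf);
        ret = 0;
    out_unlock:
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        do_work(0);
        do_work(1);
        return 0;
    }
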
diff --git a/queue-3.12/mm-numa-do-not-clear-pmd-during-pte-update-scan.patch b/queue-3.12/mm-numa-do-not-clear-pmd-during-pte-update-scan.patch
new file mode 100644
index 0000000..caad699
--- /dev/null
+++ b/queue-3.12/mm-numa-do-not-clear-pmd-during-pte-update-scan.patch
@@ -0,0 +1,54 @@
+From mgorman@suse.de  Tue Jan  7 09:49:21 2014
+From: Mel Gorman <mgorman@suse.de>
+Date: Tue,  7 Jan 2014 14:00:39 +0000
+Subject: mm: numa: do not clear PMD during PTE update scan
+To: gregkh@linuxfoundation.org
+Cc: athorlton@sgi.com, riel@redhat.com, chegu_vinod@hp.com, Mel Gorman <mgorman@suse.de>, stable@vger.kernel.org
+Message-ID: <1389103248-17617-5-git-send-email-mgorman@suse.de>
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 5a6dac3ec5f583cc8ee7bc53b5500a207c4ca433 upstream.
+
+If the PMD is flushed then a parallel fault in handle_mm_fault() will
+enter the pmd_none and do_huge_pmd_anonymous_page() path, where it will
+attempt to insert a huge zero page.  This is wasteful, so the patch
+avoids clearing the PMD when setting pmd_numa.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Alex Thorlton <athorlton@sgi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1474,20 +1474,22 @@ int change_huge_pmd(struct vm_area_struc
+       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+               pmd_t entry;
+
+-              entry = pmdp_get_and_clear(mm, addr, pmd);
+               if (!prot_numa) {
++                      entry = pmdp_get_and_clear(mm, addr, pmd);
+                       entry = pmd_modify(entry, newprot);
+                       BUG_ON(pmd_write(entry));
++                      set_pmd_at(mm, addr, pmd, entry);
+               } else {
+                       struct page *page = pmd_page(*pmd);
+
++                      entry = *pmd;
+                       /* only check non-shared pages */
+                       if (page_mapcount(page) == 1 &&
+                           !pmd_numa(*pmd)) {
+                               entry = pmd_mknuma(entry);
++                              set_pmd_at(mm, addr, pmd, entry);
+                       }
+               }
+-              set_pmd_at(mm, addr, pmd, entry);
+               spin_unlock(&vma->vm_mm->page_table_lock);
+               ret = 1;
+       }
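
Previously pmdp_get_and_clear() ran before the prot_numa check, so the PMD went
transiently pmd_none even when only the NUMA hint was being set, and a parallel
fault could observe that window. With the reordering, the prot_numa path reads
the entry in place and writes it back with set_pmd_at() only when it actually
changes. The sketch below models the transient-clear window in userspace with
an atomic variable standing in for the PMD; the bit values and thread setup are
invented for illustration.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for a page-table entry; 0 plays the role of pmd_none(). */
    static _Atomic unsigned long entry = 0x1234;

    static void *observer(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 10000000; i++) {
            if (atomic_load(&entry) == 0) {
                /* A parallel fault landing here would see "no mapping"
                 * and instantiate a huge zero page: wasted work. */
                puts("observed the transient pmd_none window");
                return NULL;
            }
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        unsigned long e;

        pthread_create(&t, NULL, observer, NULL);

        /* Racy pattern the patch removes: clear first, write back later. */
        for (int i = 0; i < 10000000; i++) {
            e = atomic_exchange(&entry, 0);   /* window opens */
            atomic_store(&entry, e | 0x200);  /* window closes */
        }

        /* Safe pattern the patch adopts: read in place, store once. */
        e = atomic_load(&entry);
        atomic_store(&entry, e | 0x200);

        pthread_join(t, NULL);
        return 0;
    }
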
diff --git a/queue-3.12/mm-numa-do-not-clear-pte-for-pte_numa-update.patch b/queue-3.12/mm-numa-do-not-clear-pte-for-pte_numa-update.patch
new file mode 100644
index 0000000..59436a8
--- /dev/null
+++ b/queue-3.12/mm-numa-do-not-clear-pte-for-pte_numa-update.patch
@@ -0,0 +1,72 @@
+From mgorman@suse.de  Tue Jan  7 09:50:13 2014
+From: Mel Gorman <mgorman@suse.de>
+Date: Tue,  7 Jan 2014 14:00:40 +0000
+Subject: mm: numa: do not clear PTE for pte_numa update
+To: gregkh@linuxfoundation.org
+Cc: athorlton@sgi.com, riel@redhat.com, chegu_vinod@hp.com, Mel Gorman <mgorman@suse.de>, stable@vger.kernel.org
+Message-ID: <1389103248-17617-6-git-send-email-mgorman@suse.de>
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 0c5f83c23ca703d32f930393825487257a5cde6d upstream.
+
+The TLB must be flushed if the PTE is updated, but change_pte_range
+clears the PTE while marking PTEs pte_numa without necessarily flushing
+the TLB when it reinserts the same entry.  Without the flush, it is
+conceivable that two processors hold different TLB entries for the same
+virtual address, and at the very least this would generate spurious faults.
+
+This patch only clears the PTE in change_pte_range for a full
+protection change.
+
+[riel@redhat.com: write pte_numa pte back to the page tables]
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Alex Thorlton <athorlton@sgi.com>
+Cc: Chegu Vinod <chegu_vinod@hp.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mprotect.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -54,13 +54,14 @@ static unsigned long change_pte_range(st
+                       pte_t ptent;
+                       bool updated = false;
+
+-                      ptent = ptep_modify_prot_start(mm, addr, pte);
+                       if (!prot_numa) {
++                              ptent = ptep_modify_prot_start(mm, addr, pte);
+                               ptent = pte_modify(ptent, newprot);
+                               updated = true;
+                       } else {
+                               struct page *page;
+
++                              ptent = *pte;
+                               page = vm_normal_page(vma, addr, oldpte);
+                               if (page) {
+                                       int this_nid = page_to_nid(page);
+@@ -73,6 +74,7 @@ static unsigned long change_pte_range(st
+                                       if (!pte_numa(oldpte) &&
+                                           page_mapcount(page) == 1) {
+                                               ptent = pte_mknuma(ptent);
++                                              set_pte_at(mm, addr, pte, ptent);
+                                               updated = true;
+                                       }
+                               }
+@@ -89,7 +91,10 @@ static unsigned long change_pte_range(st
+
+                       if (updated)
+                               pages++;
+-                      ptep_modify_prot_commit(mm, addr, pte, ptent);
++
++                      /* Only !prot_numa always clears the pte */
++                      if (!prot_numa)
++                              ptep_modify_prot_commit(mm, addr, pte, ptent);
+               } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
+                       swp_entry_t entry = pte_to_swp_entry(oldpte);
+
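
This is the PTE-level analogue of the huge-page change above:
ptep_modify_prot_start(), which clears the PTE, is now paired with
ptep_modify_prot_commit() only for a real protection change, while the pte_numa
update reads the entry in place and stores it back. A simplified sketch of the
new control flow follows; the pte_t layout, bit masks, and helper bodies are
invented stand-ins, not the kernel's implementations.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long pte_t;

    #define PROT_BITS 0xffUL      /* invented layout, illustration only */
    #define NUMA_BIT  0x200UL

    /* Stubs standing in for the kernel helpers, not their real bodies. */
    static pte_t modify_prot_start(pte_t *pte)
    {
        pte_t e = *pte;
        *pte = 0;                 /* the start/commit pair clears the PTE */
        return e;
    }

    static void modify_prot_commit(pte_t *pte, pte_t e)
    {
        *pte = e;                 /* write back; flush would happen here */
    }

    /* The clearing start/commit pair runs only for a full protection
     * change; the pte_numa update never leaves the PTE transiently clear. */
    static void change_one_pte(pte_t *pte, pte_t newprot, bool prot_numa)
    {
        pte_t ptent;

        if (!prot_numa) {
            ptent = modify_prot_start(pte);       /* clears *pte */
            ptent = (ptent & ~PROT_BITS) | newprot;
            modify_prot_commit(pte, ptent);
        } else {
            ptent = *pte;                         /* read in place, no clear */
            ptent |= NUMA_BIT;                    /* pte_mknuma() analogue */
            *pte = ptent;                         /* set_pte_at() analogue */
        }
    }

    int main(void)
    {
        pte_t pte = 0x1000 | 0x7;

        change_one_pte(&pte, 0x5, false);
        change_one_pte(&pte, 0, true);
        printf("final pte: %#lx\n", pte);
        return 0;
    }
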
diff --git a/queue-3.12/mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch b/queue-3.12/mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch
new file mode 100644
index 0000000..fe43b07
--- /dev/null
+++ b/queue-3.12/mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch
@@ -0,0 +1,43 @@
+From mgorman@suse.de  Tue Jan  7 09:51:09 2014
+From: Mel Gorman <mgorman@suse.de>
+Date: Tue,  7 Jan 2014 14:00:41 +0000
+Subject: mm: numa: ensure anon_vma is locked to prevent parallel THP splits
+To: gregkh@linuxfoundation.org
+Cc: athorlton@sgi.com, riel@redhat.com, chegu_vinod@hp.com, Mel Gorman <mgorman@suse.de>, stable@vger.kernel.org
+Message-ID: <1389103248-17617-7-git-send-email-mgorman@suse.de>
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit c3a489cac38d43ea6dc4ac240473b44b46deecf7 upstream.
+
+The anon_vma lock prevents parallel THP splits and any associated
+complexity that arises when handling splits during THP migration.  This
+patch checks whether the lock was successfully acquired and bails out of
+THP migration if it failed for any reason.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Alex Thorlton <athorlton@sgi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1342,6 +1342,13 @@ int do_huge_pmd_numa_page(struct mm_stru
+               goto out_unlock;
+       }
+
++      /* Bail if we fail to protect against THP splits for any reason */
++      if (unlikely(!anon_vma)) {
++              put_page(page);
++              page_nid = -1;
++              goto clear_pmdnuma;
++      }
++
+       /*
+        * Migrate the THP to the requested node, returns with page unlocked
+        * and pmd_numa cleared.
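
page_lock_anon_vma_read() can return NULL, for example when the page is no
longer mapped, so the migration path now confirms the lock was actually taken
before depending on it, dropping its page reference and falling back to
clearing pmd_numa otherwise. Below is a minimal sketch of this check-and-bail
pattern, with a pthread trylock standing in for the anon_vma lock; the names
and printouts are illustrative, not taken from mm/huge_memory.c.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t anon_vma_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take the lock that excludes parallel splits; if it cannot be taken,
     * undo the earlier reference and fall back instead of migrating
     * unprotected. */
    static int migrate_page(void)
    {
        if (pthread_mutex_trylock(&anon_vma_lock) != 0) {
            /* analogue of: put_page(page); page_nid = -1; */
            printf("bail: cannot protect against a parallel split\n");
            goto clear_numa_hint;
        }

        printf("migrating under the lock\n");
        pthread_mutex_unlock(&anon_vma_lock);
        return 0;

    clear_numa_hint:
        /* clear the hint and let the normal fault path retry */
        return -1;
    }

    int main(void)
    {
        migrate_page();                 /* lock free: migrates */

        pthread_mutex_lock(&anon_vma_lock);
        migrate_page();                 /* lock held: bails out */
        pthread_mutex_unlock(&anon_vma_lock);
        return 0;
    }
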
diff --git a/queue-3.12/series b/queue-3.12/series
index 9f8e48d8ea42fa70329b14d962603d3aa1f9b6fb..0b84d932547fa0bebf7cb5d656b37fb00a078125 100644
--- a/queue-3.12/series
+++ b/queue-3.12/series
@@ -110,6 +110,10 @@ revert-of-address-handle-address-cells-2-specially.patch
 mm-numa-serialise-parallel-get_user_page-against-thp-migration.patch
 mm-numa-call-mmu-notifiers-on-thp-migration.patch
 mm-clear-pmd_numa-before-invalidating.patch
+mm-numa-do-not-clear-pmd-during-pte-update-scan.patch
+mm-numa-do-not-clear-pte-for-pte_numa-update.patch
+mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch
+mm-numa-avoid-unnecessary-work-on-the-failure-path.patch
 mm-mempolicy-correct-putback-method-for-isolate-pages-if-failed.patch
 mm-compaction-respect-ignore_skip_hint-in-update_pageblock_skip.patch
 mm-memory-failure.c-recheck-pagehuge-after-hugetlb-page-migrate-successfully.patch