--- /dev/null
+From abc40bd2eeb77eb7c2effcaf63154aad929a1d5f Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Thu, 2 Oct 2014 19:47:42 +0100
+Subject: mm: numa: Do not mark PTEs pte_numa when splitting huge pages
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit abc40bd2eeb77eb7c2effcaf63154aad929a1d5f upstream.
+
+This patch reverts 1ba6e0b50b ("mm: numa: split_huge_page: transfer the
+NUMA type from the pmd to the pte"). If a huge page is being split due
+to a protection change and the tail will be in a PROT_NONE vma, then
+NUMA hinting PTEs are temporarily created in the protected VMA.
+
+ VM_RW|VM_PROTNONE
+|-----------------|
+ ^
+ split here
+
+In the specific case above, it should get fixed up by
+change_pte_range(), but there is a window of opportunity for weirdness
+to happen. Similarly, if a huge page is shrunk and split during a
+protection update, but before pmd_numa is cleared, then a pte_numa can
+be left behind.
+
+Instead of adding complexity to deal with this case, this patch simply
+does not mark PTEs NUMA when splitting a huge page. NUMA hinting faults
+will not be triggered for the new PTEs, which is a marginal cost in
+comparison to the complexity of dealing with the corner cases during a
+THP split.
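+
+As an illustration only, a minimal userspace sketch of the scenario in
+the diagram above (assuming an x86-64 kernel with 2MB transparent huge
+pages enabled; none of this code is part of the patch):
+
+	#define _GNU_SOURCE
+	#include <stdlib.h>
+	#include <string.h>
+	#include <sys/mman.h>
+
+	#define HPAGE_SIZE (2UL * 1024 * 1024)
+
+	int main(void)
+	{
+		void *p;
+
+		/* 2MB-aligned anonymous region, hinted for THP. */
+		if (posix_memalign(&p, HPAGE_SIZE, HPAGE_SIZE))
+			return 1;
+		madvise(p, HPAGE_SIZE, MADV_HUGEPAGE);
+
+		/* Touch the range so it can be backed by a huge page. */
+		memset(p, 1, HPAGE_SIZE);
+
+		/* Protecting half the range forces the huge page to be
+		 * split; the second half lands in a PROT_NONE VMA, the
+		 * case that previously saw transient NUMA hinting PTEs. */
+		mprotect((char *)p + HPAGE_SIZE / 2, HPAGE_SIZE / 2,
+			 PROT_NONE);
+		return 0;
+	}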
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1740,14 +1740,17 @@ static int __split_huge_page_map(struct
+ for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ pte_t *pte, entry;
+ BUG_ON(PageCompound(page+i));
++ /*
++ * Note that pmd_numa is not transferred deliberately
++ * to avoid any possibility that pte_numa leaks to
++ * a PROT_NONE VMA by accident.
++ */
+ entry = mk_pte(page + i, vma->vm_page_prot);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (!pmd_write(*pmd))
+ entry = pte_wrprotect(entry);
+ if (!pmd_young(*pmd))
+ entry = pte_mkold(entry);
+- if (pmd_numa(*pmd))
+- entry = pte_mknuma(entry);
+ pte = pte_offset_map(&_pmd, haddr);
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, haddr, pte, entry);
--- /dev/null
+From f8303c2582b889351e261ff18c4d8eb197a77db2 Mon Sep 17 00:00:00 2001
+From: Waiman Long <Waiman.Long@hp.com>
+Date: Wed, 6 Aug 2014 16:05:36 -0700
+Subject: mm, thp: move invariant bug check out of loop in __split_huge_page_map
+
+From: Waiman Long <Waiman.Long@hp.com>
+
+commit f8303c2582b889351e261ff18c4d8eb197a77db2 upstream.
+
+In __split_huge_page_map(), the check on page_mapcount(page) is
+invariant within the for loop. Because the macro is implemented using
+atomic_read(), the redundant check cannot be optimized away by the
+compiler, leading to unnecessary reads of the page structure.
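+
+The effect can be shown in miniature with C11 atomics standing in for
+the kernel's atomic_read() (an illustrative sketch, not part of the
+patch):
+
+	#include <assert.h>
+	#include <stdatomic.h>
+
+	static atomic_int mapcount;	/* stand-in for page->_mapcount */
+
+	void check_in_loop(int n)
+	{
+		/* The atomic load is re-issued on every iteration even
+		 * though its value is loop-invariant. */
+		for (int i = 0; i < n; i++)
+			assert(atomic_load(&mapcount) == 1);
+	}
+
+	void check_hoisted(int n)
+	{
+		/* Manual hoisting: a single load before the loop. */
+		assert(atomic_load(&mapcount) == 1);
+		for (int i = 0; i < n; i++)
+			;	/* per-PTE work goes here */
+	}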
+
+This patch moves the invariant bug check out of the loop so that it is
+done only once. On a 3.16-rc1 based kernel, a microbenchmark that
+broke up 1000 transparent huge pages using munmap() ran in 38,245us
+with the patch and 38,548us without it, a performance gain of about
+1%.
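+
+The commit does not include the microbenchmark itself; a rough sketch
+of the approach (assumptions: 2MB THPs, a split forced by unmapping a
+single base page inside each huge page) might look like:
+
+	#define _GNU_SOURCE
+	#include <stdint.h>
+	#include <string.h>
+	#include <sys/mman.h>
+
+	#define HPAGE_SIZE (2UL * 1024 * 1024)
+	#define NR_HPAGES 1000
+
+	int main(void)
+	{
+		size_t len = (NR_HPAGES + 1) * HPAGE_SIZE;
+		char *raw, *p;
+		unsigned long i;
+
+		raw = mmap(NULL, len, PROT_READ | PROT_WRITE,
+			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+		if (raw == MAP_FAILED)
+			return 1;
+
+		/* Align up so each 2MB chunk can be THP-backed. */
+		p = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1) &
+			     ~(HPAGE_SIZE - 1));
+		madvise(p, NR_HPAGES * HPAGE_SIZE, MADV_HUGEPAGE);
+		memset(p, 1, NR_HPAGES * HPAGE_SIZE);	/* fault in THPs */
+
+		/* Unmapping one base page inside each huge page forces
+		 * a split; timing this loop (e.g. with clock_gettime())
+		 * is the measured quantity. */
+		for (i = 0; i < NR_HPAGES; i++)
+			munmap(p + i * HPAGE_SIZE, 4096);
+		return 0;
+	}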
+
+Signed-off-by: Waiman Long <Waiman.Long@hp.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Scott J Norton <scott.norton@hp.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1733,6 +1733,8 @@ static int __split_huge_page_map(struct
+ if (pmd) {
+ pgtable = pgtable_trans_huge_withdraw(mm);
+ pmd_populate(mm, &_pmd, pgtable);
++ if (pmd_write(*pmd))
++ BUG_ON(page_mapcount(page) != 1);
+
+ haddr = address;
+ for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+@@ -1742,8 +1744,6 @@ static int __split_huge_page_map(struct
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (!pmd_write(*pmd))
+ entry = pte_wrprotect(entry);
+- else
+- BUG_ON(page_mapcount(page) != 1);
+ if (!pmd_young(*pmd))
+ entry = pte_mkold(entry);
+ if (pmd_numa(*pmd))
perf-fix-perf-bug-in-fork.patch
init-kconfig-fix-have_futex_cmpxchg-to-not-break-up-the-expert-menu.patch
ring-buffer-fix-infinite-spin-in-reading-buffer.patch
+mm-thp-move-invariant-bug-check-out-of-loop-in-__split_huge_page_map.patch
+mm-numa-do-not-mark-ptes-pte_numa-when-splitting-huge-pages.patch