From: Greg Kroah-Hartman
Date: Mon, 6 Oct 2014 15:40:09 +0000 (-0700)
Subject: 3.10-stable patches
X-Git-Tag: v3.10.57~21
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=171d56b901d5c9db31604ff66adeea422d3ae7d8;p=thirdparty%2Fkernel%2Fstable-queue.git

3.10-stable patches

added patches:
	mm-numa-do-not-mark-ptes-pte_numa-when-splitting-huge-pages.patch
	mm-thp-move-invariant-bug-check-out-of-loop-in-__split_huge_page_map.patch
---

diff --git a/queue-3.10/mm-numa-do-not-mark-ptes-pte_numa-when-splitting-huge-pages.patch b/queue-3.10/mm-numa-do-not-mark-ptes-pte_numa-when-splitting-huge-pages.patch
new file mode 100644
index 00000000000..43e12e26cf0
--- /dev/null
+++ b/queue-3.10/mm-numa-do-not-mark-ptes-pte_numa-when-splitting-huge-pages.patch
@@ -0,0 +1,61 @@
+From abc40bd2eeb77eb7c2effcaf63154aad929a1d5f Mon Sep 17 00:00:00 2001
+From: Mel Gorman
+Date: Thu, 2 Oct 2014 19:47:42 +0100
+Subject: mm: numa: Do not mark PTEs pte_numa when splitting huge pages
+
+From: Mel Gorman
+
+commit abc40bd2eeb77eb7c2effcaf63154aad929a1d5f upstream.
+
+This patch reverts 1ba6e0b50b ("mm: numa: split_huge_page: transfer the
+NUMA type from the pmd to the pte"). If a huge page is being split due
+a protection change and the tail will be in a PROT_NONE vma then NUMA
+hinting PTEs are temporarily created in the protected VMA.
+
+ VM_RW|VM_PROTNONE
+|-----------------|
+ ^
+ split here
+
+In the specific case above, it should get fixed up by change_pte_range()
+but there is a window of opportunity for weirdness to happen. Similarly,
+if a huge page is shrunk and split during a protection update but before
+pmd_numa is cleared then a pte_numa can be left behind.
+
+Instead of adding complexity trying to deal with the case, this patch
+will not mark PTEs NUMA when splitting a huge page. NUMA hinting faults
+will not be triggered which is marginal in comparison to the complexity
+in dealing with the corner cases during THP split.
+
+Signed-off-by: Mel Gorman
+Acked-by: Rik van Riel
+Acked-by: Kirill A. Shutemov
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/huge_memory.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1740,14 +1740,17 @@ static int __split_huge_page_map(struct
+ 		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ 			pte_t *pte, entry;
+ 			BUG_ON(PageCompound(page+i));
++			/*
++			 * Note that pmd_numa is not transferred deliberately
++			 * to avoid any possibility that pte_numa leaks to
++			 * a PROT_NONE VMA by accident.
++			 */
+ 			entry = mk_pte(page + i, vma->vm_page_prot);
+ 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ 			if (!pmd_write(*pmd))
+ 				entry = pte_wrprotect(entry);
+ 			if (!pmd_young(*pmd))
+ 				entry = pte_mkold(entry);
+-			if (pmd_numa(*pmd))
+-				entry = pte_mknuma(entry);
+ 			pte = pte_offset_map(&_pmd, haddr);
+ 			BUG_ON(!pte_none(*pte));
+ 			set_pte_at(mm, haddr, pte, entry);
diff --git a/queue-3.10/mm-thp-move-invariant-bug-check-out-of-loop-in-__split_huge_page_map.patch b/queue-3.10/mm-thp-move-invariant-bug-check-out-of-loop-in-__split_huge_page_map.patch
new file mode 100644
index 00000000000..78b4a23f231
--- /dev/null
+++ b/queue-3.10/mm-thp-move-invariant-bug-check-out-of-loop-in-__split_huge_page_map.patch
@@ -0,0 +1,54 @@
+From f8303c2582b889351e261ff18c4d8eb197a77db2 Mon Sep 17 00:00:00 2001
+From: Waiman Long
+Date: Wed, 6 Aug 2014 16:05:36 -0700
+Subject: mm, thp: move invariant bug check out of loop in __split_huge_page_map
+
+From: Waiman Long
+
+commit f8303c2582b889351e261ff18c4d8eb197a77db2 upstream.
+
+In __split_huge_page_map(), the check for page_mapcount(page) is
+invariant within the for loop. Because of the fact that the macro is
+implemented using atomic_read(), the redundant check cannot be optimized
+away by the compiler leading to unnecessary read to the page structure.
+
+This patch moves the invariant bug check out of the loop so that it will
+be done only once. On a 3.16-rc1 based kernel, the execution time of a
+microbenchmark that broke up 1000 transparent huge pages using munmap()
+had an execution time of 38,245us and 38,548us with and without the
+patch respectively. The performance gain is about 1%.
+
+Signed-off-by: Waiman Long
+Acked-by: Kirill A. Shutemov
+Cc: Andrea Arcangeli
+Cc: Mel Gorman
+Cc: Rik van Riel
+Cc: Scott J Norton
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/huge_memory.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1733,6 +1733,8 @@ static int __split_huge_page_map(struct
+ 	if (pmd) {
+ 		pgtable = pgtable_trans_huge_withdraw(mm);
+ 		pmd_populate(mm, &_pmd, pgtable);
++		if (pmd_write(*pmd))
++			BUG_ON(page_mapcount(page) != 1);
+
+ 		haddr = address;
+ 		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+@@ -1742,8 +1744,6 @@ static int __split_huge_page_map(struct
+ 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ 			if (!pmd_write(*pmd))
+ 				entry = pte_wrprotect(entry);
+-			else
+-				BUG_ON(page_mapcount(page) != 1);
+ 			if (!pmd_young(*pmd))
+ 				entry = pte_mkold(entry);
+ 			if (pmd_numa(*pmd))
diff --git a/queue-3.10/series b/queue-3.10/series
index f38e5f9b806..b7ddf1c0ddd 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -2,3 +2,5 @@ udf-avoid-infinite-loop-when-processing-indirect-icbs.patch
 perf-fix-perf-bug-in-fork.patch
 init-kconfig-fix-have_futex_cmpxchg-to-not-break-up-the-expert-menu.patch
 ring-buffer-fix-infinite-spin-in-reading-buffer.patch
+mm-thp-move-invariant-bug-check-out-of-loop-in-__split_huge_page_map.patch
+mm-numa-do-not-mark-ptes-pte_numa-when-splitting-huge-pages.patch