--- /dev/null
+From b985194c8c0a130ed155b71662e39f7eaea4876f Mon Sep 17 00:00:00 2001
+From: Chen Yucong <slaoub@gmail.com>
+Date: Thu, 22 May 2014 11:54:15 -0700
+Subject: hwpoison, hugetlb: lock_page/unlock_page does not match for handling a free hugepage
+
+From: Chen Yucong <slaoub@gmail.com>
+
+commit b985194c8c0a130ed155b71662e39f7eaea4876f upstream.
+
+When handling a free hugepage in memory_failure(), a race can occur if
+another thread hwpoisons the same hugepage concurrently. So we need to
+check PageHWPoison instead of !PageHWPoison.
+
+If hwpoison_filter(p) returns true or such a race happens, we need to call
+unlock_page(hpage) before returning.
+
+Signed-off-by: Chen Yucong <slaoub@gmail.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory-failure.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1083,15 +1083,16 @@ int memory_failure(unsigned long pfn, in
+ return 0;
+ } else if (PageHuge(hpage)) {
+ /*
+- * Check "just unpoisoned", "filter hit", and
+- * "race with other subpage."
++ * Check "filter hit" and "race with other subpage."
+ */
+ lock_page(hpage);
+- if (!PageHWPoison(hpage)
+- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+- || (p != hpage && TestSetPageHWPoison(hpage))) {
+- atomic_long_sub(nr_pages, &num_poisoned_pages);
+- return 0;
++ if (PageHWPoison(hpage)) {
++ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
++ || (p != hpage && TestSetPageHWPoison(hpage))) {
++ atomic_long_sub(nr_pages, &num_poisoned_pages);
++ unlock_page(hpage);
++ return 0;
++ }
+ }
+ set_page_hwpoison_huge_page(hpage);
+ res = dequeue_hwpoisoned_huge_page(hpage);
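The structural rule the fix above enforces is that every early return taken
after lock_page(hpage) has to drop the lock itself before returning. Below is
a minimal, self-contained userspace sketch of that discipline; the names and
the pthread mutex standing in for the page lock are invented for illustration
and are not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins: none of these names exist in the kernel. */
struct model_page {
	pthread_mutex_t lock;	/* plays the role of lock_page()/unlock_page() */
	bool poisoned;		/* plays the role of PageHWPoison() */
};

/* plays the role of hwpoison_filter(); always "hits" in this toy model */
static bool filter_hit(struct model_page *p)
{
	(void)p;
	return true;
}

static int handle_free_hugepage(struct model_page *hpage)
{
	pthread_mutex_lock(&hpage->lock);
	if (hpage->poisoned) {
		if (filter_hit(hpage)) {
			hpage->poisoned = false;
			/* early return: the lock must be dropped on this path too */
			pthread_mutex_unlock(&hpage->lock);
			return 0;
		}
	}
	/* common exit path: single unlock for the fall-through case */
	pthread_mutex_unlock(&hpage->lock);
	return 1;
}

int main(void)
{
	struct model_page p = { PTHREAD_MUTEX_INITIALIZER, true };

	printf("handle_free_hugepage() returned %d\n", handle_free_hugepage(&p));
	return 0;
}

Build with "cc -pthread"; the point is only the shape of the control flow:
pairing every return taken after the lock with an unlock is exactly what the
added unlock_page(hpage) restores in the hunk above.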
--- /dev/null
+From dd18dbc2d42af75fffa60c77e0f02220bc329829 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Fri, 9 May 2014 15:37:00 -0700
+Subject: mm, thp: close race between mremap() and split_huge_page()
+
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+
+commit dd18dbc2d42af75fffa60c77e0f02220bc329829 upstream.
+
+It's critical for split_huge_page() (and migration) to catch and freeze
+all PMDs during the rmap walk. It gets tricky if there's a concurrent
+fork() or mremap(), since we usually copy/move page table entries in
+dup_mm() or move_page_tables() without the rmap lock taken. To make this
+work we rely on the rmap walk order not missing any entry: the destination
+VMA is expected to be visited after the source one.
+
+But after switching the rmap implementation to an interval tree it's not
+always possible to preserve the expected walk order.
+
+This works fine for dup_mm(), since the new VMA has the same
+vma_start_pgoff()/vma_last_pgoff() and is explicitly inserted after the
+source VMA with vma_interval_tree_insert_after().
+
+But on move_vma() the destination VMA can be merged into an adjacent one
+and, as a result, shifted left in the interval tree. Fortunately, we can
+detect that situation and prevent the race with the rmap walk by moving
+the page table entries under the rmap lock. See commit 38a76013ad80.
+
+The problem is that we miss taking this lock when we move a transhuge
+PMD. Most likely this bug caused the crash [1].
+
+[1] http://thread.gmane.org/gmane.linux.kernel.mm/96473
+
+Fixes: 108d6642ad81 ("mm anon rmap: remove anon_vma_moveto_tail")
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Acked-by: Michel Lespinasse <walken@google.com>
+Cc: Dave Jones <davej@redhat.com>
+Cc: David Miller <davem@davemloft.net>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mremap.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -175,10 +175,17 @@ unsigned long move_page_tables(struct vm
+ break;
+ if (pmd_trans_huge(*old_pmd)) {
+ int err = 0;
+- if (extent == HPAGE_PMD_SIZE)
++ if (extent == HPAGE_PMD_SIZE) {
++ VM_BUG_ON(vma->vm_file || !vma->anon_vma);
++ /* See comment in move_ptes() */
++ if (need_rmap_locks)
++ anon_vma_lock_write(vma->anon_vma);
+ err = move_huge_pmd(vma, new_vma, old_addr,
+ new_addr, old_end,
+ old_pmd, new_pmd);
++ if (need_rmap_locks)
++ anon_vma_unlock_write(vma->anon_vma);
++ }
+ if (err > 0) {
+ need_flush = true;
+ continue;
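The pattern applied above is: when need_rmap_locks indicates that a
concurrent rmap walker could visit the destination before the source, move
the entry only while holding the lock the walker takes. Below is a
self-contained userspace sketch of that pattern, with a pthread mutex in
place of anon_vma_lock_write() and invented slot/entry names; it is an
illustration only, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;
static int src_slot = 1;	/* the entry currently lives at the source */
static int dst_slot;
static bool need_locks = true;	/* analogous to need_rmap_locks */

/* analogous to moving the huge PMD in move_page_tables() */
static void *mover(void *arg)
{
	(void)arg;
	if (need_locks)
		pthread_mutex_lock(&walk_lock);		/* ~ anon_vma_lock_write() */
	dst_slot = src_slot;				/* ~ move_huge_pmd() */
	src_slot = 0;
	if (need_locks)
		pthread_mutex_unlock(&walk_lock);	/* ~ anon_vma_unlock_write() */
	return NULL;
}

/* analogous to the rmap walk: it must find the entry exactly once */
static void *walker(void *arg)
{
	int seen = 0;

	(void)arg;
	pthread_mutex_lock(&walk_lock);
	seen += dst_slot;	/* destination visited first: the problematic order */
	seen += src_slot;	/* source visited second */
	pthread_mutex_unlock(&walk_lock);
	printf("walker saw the entry %d time(s)\n", seen);
	return NULL;
}

int main(void)
{
	pthread_t m, w;

	pthread_create(&m, NULL, mover, NULL);
	pthread_create(&w, NULL, walker, NULL);
	pthread_join(m, NULL);
	pthread_join(w, NULL);
	return 0;
}

With the conditional lock in mover() the walker always reports exactly one
sighting; without it the move can slip between the two reads, so the walker
can miss the entry entirely, which is the class of race the hunk above closes
for the transhuge-PMD case.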
ipmi-fix-a-race-restarting-the-timer.patch
ipmi-reset-the-kcs-timeout-when-starting-error-recovery.patch
mac80211-fix-suspend-vs.-authentication-race.patch
+mm-thp-close-race-between-mremap-and-split_huge_page.patch
+x86-mm-hugetlb-add-missing-tlb-page-invalidation-for-hugetlb_cow.patch
+hwpoison-hugetlb-lock_page-unlock_page-does-not-match-for-handling-a-free-hugepage.patch
--- /dev/null
+From 9844f5462392b53824e8b86726e7c33b5ecbb676 Mon Sep 17 00:00:00 2001
+From: Anthony Iliopoulos <anthony.iliopoulos@huawei.com>
+Date: Wed, 14 May 2014 11:29:48 +0200
+Subject: x86, mm, hugetlb: Add missing TLB page invalidation for hugetlb_cow()
+
+From: Anthony Iliopoulos <anthony.iliopoulos@huawei.com>
+
+commit 9844f5462392b53824e8b86726e7c33b5ecbb676 upstream.
+
+The invalidation is required in order to maintain proper semantics
+under CoW conditions. In scenarios where a process clones several
+threads, a thread operating on a core whose DTLB entry for a
+particular hugepage has not been invalidated will keep reading from
+the hugepage that belongs to the forked child process, even after
+hugetlb_cow().
+
+The thread will not see the updated page until the stale DTLB entry
+is evicted, the thread itself attempts to write into the page, the
+child process exits, or the thread gets migrated to a different
+processor.
+
+Signed-off-by: Anthony Iliopoulos <anthony.iliopoulos@huawei.com>
+Link: http://lkml.kernel.org/r/20140514092948.GA17391@server-36.huawei.corp
+Suggested-by: Shay Goikhman <shay.goikhman@huawei.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/hugetlb.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_cl
+ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+ {
++ ptep_clear_flush(vma, addr, ptep);
+ }
+
+ static inline int huge_pte_none(pte_t pte)
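As a rough analogy for the failure mode described above (illustration only,
not kernel code; every name below is invented), a locally cached pointer can
stand in for the stale DTLB entry: until it is explicitly refreshed, lookups
keep resolving to the old page even though the "page table" already points
at the private copy installed by CoW.

#include <stdio.h>

static int shared_page = 1;	/* page mapped by parent and child before CoW */
static int private_copy = 2;	/* new page installed for the parent by CoW */

static int *mapping = &shared_page;	/* authoritative "page table" entry */

int main(void)
{
	/* A core caches the translation, like a DTLB entry. */
	int *tlb_entry = mapping;

	/* CoW: the "page table" now points at the private copy... */
	mapping = &private_copy;

	/* ...but the cached translation was never invalidated. */
	printf("via stale cache:    %d\n", *tlb_entry);

	/* Invalidation forces a fresh walk of the "page table". */
	tlb_entry = mapping;
	printf("after invalidation: %d\n", *tlb_entry);
	return 0;
}

The one-line hunk above provides that invalidation step on x86:
huge_ptep_clear_flush() now calls ptep_clear_flush() instead of doing
nothing, so stale hugepage translations are flushed after hugetlb_cow().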