--- /dev/null
+From 4a705fef986231a3e7a6b1a6d3c37025f021f49f Mon Sep 17 00:00:00 2001
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Date: Mon, 23 Jun 2014 13:22:03 -0700
+Subject: hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned entry
+
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+
+commit 4a705fef986231a3e7a6b1a6d3c37025f021f49f upstream.
+
+There's a race between fork() and hugepage migration: as a result we try
+to "dereference" a swap entry as a normal pte, causing a kernel panic.
+The root cause is that copy_hugetlb_page_range() can't handle the "swap
+entry" family (migration entries and hwpoisoned entries), so let's fix
+it.
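+
+Concretely, with a migration (or hwpoisoned) entry in src_pte, the old
+copy path did roughly:
+
+	entry = huge_ptep_get(src_pte);	/* swap-type entry, not present */
+	ptepage = pte_page(entry);	/* not a real struct page */
+	get_page(ptepage);		/* dereferences a bogus page -> panic */
+
+so a non-present, non-none entry has to be copied as a pte value (and
+downgraded to a read migration entry when the mapping is COW), never
+passed to pte_page().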
+
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: <stable@vger.kernel.org> [2.6.37+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ mm/hugetlb.c | 71 +++++++++++++++++++++++++++++++++++------------------------
+ 1 file changed, 43 insertions(+), 28 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2276,6 +2276,31 @@ static void set_huge_ptep_writable(struc
+ update_mmu_cache(vma, address, ptep);
+ }
+
++static int is_hugetlb_entry_migration(pte_t pte)
++{
++ swp_entry_t swp;
++
++ if (huge_pte_none(pte) || pte_present(pte))
++ return 0;
++ swp = pte_to_swp_entry(pte);
++ if (non_swap_entry(swp) && is_migration_entry(swp))
++ return 1;
++ else
++ return 0;
++}
++
++static int is_hugetlb_entry_hwpoisoned(pte_t pte)
++{
++ swp_entry_t swp;
++
++ if (huge_pte_none(pte) || pte_present(pte))
++ return 0;
++ swp = pte_to_swp_entry(pte);
++ if (non_swap_entry(swp) && is_hwpoison_entry(swp))
++ return 1;
++ else
++ return 0;
++}
+
+ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma)
+@@ -2303,10 +2328,26 @@ int copy_hugetlb_page_range(struct mm_st
+
+ spin_lock(&dst->page_table_lock);
+ spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
+- if (!huge_pte_none(huge_ptep_get(src_pte))) {
++ entry = huge_ptep_get(src_pte);
++ if (huge_pte_none(entry)) { /* skip none entry */
++ ;
++ } else if (unlikely(is_hugetlb_entry_migration(entry) ||
++ is_hugetlb_entry_hwpoisoned(entry))) {
++ swp_entry_t swp_entry = pte_to_swp_entry(entry);
++
++ if (is_write_migration_entry(swp_entry) && cow) {
++ /*
++ * COW mappings require pages in both
++ * parent and child to be set to read.
++ */
++ make_migration_entry_read(&swp_entry);
++ entry = swp_entry_to_pte(swp_entry);
++ set_huge_pte_at(src, addr, src_pte, entry);
++ }
++ set_huge_pte_at(dst, addr, dst_pte, entry);
++ } else {
+ if (cow)
+ huge_ptep_set_wrprotect(src, addr, src_pte);
+- entry = huge_ptep_get(src_pte);
+ ptepage = pte_page(entry);
+ get_page(ptepage);
+ page_dup_rmap(ptepage);
+@@ -2321,32 +2362,6 @@ nomem:
+ return -ENOMEM;
+ }
+
+-static int is_hugetlb_entry_migration(pte_t pte)
+-{
+- swp_entry_t swp;
+-
+- if (huge_pte_none(pte) || pte_present(pte))
+- return 0;
+- swp = pte_to_swp_entry(pte);
+- if (non_swap_entry(swp) && is_migration_entry(swp))
+- return 1;
+- else
+- return 0;
+-}
+-
+-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+-{
+- swp_entry_t swp;
+-
+- if (huge_pte_none(pte) || pte_present(pte))
+- return 0;
+- swp = pte_to_swp_entry(pte);
+- if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+- return 1;
+- else
+- return 0;
+-}
+-
+ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+ {