git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Jul 2014 01:48:22 +0000 (18:48 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Jul 2014 01:48:22 +0000 (18:48 -0700)
added patches:
hugetlb-fix-copy_hugetlb_page_range-to-handle-migration-hwpoisoned-entry.patch

queue-3.4/hugetlb-fix-copy_hugetlb_page_range-to-handle-migration-hwpoisoned-entry.patch [new file with mode: 0644]
queue-3.4/series

diff --git a/queue-3.4/hugetlb-fix-copy_hugetlb_page_range-to-handle-migration-hwpoisoned-entry.patch b/queue-3.4/hugetlb-fix-copy_hugetlb_page_range-to-handle-migration-hwpoisoned-entry.patch
new file mode 100644 (file)
index 0000000..6982974
--- /dev/null
@@ -0,0 +1,125 @@
+From 4a705fef986231a3e7a6b1a6d3c37025f021f49f Mon Sep 17 00:00:00 2001
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Date: Mon, 23 Jun 2014 13:22:03 -0700
+Subject: hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned entry
+
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+
+commit 4a705fef986231a3e7a6b1a6d3c37025f021f49f upstream.
+
+There's a race between fork() and hugepage migration, as a result we try
+to "dereference" a swap entry as a normal pte, causing kernel panic.
+The cause of the problem is that copy_hugetlb_page_range() can't handle
+"swap entry" family (migration entry and hwpoisoned entry) so let's fix
+it.
+
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: <stable@vger.kernel.org>   [2.6.37+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ mm/hugetlb.c |   71 +++++++++++++++++++++++++++++++++++------------------------
+ 1 file changed, 43 insertions(+), 28 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2276,6 +2276,31 @@ static void set_huge_ptep_writable(struc
+               update_mmu_cache(vma, address, ptep);
+ }
++static int is_hugetlb_entry_migration(pte_t pte)
++{
++      swp_entry_t swp;
++
++      if (huge_pte_none(pte) || pte_present(pte))
++              return 0;
++      swp = pte_to_swp_entry(pte);
++      if (non_swap_entry(swp) && is_migration_entry(swp))
++              return 1;
++      else
++              return 0;
++}
++
++static int is_hugetlb_entry_hwpoisoned(pte_t pte)
++{
++      swp_entry_t swp;
++
++      if (huge_pte_none(pte) || pte_present(pte))
++              return 0;
++      swp = pte_to_swp_entry(pte);
++      if (non_swap_entry(swp) && is_hwpoison_entry(swp))
++              return 1;
++      else
++              return 0;
++}
+ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+                           struct vm_area_struct *vma)
+@@ -2303,10 +2328,26 @@ int copy_hugetlb_page_range(struct mm_st
+               spin_lock(&dst->page_table_lock);
+               spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
+-              if (!huge_pte_none(huge_ptep_get(src_pte))) {
++              entry = huge_ptep_get(src_pte);
++              if (huge_pte_none(entry)) { /* skip none entry */
++                      ;
++              } else if (unlikely(is_hugetlb_entry_migration(entry) ||
++                                  is_hugetlb_entry_hwpoisoned(entry))) {
++                      swp_entry_t swp_entry = pte_to_swp_entry(entry);
++
++                      if (is_write_migration_entry(swp_entry) && cow) {
++                              /*
++                               * COW mappings require pages in both
++                               * parent and child to be set to read.
++                               */
++                              make_migration_entry_read(&swp_entry);
++                              entry = swp_entry_to_pte(swp_entry);
++                              set_huge_pte_at(src, addr, src_pte, entry);
++                      }
++                      set_huge_pte_at(dst, addr, dst_pte, entry);
++              } else {
+                       if (cow)
+                               huge_ptep_set_wrprotect(src, addr, src_pte);
+-                      entry = huge_ptep_get(src_pte);
+                       ptepage = pte_page(entry);
+                       get_page(ptepage);
+                       page_dup_rmap(ptepage);
+@@ -2321,32 +2362,6 @@ nomem:
+       return -ENOMEM;
+ }
+-static int is_hugetlb_entry_migration(pte_t pte)
+-{
+-      swp_entry_t swp;
+-
+-      if (huge_pte_none(pte) || pte_present(pte))
+-              return 0;
+-      swp = pte_to_swp_entry(pte);
+-      if (non_swap_entry(swp) && is_migration_entry(swp))
+-              return 1;
+-      else
+-              return 0;
+-}
+-
+-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+-{
+-      swp_entry_t swp;
+-
+-      if (huge_pte_none(pte) || pte_present(pte))
+-              return 0;
+-      swp = pte_to_swp_entry(pte);
+-      if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+-              return 1;
+-      else
+-              return 0;
+-}
+-
+ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+                           unsigned long end, struct page *ref_page)
+ {
index 662ff69f033a4e43dec4b16a6a3b69c48869d006..20bd5e8640f7f110d891d5468e4e232abbf9702f 100644 (file)
@@ -42,3 +42,4 @@ powerpc-pseries-lparcfg-fix-possible-overflow-are-more-than-1026.patch
 powerpc-pseries-duplicate-dtl-entries-sometimes-sent-to-userspace.patch
 acpi-video-ignore-bios-backlight-value-for-hp-dm4.patch
 powerpc-sysfs-disable-writing-to-purr-in-guest-mode.patch
+hugetlb-fix-copy_hugetlb_page_range-to-handle-migration-hwpoisoned-entry.patch