3.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 8 Jul 2014 01:52:46 +0000 (18:52 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 8 Jul 2014 01:52:46 +0000 (18:52 -0700)
added patches:
mm-fix-crashes-from-mbind-merging-vmas.patch

queue-3.10/mm-fix-crashes-from-mbind-merging-vmas.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/mm-fix-crashes-from-mbind-merging-vmas.patch b/queue-3.10/mm-fix-crashes-from-mbind-merging-vmas.patch
new file mode 100644
index 0000000..d544485
--- /dev/null
+++ b/queue-3.10/mm-fix-crashes-from-mbind-merging-vmas.patch
@@ -0,0 +1,155 @@
+From d05f0cdcbe6388723f1900c549b4850360545201 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Mon, 23 Jun 2014 13:22:07 -0700
+Subject: mm: fix crashes from mbind() merging vmas
+
+From: Hugh Dickins <hughd@google.com>
+
+commit d05f0cdcbe6388723f1900c549b4850360545201 upstream.
+
+In v2.6.34 commit 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
+introduced vma merging to mbind(), but it should have also changed the
+convention of passing start vma from queue_pages_range() (formerly
+check_range()) to new_vma_page(): vma merging may have already freed
+that structure, resulting in BUG at mm/mempolicy.c:1738 and probably
+worse crashes.
+
+Fixes: 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
+Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Christoph Lameter <cl@linux.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Minchan Kim <minchan.kim@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ mm/mempolicy.c |   46 ++++++++++++++++++++--------------------------
+ 1 file changed, 20 insertions(+), 26 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -608,19 +608,18 @@ static unsigned long change_prot_numa(st
+  * If pagelist != NULL then isolate pages from the LRU and
+  * put them on the pagelist.
+  */
+-static struct vm_area_struct *
++static int
+ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+               const nodemask_t *nodes, unsigned long flags, void *private)
+ {
+-      int err;
+-      struct vm_area_struct *first, *vma, *prev;
+-
++      int err = 0;
++      struct vm_area_struct *vma, *prev;
+
+-      first = find_vma(mm, start);
+-      if (!first)
+-              return ERR_PTR(-EFAULT);
++      vma = find_vma(mm, start);
++      if (!vma)
++              return -EFAULT;
+       prev = NULL;
+-      for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
++      for (; vma && vma->vm_start < end; vma = vma->vm_next) {
+               unsigned long endvma = vma->vm_end;
+
+               if (endvma > end)
+@@ -630,9 +629,9 @@ check_range(struct mm_struct *mm, unsign
+
+               if (!(flags & MPOL_MF_DISCONTIG_OK)) {
+                       if (!vma->vm_next && vma->vm_end < end)
+-                              return ERR_PTR(-EFAULT);
++                              return -EFAULT;
+                       if (prev && prev->vm_end < vma->vm_start)
+-                              return ERR_PTR(-EFAULT);
++                              return -EFAULT;
+               }
+
+               if (is_vm_hugetlb_page(vma))
+@@ -649,15 +648,13 @@ check_range(struct mm_struct *mm, unsign
+
+                       err = check_pgd_range(vma, start, endvma, nodes,
+                                               flags, private);
+-                      if (err) {
+-                              first = ERR_PTR(err);
++                      if (err)
+                               break;
+-                      }
+               }
+ next:
+               prev = vma;
+       }
+-      return first;
++      return err;
+ }
+
+ /*
+@@ -1138,16 +1135,17 @@ out:
+
+ /*
+  * Allocate a new page for page migration based on vma policy.
+- * Start assuming that page is mapped by vma pointed to by @private.
++ * Start by assuming the page is mapped by the same vma as contains @start.
+  * Search forward from there, if not.  N.B., this assumes that the
+  * list of pages handed to migrate_pages()--which is how we get here--
+  * is in virtual address order.
+  */
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+-      struct vm_area_struct *vma = (struct vm_area_struct *)private;
++      struct vm_area_struct *vma;
+       unsigned long uninitialized_var(address);
+
++      vma = find_vma(current->mm, start);
+       while (vma) {
+               address = page_address_in_vma(page, vma);
+               if (address != -EFAULT)
+@@ -1173,7 +1171,7 @@ int do_migrate_pages(struct mm_struct *m
+       return -ENOSYS;
+ }
+
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+       return NULL;
+ }
+@@ -1183,7 +1181,6 @@ static long do_mbind(unsigned long start
+                    unsigned short mode, unsigned short mode_flags,
+                    nodemask_t *nmask, unsigned long flags)
+ {
+-      struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       struct mempolicy *new;
+       unsigned long end;
+@@ -1249,11 +1246,9 @@ static long do_mbind(unsigned long start
+       if (err)
+               goto mpol_out;
+
+-      vma = check_range(mm, start, end, nmask,
++      err = check_range(mm, start, end, nmask,
+                         flags | MPOL_MF_INVERT, &pagelist);
+-
+-      err = PTR_ERR(vma);     /* maybe ... */
+-      if (!IS_ERR(vma))
++      if (!err)
+               err = mbind_range(mm, start, end, new);
+
+       if (!err) {
+@@ -1261,9 +1256,8 @@ static long do_mbind(unsigned long start
+
+               if (!list_empty(&pagelist)) {
+                       WARN_ON_ONCE(flags & MPOL_MF_LAZY);
+-                      nr_failed = migrate_pages(&pagelist, new_vma_page,
+-                                      (unsigned long)vma,
+-                                      MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
++                      nr_failed = migrate_pages(&pagelist, new_page,
++                              start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+                       if (nr_failed)
+                               putback_lru_pages(&pagelist);
+               }
diff --git a/queue-3.10/series b/queue-3.10/series
index a0903622b8f3e7f300bab88eaf8159524b65a973..7f2d091e6c5d86221a60290950a06fef672a6e2a 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -52,3 +52,4 @@ arch-unicore32-mm-alignment.c-include-asm-pgtable.h-to-avoid-compiling-error.pat
 drivers-video-fbdev-fb-puv3.c-add-header-files-for-function-unifb_mmap.patch
 sym53c8xx_2-set-did_requeue-return-code-when-aborting-squeue.patch
 hugetlb-fix-copy_hugetlb_page_range-to-handle-migration-hwpoisoned-entry.patch
+mm-fix-crashes-from-mbind-merging-vmas.patch
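The bug the commit message describes is a classic use-after-free: do_mbind()
cached the struct vm_area_struct pointer returned by check_range(), mbind_range()
then merged (and freed) vmas, and the stale pointer was later handed to
new_vma_page() during page migration. The fix passes the stable start address
instead and re-resolves the vma with find_vma() only when a new page is needed.
Below is a minimal user-space sketch of that pattern, not kernel code: struct
range, find_range() and merge_adjacent() are hypothetical stand-ins for
vm_area_struct, find_vma() and the vma merging done by mbind_range().

#include <stdio.h>
#include <stdlib.h>

struct range {				/* stand-in for vm_area_struct */
	unsigned long start, end;
	struct range *next;
};

/* stand-in for find_vma(): first range whose end is above addr */
static struct range *find_range(struct range *head, unsigned long addr)
{
	struct range *r;

	for (r = head; r; r = r->next)
		if (addr < r->end)
			return r;
	return NULL;
}

/* stand-in for vma merging: absorbing the neighbour frees its node */
static void merge_adjacent(struct range *a)
{
	struct range *b = a->next;

	if (b && a->end == b->start) {
		a->end = b->end;
		a->next = b->next;
		free(b);	/* anything still pointing at b now dangles */
	}
}

int main(void)
{
	struct range *a = malloc(sizeof(*a));
	struct range *b = malloc(sizeof(*b));

	a->start = 0;  a->end = 10;  a->next = b;
	b->start = 10; b->end = 20;  b->next = NULL;

	/* Buggy pattern (old new_vma_page): cache the pointer up front... */
	struct range *cached = find_range(a, 15);	/* returns b */

	merge_adjacent(a);	/* ...then merging frees b; cached dangles */

	/* Dereferencing cached here would be the use-after-free. */
	(void)cached;

	/* Fixed pattern (new_page): keep the address, re-resolve afterwards */
	struct range *fresh = find_range(a, 15);	/* returns a, valid */
	printf("range [%lu, %lu)\n", fresh->start, fresh->end);

	free(a);
	return 0;
}

The general discipline is the same one the patch applies: across any operation
that may merge or free list nodes, carry a key that can be re-resolved (here an
address), never a raw pointer into the list.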