3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Jul 2014 01:49:57 +0000 (18:49 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Jul 2014 01:49:57 +0000 (18:49 -0700)
added patches:
mm-fix-crashes-from-mbind-merging-vmas.patch

queue-3.14/mm-fix-crashes-from-mbind-merging-vmas.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/mm-fix-crashes-from-mbind-merging-vmas.patch b/queue-3.14/mm-fix-crashes-from-mbind-merging-vmas.patch
new file mode 100644
index 0000000..4fc1ca0
--- /dev/null
+++ b/queue-3.14/mm-fix-crashes-from-mbind-merging-vmas.patch
@@ -0,0 +1,155 @@
+From d05f0cdcbe6388723f1900c549b4850360545201 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Mon, 23 Jun 2014 13:22:07 -0700
+Subject: mm: fix crashes from mbind() merging vmas
+
+From: Hugh Dickins <hughd@google.com>
+
+commit d05f0cdcbe6388723f1900c549b4850360545201 upstream.
+
+In v2.6.34 commit 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
+introduced vma merging to mbind(), but it should have also changed the
+convention of passing start vma from queue_pages_range() (formerly
+check_range()) to new_vma_page(): vma merging may have already freed
+that structure, resulting in BUG at mm/mempolicy.c:1738 and probably
+worse crashes.
+
+Fixes: 9d8cebd4bcd7 ("mm: fix mbind vma merge problem")
+Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Christoph Lameter <cl@linux.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Minchan Kim <minchan.kim@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ mm/mempolicy.c |   46 ++++++++++++++++++++--------------------------
+ 1 file changed, 20 insertions(+), 26 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -653,19 +653,18 @@ static unsigned long change_prot_numa(st
+  * @nodes and @flags,) it's isolated and queued to the pagelist which is
+  * passed via @private.)
+  */
+-static struct vm_area_struct *
++static int
+ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+               const nodemask_t *nodes, unsigned long flags, void *private)
+ {
+-      int err;
+-      struct vm_area_struct *first, *vma, *prev;
+-
++      int err = 0;
++      struct vm_area_struct *vma, *prev;
+
+-      first = find_vma(mm, start);
+-      if (!first)
+-              return ERR_PTR(-EFAULT);
++      vma = find_vma(mm, start);
++      if (!vma)
++              return -EFAULT;
+       prev = NULL;
+-      for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
++      for (; vma && vma->vm_start < end; vma = vma->vm_next) {
+               unsigned long endvma = vma->vm_end;
+
+               if (endvma > end)
+@@ -675,9 +674,9 @@ queue_pages_range(struct mm_struct *mm,
+
+               if (!(flags & MPOL_MF_DISCONTIG_OK)) {
+                       if (!vma->vm_next && vma->vm_end < end)
+-                              return ERR_PTR(-EFAULT);
++                              return -EFAULT;
+                       if (prev && prev->vm_end < vma->vm_start)
+-                              return ERR_PTR(-EFAULT);
++                              return -EFAULT;
+               }
+
+               if (flags & MPOL_MF_LAZY) {
+@@ -691,15 +690,13 @@ queue_pages_range(struct mm_struct *mm,
+
+                      err = queue_pages_pgd_range(vma, start, endvma, nodes,
+                                               flags, private);
+-                      if (err) {
+-                              first = ERR_PTR(err);
++                      if (err)
+                               break;
+-                      }
+               }
+ next:
+               prev = vma;
+       }
+-      return first;
++      return err;
+ }
+
+ /*
+@@ -1184,16 +1181,17 @@ out:
+
+ /*
+  * Allocate a new page for page migration based on vma policy.
+- * Start assuming that page is mapped by vma pointed to by @private.
++ * Start by assuming the page is mapped by the same vma as contains @start.
+  * Search forward from there, if not.  N.B., this assumes that the
+  * list of pages handed to migrate_pages()--which is how we get here--
+  * is in virtual address order.
+  */
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+-      struct vm_area_struct *vma = (struct vm_area_struct *)private;
++      struct vm_area_struct *vma;
+       unsigned long uninitialized_var(address);
+
++      vma = find_vma(current->mm, start);
+       while (vma) {
+               address = page_address_in_vma(page, vma);
+               if (address != -EFAULT)
+@@ -1223,7 +1221,7 @@ int do_migrate_pages(struct mm_struct *m
+       return -ENOSYS;
+ }
+
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+       return NULL;
+ }
+@@ -1233,7 +1231,6 @@ static long do_mbind(unsigned long start
+                    unsigned short mode, unsigned short mode_flags,
+                    nodemask_t *nmask, unsigned long flags)
+ {
+-      struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       struct mempolicy *new;
+       unsigned long end;
+@@ -1299,11 +1296,9 @@ static long do_mbind(unsigned long start
+       if (err)
+               goto mpol_out;
+
+-      vma = queue_pages_range(mm, start, end, nmask,
++      err = queue_pages_range(mm, start, end, nmask,
+                         flags | MPOL_MF_INVERT, &pagelist);
+-
+-      err = PTR_ERR(vma);     /* maybe ... */
+-      if (!IS_ERR(vma))
++      if (!err)
+               err = mbind_range(mm, start, end, new);
+
+       if (!err) {
+@@ -1311,9 +1306,8 @@ static long do_mbind(unsigned long start
+
+               if (!list_empty(&pagelist)) {
+                       WARN_ON_ONCE(flags & MPOL_MF_LAZY);
+-                      nr_failed = migrate_pages(&pagelist, new_vma_page,
+-                                      (unsigned long)vma,
+-                                      MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
++                      nr_failed = migrate_pages(&pagelist, new_page,
++                              start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+                       if (nr_failed)
+                               putback_movable_pages(&pagelist);
+               }
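For readers following the diff above, here is a minimal userspace sketch of the lifetime bug the commit message describes. It is an analogy only, not kernel code: the struct range list and the identifiers merge_ranges and find_range are hypothetical stand-ins for the vma list, the vma merging done by mbind_range(), and the find_vma() re-lookup that the fix performs in new_page().

/*
 * Analogy of the bug: merge_ranges() may free the node a caller saved
 * a pointer to, just as mbind_range()'s vma merging could free the vma
 * that queue_pages_range() used to return for new_vma_page().
 */
#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long start, end;
	struct range *next;
};

/* Merge adjacent nodes, freeing the absorbed one (like vma merging). */
static void merge_ranges(struct range *head)
{
	while (head && head->next) {
		if (head->end == head->next->start) {
			struct range *victim = head->next;
			head->end = victim->end;
			head->next = victim->next;
			free(victim);	/* any saved pointer to victim now dangles */
		} else {
			head = head->next;
		}
	}
}

/* The fix's pattern: re-look the node up by address when it is needed. */
static struct range *find_range(struct range *head, unsigned long addr)
{
	for (; head; head = head->next)
		if (addr < head->end)
			return head;
	return NULL;
}

int main(void)
{
	struct range *b = malloc(sizeof(*b));
	struct range *a = malloc(sizeof(*a));

	b->start = 200; b->end = 300; b->next = NULL;
	a->start = 100; a->end = 200; a->next = b;

	struct range *saved = b;	/* old pattern: pointer taken up front */
	merge_ranges(a);		/* frees b, so 'saved' is left dangling */
	(void)saved;			/* dereferencing it here would be the crash */

	struct range *cur = find_range(a, 250);	/* new pattern: lookup by address */
	if (cur)
		printf("250 falls in [%lu, %lu)\n", cur->start, cur->end);

	free(a);
	return 0;
}

The upstream fix applies the same idea: do_mbind() now passes the start address to migrate_pages(), and new_page() calls find_vma(current->mm, start) itself, after any merging has settled.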
diff --git a/queue-3.14/series b/queue-3.14/series
index 77fa350bd56e7f66e8d295c14ad2c7c2d4ad10a4..790407a24f1df347e6f80ad28ecdbe64c3bd77ab 100644
--- a/queue-3.14/series
+++ b/queue-3.14/series
@@ -92,3 +92,4 @@ drivers-video-fbdev-fb-puv3.c-add-header-files-for-function-unifb_mmap.patch
 mm-numa-remove-bug_on-in-__handle_mm_fault.patch
 slab-fix-oops-when-reading-proc-slab_allocators.patch
 sym53c8xx_2-set-did_requeue-return-code-when-aborting-squeue.patch
+mm-fix-crashes-from-mbind-merging-vmas.patch