2.6.32 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Mon, 23 Aug 2010 20:25:48 +0000 (13:25 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Mon, 23 Aug 2010 20:25:48 +0000 (13:25 -0700)
queue-2.6.32/mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch [new file with mode: 0644]
queue-2.6.32/mm-make-the-mlock-stack-guard-page-checks-stricter.patch [new file with mode: 0644]
queue-2.6.32/mm-make-the-vma-list-be-doubly-linked.patch [new file with mode: 0644]
queue-2.6.32/series

diff --git a/queue-2.6.32/mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch b/queue-2.6.32/mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch
new file mode 100644
index 0000000..075f92a
--- /dev/null
+++ b/queue-2.6.32/mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch
@@ -0,0 +1,49 @@
+From 0e8e50e20c837eeec8323bba7dcd25fe5479194c Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 20 Aug 2010 16:49:40 -0700
+Subject: mm: make stack guard page logic use vm_prev pointer
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 0e8e50e20c837eeec8323bba7dcd25fe5479194c upstream.
+
+Like the mlock() change previously, this makes the stack guard check
+code use vma->vm_prev to see what the mapping below the current stack
+is, rather than have to look it up with find_vma().
+
+Also, accept an abutting stack segment, since that happens naturally if
+you split the stack with mlock or mprotect.
+
+Tested-by: Ian Campbell <ijc@hellion.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/memory.c |   15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2640,11 +2640,18 @@ static inline int check_stack_guard_page
+ {
+       address &= PAGE_MASK;
+       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+-              address -= PAGE_SIZE;
+-              if (find_vma(vma->vm_mm, address) != vma)
+-                      return -ENOMEM;
++              struct vm_area_struct *prev = vma->vm_prev;
++
++              /*
++               * Is there a mapping abutting this one below?
++               *
++               * That's only ok if it's the same stack mapping
++               * that has gotten split..
++               */
++              if (prev && prev->vm_end == address)
++                      return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+-              expand_stack(vma, address);
++              expand_stack(vma, address - PAGE_SIZE);
+       }
+       return 0;
+ }
diff --git a/queue-2.6.32/mm-make-the-mlock-stack-guard-page-checks-stricter.patch b/queue-2.6.32/mm-make-the-mlock-stack-guard-page-checks-stricter.patch
new file mode 100644
index 0000000..d6ecc42
--- /dev/null
+++ b/queue-2.6.32/mm-make-the-mlock-stack-guard-page-checks-stricter.patch
@@ -0,0 +1,57 @@
+From 7798330ac8114c731cfab83e634c6ecedaa233d7 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 20 Aug 2010 16:39:25 -0700
+Subject: mm: make the mlock() stack guard page checks stricter
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 7798330ac8114c731cfab83e634c6ecedaa233d7 upstream.
+
+If we've split the stack vma, only the lowest one has the guard page.
+Now that we have a doubly linked list of vma's, checking this is trivial.
+
+Tested-by: Ian Campbell <ijc@hellion.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/mlock.c |   21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -138,6 +138,19 @@ void munlock_vma_page(struct page *page)
+       }
+ }
+
++/* Is the vma a continuation of the stack vma above it? */
++static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
++{
++      return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
++}
++
++static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
++{
++      return (vma->vm_flags & VM_GROWSDOWN) &&
++              (vma->vm_start == addr) &&
++              !vma_stack_continue(vma->vm_prev, addr);
++}
++
+ /**
+  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
+  * @vma:   target vma
+@@ -171,11 +184,9 @@ static long __mlock_vma_pages_range(stru
+               gup_flags |= FOLL_WRITE;
+
+       /* We don't try to access the guard page of a stack vma */
+-      if (vma->vm_flags & VM_GROWSDOWN) {
+-              if (start == vma->vm_start) {
+-                      start += PAGE_SIZE;
+-                      nr_pages--;
+-              }
++      if (stack_guard_page(vma, start)) {
++              addr += PAGE_SIZE;
++              nr_pages--;
+       }
+
+       while (nr_pages > 0) {
diff --git a/queue-2.6.32/mm-make-the-vma-list-be-doubly-linked.patch b/queue-2.6.32/mm-make-the-vma-list-be-doubly-linked.patch
new file mode 100644
index 0000000..4940574
--- /dev/null
+++ b/queue-2.6.32/mm-make-the-vma-list-be-doubly-linked.patch
@@ -0,0 +1,156 @@
+From 297c5eee372478fc32fec5fe8eed711eedb13f3d Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 20 Aug 2010 16:24:55 -0700
+Subject: mm: make the vma list be doubly linked
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 297c5eee372478fc32fec5fe8eed711eedb13f3d upstream.
+
+It's a really simple list, and several of the users want to go backwards
+in it to find the previous vma.  So rather than have to look up the
+previous entry with 'find_vma_prev()' or something similar, just make it
+doubly linked instead.
+
+Tested-by: Ian Campbell <ijc@hellion.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/mm_types.h |    2 +-
+ kernel/fork.c            |    7 +++++--
+ mm/mmap.c                |   21 +++++++++++++++++----
+ mm/nommu.c               |    7 +++++--
+ 4 files changed, 28 insertions(+), 9 deletions(-)
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -138,7 +138,7 @@ struct vm_area_struct {
+                                          within vm_mm. */
+
+       /* linked list of VM areas per task, sorted by address */
+-      struct vm_area_struct *vm_next;
++      struct vm_area_struct *vm_next, *vm_prev;
+
+       pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
+       unsigned long vm_flags;         /* Flags, see mm.h. */
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -277,7 +277,7 @@ out:
+ #ifdef CONFIG_MMU
+ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+-      struct vm_area_struct *mpnt, *tmp, **pprev;
++      struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+       struct rb_node **rb_link, *rb_parent;
+       int retval;
+       unsigned long charge;
+@@ -305,6 +305,7 @@ static int dup_mmap(struct mm_struct *mm
+       if (retval)
+               goto out;
+
++      prev = NULL;
+       for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+               struct file *file;
+
+@@ -333,7 +334,7 @@ static int dup_mmap(struct mm_struct *mm
+               vma_set_policy(tmp, pol);
+               tmp->vm_flags &= ~VM_LOCKED;
+               tmp->vm_mm = mm;
+-              tmp->vm_next = NULL;
++              tmp->vm_next = tmp->vm_prev = NULL;
+               anon_vma_link(tmp);
+               file = tmp->vm_file;
+               if (file) {
+@@ -367,6 +368,8 @@ static int dup_mmap(struct mm_struct *mm
+                */
+               *pprev = tmp;
+               pprev = &tmp->vm_next;
++              tmp->vm_prev = prev;
++              prev = tmp;
+
+               __vma_link_rb(mm, tmp, rb_link, rb_parent);
+               rb_link = &tmp->vm_rb.rb_right;
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -389,17 +389,23 @@ static inline void
+ __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+               struct vm_area_struct *prev, struct rb_node *rb_parent)
+ {
++      struct vm_area_struct *next;
++
++      vma->vm_prev = prev;
+       if (prev) {
+-              vma->vm_next = prev->vm_next;
++              next = prev->vm_next;
+               prev->vm_next = vma;
+       } else {
+               mm->mmap = vma;
+               if (rb_parent)
+-                      vma->vm_next = rb_entry(rb_parent,
++                      next = rb_entry(rb_parent,
+                                       struct vm_area_struct, vm_rb);
+               else
+-                      vma->vm_next = NULL;
++                      next = NULL;
+       }
++      vma->vm_next = next;
++      if (next)
++              next->vm_prev = vma;
+ }
+
+ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -487,7 +493,11 @@ static inline void
+ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+               struct vm_area_struct *prev)
+ {
+-      prev->vm_next = vma->vm_next;
++      struct vm_area_struct *next = vma->vm_next;
++
++      prev->vm_next = next;
++      if (next)
++              next->vm_prev = prev;
+       rb_erase(&vma->vm_rb, &mm->mm_rb);
+       if (mm->mmap_cache == vma)
+               mm->mmap_cache = prev;
+@@ -1798,6 +1808,7 @@ detach_vmas_to_be_unmapped(struct mm_str
+       unsigned long addr;
+
+       insertion_point = (prev ? &prev->vm_next : &mm->mmap);
++      vma->vm_prev = NULL;
+       do {
+               rb_erase(&vma->vm_rb, &mm->mm_rb);
+               mm->map_count--;
+@@ -1805,6 +1816,8 @@ detach_vmas_to_be_unmapped(struct mm_str
+               vma = vma->vm_next;
+       } while (vma && vma->vm_start < end);
+       *insertion_point = vma;
++      if (vma)
++              vma->vm_prev = prev;
+       tail_vma->vm_next = NULL;
+       if (mm->unmap_area == arch_unmap_area)
+               addr = prev ? prev->vm_end : mm->mmap_base;
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -608,7 +608,7 @@ static void protect_vma(struct vm_area_s
+  */
+ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+ {
+-      struct vm_area_struct *pvma, **pp;
++      struct vm_area_struct *pvma, **pp, *next;
+       struct address_space *mapping;
+       struct rb_node **p, *parent;
+
+@@ -668,8 +668,11 @@ static void add_vma_to_mm(struct mm_stru
+                       break;
+       }
+
+-      vma->vm_next = *pp;
++      next = *pp;
+       *pp = vma;
++      vma->vm_next = next;
++      if (next)
++              next->vm_prev = vma;
+ }
+
+ /*
diff --git a/queue-2.6.32/series b/queue-2.6.32/series
index 7d7d1cf1715a236a9c02e47272873bb6978c36ac..85930e45435f2cc84815a076a92925cc2ebda8aa 100644
--- a/queue-2.6.32/series
+++ b/queue-2.6.32/series
@@ -21,3 +21,6 @@ pxa3xx-fix-ns2cycle-equation.patch
 drm-i915-edp-flush-the-write-before-waiting-for-plls.patch
 dm-mpath-fix-null-pointer-dereference-when-path-parameters-missing.patch
 dm-ioctl-release-_hash_lock-between-devices-in-remove_all.patch
+mm-make-the-vma-list-be-doubly-linked.patch
+mm-make-the-mlock-stack-guard-page-checks-stricter.patch
+mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch