From: Greg Kroah-Hartman
Date: Mon, 23 Aug 2010 20:31:39 +0000 (-0700)
Subject: .34 patches
X-Git-Tag: v2.6.32.21~35
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8035eba0c62c47b121d75c4a7508873c65fa8458;p=thirdparty%2Fkernel%2Fstable-queue.git

.34 patches
---

diff --git a/queue-2.6.34/mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch b/queue-2.6.34/mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch
new file mode 100644
index 00000000000..75d182e7856
--- /dev/null
+++ b/queue-2.6.34/mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch
@@ -0,0 +1,49 @@
+From 0e8e50e20c837eeec8323bba7dcd25fe5479194c Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Fri, 20 Aug 2010 16:49:40 -0700
+Subject: mm: make stack guard page logic use vm_prev pointer
+
+From: Linus Torvalds
+
+commit 0e8e50e20c837eeec8323bba7dcd25fe5479194c upstream.
+
+Like the mlock() change previously, this makes the stack guard check
+code use vma->vm_prev to see what the mapping below the current stack
+is, rather than have to look it up with find_vma().
+
+Also, accept an abutting stack segment, since that happens naturally if
+you split the stack with mlock or mprotect.
+
+Tested-by: Ian Campbell
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/memory.c |   15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2761,11 +2761,18 @@ static inline int check_stack_guard_page
+ {
+         address &= PAGE_MASK;
+         if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+-                address -= PAGE_SIZE;
+-                if (find_vma(vma->vm_mm, address) != vma)
+-                        return -ENOMEM;
++                struct vm_area_struct *prev = vma->vm_prev;
+
+-                expand_stack(vma, address);
++                /*
++                 * Is there a mapping abutting this one below?
++                 *
++                 * That's only ok if it's the same stack mapping
++                 * that has gotten split..
++                 */
++                if (prev && prev->vm_end == address)
++                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
++
++                expand_stack(vma, address - PAGE_SIZE);
+         }
+         return 0;
+ }
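Taken out of its double-diff encoding, the new check is easier to study in isolation. What follows is a minimal userspace sketch, not kernel code: the cut-down struct vma, the VM_GROWSDOWN value, and the main() harness are invented stand-ins for this illustration, and only the predicate mirrors the logic the patch puts into check_stack_guard_page().

/* Sketch of the vm_prev-based stack guard check (illustrative only). */
#include <errno.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x0100 /* stand-in flag value for this sketch */

struct vma {
        unsigned long vm_start, vm_end, vm_flags;
        struct vma *vm_prev; /* the back pointer this series introduces */
};

/* 0 = fault at the first stack page may proceed, -ENOMEM = refused. */
static int check_stack_guard_page(struct vma *vma, unsigned long address)
{
        if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
                struct vma *prev = vma->vm_prev;

                /* An abutting mapping below is only ok if it is the
                 * same stack mapping, split by mlock()/mprotect(). */
                if (prev && prev->vm_end == address)
                        return (prev->vm_flags & VM_GROWSDOWN) ? 0 : -ENOMEM;
                /* the kernel would call expand_stack() here */
        }
        return 0;
}

int main(void)
{
        struct vma below = { 0x1000, 0x8000, 0, NULL };
        struct vma stack = { 0x8000, 0xf000, VM_GROWSDOWN, &below };

        printf("foreign neighbour: %d\n", check_stack_guard_page(&stack, 0x8000));
        below.vm_flags = VM_GROWSDOWN; /* now it looks like a split stack */
        printf("split stack:       %d\n", check_stack_guard_page(&stack, 0x8000));
        return 0;
}

Under these assumptions, a mapping of the wrong kind sitting flush under the stack yields -ENOMEM, while an abutting VM_GROWSDOWN segment, the natural result of splitting the stack, is accepted; the old find_vma() lookup is no longer needed because vm_prev hands back the mapping below directly.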
diff --git a/queue-2.6.34/mm-make-the-mlock-stack-guard-page-checks-stricter.patch b/queue-2.6.34/mm-make-the-mlock-stack-guard-page-checks-stricter.patch
new file mode 100644
index 00000000000..368f3dadaac
--- /dev/null
+++ b/queue-2.6.34/mm-make-the-mlock-stack-guard-page-checks-stricter.patch
@@ -0,0 +1,57 @@
+From 7798330ac8114c731cfab83e634c6ecedaa233d7 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Fri, 20 Aug 2010 16:39:25 -0700
+Subject: mm: make the mlock() stack guard page checks stricter
+
+From: Linus Torvalds
+
+commit 7798330ac8114c731cfab83e634c6ecedaa233d7 upstream.
+
+If we've split the stack vma, only the lowest one has the guard page.
+Now that we have a doubly linked list of vma's, checking this is trivial.
+
+Tested-by: Ian Campbell
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/mlock.c |   21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
+         }
+ }
+
++/* Is the vma a continuation of the stack vma above it? */
++static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
++{
++        return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
++}
++
++static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
++{
++        return (vma->vm_flags & VM_GROWSDOWN) &&
++                (vma->vm_start == addr) &&
++                !vma_stack_continue(vma->vm_prev, addr);
++}
++
+ /**
+  * __mlock_vma_pages_range() - mlock a range of pages in the vma.
+  * @vma: target vma
+@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(stru
+                 gup_flags |= FOLL_WRITE;
+
+         /* We don't try to access the guard page of a stack vma */
+-        if (vma->vm_flags & VM_GROWSDOWN) {
+-                if (start == vma->vm_start) {
+-                        start += PAGE_SIZE;
+-                        nr_pages--;
+-                }
++        if (stack_guard_page(vma, start)) {
++                addr += PAGE_SIZE;
++                nr_pages--;
+         }
+
+         while (nr_pages > 0) {
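The two predicates read more naturally outside the queue format. The sketch below is illustrative userspace C, not the kernel source: struct vma and the VM_GROWSDOWN value are stand-ins, and the two functions simply mirror the vma_stack_continue() and stack_guard_page() helpers the patch adds.

/* Sketch: with a split stack, only the lowest piece has the guard page. */
#include <stdio.h>

#define VM_GROWSDOWN 0x0100 /* stand-in flag value for this sketch */

struct vma {
        unsigned long vm_start, vm_end, vm_flags;
        struct vma *vm_prev;
};

/* Is the vma a continuation of the stack vma above it? */
static int vma_stack_continue(struct vma *vma, unsigned long addr)
{
        return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static int stack_guard_page(struct vma *vma, unsigned long addr)
{
        return (vma->vm_flags & VM_GROWSDOWN) &&
                (vma->vm_start == addr) &&
                !vma_stack_continue(vma->vm_prev, addr);
}

int main(void)
{
        /* One stack, split in two by mlock() on its lower half. */
        struct vma lower = { 0x8000, 0xa000, VM_GROWSDOWN, NULL };
        struct vma upper = { 0xa000, 0xf000, VM_GROWSDOWN, &lower };

        printf("lower piece guarded: %d\n", stack_guard_page(&lower, lower.vm_start)); /* 1 */
        printf("upper piece guarded: %d\n", stack_guard_page(&upper, upper.vm_start)); /* 0 */
        return 0;
}

The old test looked only at VM_GROWSDOWN and vm_start, so it would have skipped a page in every piece of a split stack; the stricter test skips one page only at the true bottom.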
diff --git a/queue-2.6.34/mm-make-the-vma-list-be-doubly-linked.patch b/queue-2.6.34/mm-make-the-vma-list-be-doubly-linked.patch
new file mode 100644
index 00000000000..28379e833d8
--- /dev/null
+++ b/queue-2.6.34/mm-make-the-vma-list-be-doubly-linked.patch
@@ -0,0 +1,156 @@
+From 297c5eee372478fc32fec5fe8eed711eedb13f3d Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Fri, 20 Aug 2010 16:24:55 -0700
+Subject: mm: make the vma list be doubly linked
+
+From: Linus Torvalds
+
+commit 297c5eee372478fc32fec5fe8eed711eedb13f3d upstream.
+
+It's a really simple list, and several of the users want to go backwards
+in it to find the previous vma. So rather than have to look up the
+previous entry with 'find_vma_prev()' or something similar, just make it
+doubly linked instead.
+
+Tested-by: Ian Campbell
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/mm_types.h |    2 +-
+ kernel/fork.c            |    7 +++++--
+ mm/mmap.c                |   21 +++++++++++++++++----
+ mm/nommu.c               |    7 +++++--
+ 4 files changed, 28 insertions(+), 9 deletions(-)
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -134,7 +134,7 @@ struct vm_area_struct {
+                                         within vm_mm. */
+
+         /* linked list of VM areas per task, sorted by address */
+-        struct vm_area_struct *vm_next;
++        struct vm_area_struct *vm_next, *vm_prev;
+
+         pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
+         unsigned long vm_flags;         /* Flags, see mm.h. */
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -287,7 +287,7 @@ out:
+ #ifdef CONFIG_MMU
+ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+-        struct vm_area_struct *mpnt, *tmp, **pprev;
++        struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+         struct rb_node **rb_link, *rb_parent;
+         int retval;
+         unsigned long charge;
+@@ -315,6 +315,7 @@ static int dup_mmap(struct mm_struct *mm
+         if (retval)
+                 goto out;
+
++        prev = NULL;
+         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+                 struct file *file;
+
+@@ -346,7 +347,7 @@ static int dup_mmap(struct mm_struct *mm
+                         goto fail_nomem_anon_vma_fork;
+                 tmp->vm_flags &= ~VM_LOCKED;
+                 tmp->vm_mm = mm;
+-                tmp->vm_next = NULL;
++                tmp->vm_next = tmp->vm_prev = NULL;
+                 file = tmp->vm_file;
+                 if (file) {
+                         struct inode *inode = file->f_path.dentry->d_inode;
+@@ -379,6 +380,8 @@ static int dup_mmap(struct mm_struct *mm
+                  */
+                 *pprev = tmp;
+                 pprev = &tmp->vm_next;
++                tmp->vm_prev = prev;
++                prev = tmp;
+
+                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
+                 rb_link = &tmp->vm_rb.rb_right;
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -388,17 +388,23 @@ static inline void
+ __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+                 struct vm_area_struct *prev, struct rb_node *rb_parent)
+ {
++        struct vm_area_struct *next;
++
++        vma->vm_prev = prev;
+         if (prev) {
+-                vma->vm_next = prev->vm_next;
++                next = prev->vm_next;
+                 prev->vm_next = vma;
+         } else {
+                 mm->mmap = vma;
+                 if (rb_parent)
+-                        vma->vm_next = rb_entry(rb_parent,
++                        next = rb_entry(rb_parent,
+                                         struct vm_area_struct, vm_rb);
+                 else
+-                        vma->vm_next = NULL;
++                        next = NULL;
+         }
++        vma->vm_next = next;
++        if (next)
++                next->vm_prev = vma;
+ }
+
+ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -485,7 +491,11 @@ static inline void
+ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+                 struct vm_area_struct *prev)
+ {
+-        prev->vm_next = vma->vm_next;
++        struct vm_area_struct *next = vma->vm_next;
++
++        prev->vm_next = next;
++        if (next)
++                next->vm_prev = prev;
+         rb_erase(&vma->vm_rb, &mm->mm_rb);
+         if (mm->mmap_cache == vma)
+                 mm->mmap_cache = prev;
+@@ -1900,6 +1910,7 @@ detach_vmas_to_be_unmapped(struct mm_str
+         unsigned long addr;
+
+         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
++        vma->vm_prev = NULL;
+         do {
+                 rb_erase(&vma->vm_rb, &mm->mm_rb);
+                 mm->map_count--;
+@@ -1907,6 +1918,8 @@ detach_vmas_to_be_unmapped(struct mm_str
+                 vma = vma->vm_next;
+         } while (vma && vma->vm_start < end);
+         *insertion_point = vma;
++        if (vma)
++                vma->vm_prev = prev;
+         tail_vma->vm_next = NULL;
+         if (mm->unmap_area == arch_unmap_area)
+                 addr = prev ? prev->vm_end : mm->mmap_base;
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -609,7 +609,7 @@ static void protect_vma(struct vm_area_s
+  */
+ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+ {
+-        struct vm_area_struct *pvma, **pp;
++        struct vm_area_struct *pvma, **pp, *next;
+         struct address_space *mapping;
+         struct rb_node **p, *parent;
+
+@@ -669,8 +669,11 @@ static void add_vma_to_mm(struct mm_stru
+                         break;
+         }
+
+-        vma->vm_next = *pp;
++        next = *pp;
+         *pp = vma;
++        vma->vm_next = next;
++        if (next)
++                next->vm_prev = vma;
+ }
+
+ /*
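The core of the conversion is an invariant: every insert and unlink must now maintain both directions of the list. A compact userspace sketch, with invented helper names and a simplified struct vma (the real __vma_link_list() and __vma_unlink() also maintain mm->mmap and the rbtree), shows the pointer discipline:

/* Sketch of keeping vm_next/vm_prev consistent on insert and unlink. */
#include <assert.h>
#include <stddef.h>

struct vma {
        struct vma *vm_next, *vm_prev;
};

/* Insert vma after prev; prev == NULL means insert at the list head. */
static void vma_link_list(struct vma **head, struct vma *vma, struct vma *prev)
{
        struct vma *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                next = *head;
                *head = vma;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Unlink vma, whose predecessor is prev (NULL if vma is the head). */
static void vma_unlink(struct vma **head, struct vma *vma, struct vma *prev)
{
        struct vma *next = vma->vm_next;

        if (prev)
                prev->vm_next = next;
        else
                *head = next;
        if (next)
                next->vm_prev = prev;
}

int main(void)
{
        struct vma a = { 0 }, b = { 0 };
        struct vma *head = NULL;

        vma_link_list(&head, &a, NULL);  /* list: a */
        vma_link_list(&head, &b, &a);    /* list: a <-> b */
        assert(a.vm_next == &b && b.vm_prev == &a);

        vma_unlink(&head, &a, NULL);     /* list: b */
        assert(head == &b && b.vm_prev == NULL);
        return 0;
}

With vm_prev in place, finding the mapping below a vma is a single pointer dereference, which is exactly what the two guard page patches above rely on.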
diff --git a/queue-2.6.34/series b/queue-2.6.34/series
index 630b2a440f6..cb555d2819d 100644
--- a/queue-2.6.34/series
+++ b/queue-2.6.34/series
@@ -40,3 +40,6 @@ dm-snapshot-iterate-origin-and-cow-devices.patch
 dm-snapshot-test-chunk-size-against-both-origin-and-snapshot.patch
 dm-prevent-access-to-md-being-deleted.patch
 dm-ioctl-release-_hash_lock-between-devices-in-remove_all.patch
+mm-make-the-vma-list-be-doubly-linked.patch
+mm-make-the-mlock-stack-guard-page-checks-stricter.patch
+mm-make-stack-guard-page-logic-use-vm_prev-pointer.patch