--- /dev/null
+From 320b2b8de12698082609ebbc1a17165727f4c893 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 12 Aug 2010 17:54:33 -0700
+Subject: mm: keep a guard page below a grow-down stack segment
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 320b2b8de12698082609ebbc1a17165727f4c893 upstream.
+
+This is a rather minimally invasive patch to solve the problem of the
+user stack growing into a memory mapped area below it. Whenever we fill
+the first page of the stack segment, expand the segment down by one
+page.
+
+Now, admittedly some odd application might _want_ the stack to grow down
+into the preceding memory mapping, and so we may at some point need to
+make this a process tunable (some people might also want to have more
+than a single page of guarding), but let's try the minimal approach
+first.
+
+Tested with a trivial application that maps a single page just below the
+stack, and then starts recursing. Without this, we will get a SIGSEGV
+_after_ the stack has smashed the mapping. With this patch, we'll get a
+nice SIGBUS just as the stack touches the page just above the mapping.
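+
+For reference, a minimal test along those lines could look like the
+sketch below (a hypothetical reconstruction; the program actually used
+for the testing above is not included in this patch):
+
+	/*
+	 * Map one read-only page some distance below the current stack,
+	 * then recurse until the stack grows down into it.  Without the
+	 * guard page the recursion tramples the mapping and dies with
+	 * SIGSEGV afterwards; with it, SIGBUS arrives as soon as the
+	 * stack touches the page just above the mapping.
+	 */
+	#define _GNU_SOURCE
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <sys/mman.h>
+	#include <unistd.h>
+
+	static int recurse(int depth)
+	{
+		char buf[1024];		/* burn stack space on every call */
+
+		buf[0] = (char)depth;
+		/* not a tail call, so the frames really pile up */
+		return buf[0] + recurse(depth + 1);
+	}
+
+	int main(void)
+	{
+		long page = sysconf(_SC_PAGESIZE);
+		char dummy;
+		/* page-align the current stack address, back off a while */
+		uintptr_t sp = (uintptr_t)&dummy & ~((uintptr_t)page - 1);
+		uintptr_t target = sp - 128 * (uintptr_t)page;
+
+		if (mmap((void *)target, page, PROT_READ,
+			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+			 -1, 0) == MAP_FAILED) {
+			perror("mmap");
+			return 1;
+		}
+		printf("mapped page at %p, recursing...\n", (void *)target);
+		return recurse(0);
+	}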
+
+Requested-by: Keith Packard <keithp@keithp.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/memory.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2396,6 +2396,26 @@ out_nomap:
+ }
+
+ /*
++ * This is like a special single-page "expand_downwards()",
++ * except we must first make sure that 'address-PAGE_SIZE'
++ * doesn't hit another vma.
++ *
++ * The "find_vma()" will do the right thing even if we wrap
++ */
++static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
++{
++ address &= PAGE_MASK;
++ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
++ address -= PAGE_SIZE;
++ if (find_vma(vma->vm_mm, address) != vma)
++ return -ENOMEM;
++
++ expand_stack(vma, address);
++ }
++ return 0;
++}
++
++/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2408,6 +2428,9 @@ static int do_anonymous_page(struct mm_s
+ spinlock_t *ptl;
+ pte_t entry;
+
++ if (check_stack_guard_page(vma, address) < 0)
++ return VM_FAULT_SIGBUS;
++
+ /* Allocate our own private page. */
+ pte_unmap(page_table);
+
xen-drop-xen_sched_clock-in-favour-of-using-plain-wallclock-time.patch
bdi-register-sysfs-bdi-device-only-once-per-queue.patch
mm-backing-dev.c-remove-recently-added-warn_on.patch
+mm-keep-a-guard-page-below-a-grow-down-stack-segment.patch
--- /dev/null
+From 320b2b8de12698082609ebbc1a17165727f4c893 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 12 Aug 2010 17:54:33 -0700
+Subject: mm: keep a guard page below a grow-down stack segment
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 320b2b8de12698082609ebbc1a17165727f4c893 upstream.
+
+This is a rather minimally invasive patch to solve the problem of the
+user stack growing into a memory mapped area below it. Whenever we fill
+the first page of the stack segment, expand the segment down by one
+page.
+
+Now, admittedly some odd application might _want_ the stack to grow down
+into the preceding memory mapping, and so we may at some point need to
+make this a process tunable (some people might also want to have more
+than a single page of guarding), but let's try the minimal approach
+first.
+
+Tested with a trivial application that maps a single page just below the
+stack, and then starts recursing. Without this, we will get a SIGSEGV
+_after_ the stack has smashed the mapping. With this patch, we'll get a
+nice SIGBUS just as the stack touches the page just above the mapping.
+
+Requested-by: Keith Packard <keithp@keithp.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/memory.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2630,6 +2630,26 @@ out_release:
+ }
+
+ /*
++ * This is like a special single-page "expand_downwards()",
++ * except we must first make sure that 'address-PAGE_SIZE'
++ * doesn't hit another vma.
++ *
++ * The "find_vma()" will do the right thing even if we wrap
++ */
++static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
++{
++ address &= PAGE_MASK;
++ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
++ address -= PAGE_SIZE;
++ if (find_vma(vma->vm_mm, address) != vma)
++ return -ENOMEM;
++
++ expand_stack(vma, address);
++ }
++ return 0;
++}
++
++/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2642,6 +2662,9 @@ static int do_anonymous_page(struct mm_s
+ spinlock_t *ptl;
+ pte_t entry;
+
++ if (check_stack_guard_page(vma, address) < 0)
++ return VM_FAULT_SIGBUS;
++
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
ibmvfc-reduce-error-recovery-timeout.patch
md-raid1-delay-reads-that-could-overtake-behind-writes.patch
mm-fix-corruption-of-hibernation-caused-by-reusing-swap-during-image-saving.patch
+mm-keep-a-guard-page-below-a-grow-down-stack-segment.patch
--- /dev/null
+From 320b2b8de12698082609ebbc1a17165727f4c893 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 12 Aug 2010 17:54:33 -0700
+Subject: mm: keep a guard page below a grow-down stack segment
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 320b2b8de12698082609ebbc1a17165727f4c893 upstream.
+
+This is a rather minimally invasive patch to solve the problem of the
+user stack growing into a memory mapped area below it. Whenever we fill
+the first page of the stack segment, expand the segment down by one
+page.
+
+Now, admittedly some odd application might _want_ the stack to grow down
+into the preceding memory mapping, and so we may at some point need to
+make this a process tunable (some people might also want to have more
+than a single page of guarding), but let's try the minimal approach
+first.
+
+Tested with a trivial application that maps a single page just below the
+stack, and then starts recursing. Without this, we will get a SIGSEGV
+_after_ the stack has smashed the mapping. With this patch, we'll get a
+nice SIGBUS just as the stack touches the page just above the mapping.
+
+Requested-by: Keith Packard <keithp@keithp.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/memory.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2751,6 +2751,26 @@ out_release:
+ }
+
+ /*
++ * This is like a special single-page "expand_downwards()",
++ * except we must first make sure that 'address-PAGE_SIZE'
++ * doesn't hit another vma.
++ *
++ * The "find_vma()" will do the right thing even if we wrap
++ */
++static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
++{
++ address &= PAGE_MASK;
++ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
++ address -= PAGE_SIZE;
++ if (find_vma(vma->vm_mm, address) != vma)
++ return -ENOMEM;
++
++ expand_stack(vma, address);
++ }
++ return 0;
++}
++
++/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2763,6 +2783,9 @@ static int do_anonymous_page(struct mm_s
+ spinlock_t *ptl;
+ pte_t entry;
+
++ if (check_stack_guard_page(vma, address) < 0)
++ return VM_FAULT_SIGBUS;
++
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
ibmvfc-fix-command-completion-handling.patch
ibmvfc-reduce-error-recovery-timeout.patch
md-raid1-delay-reads-that-could-overtake-behind-writes.patch
+mm-keep-a-guard-page-below-a-grow-down-stack-segment.patch
--- /dev/null
+From 320b2b8de12698082609ebbc1a17165727f4c893 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 12 Aug 2010 17:54:33 -0700
+Subject: mm: keep a guard page below a grow-down stack segment
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 320b2b8de12698082609ebbc1a17165727f4c893 upstream.
+
+This is a rather minimally invasive patch to solve the problem of the
+user stack growing into a memory mapped area below it. Whenever we fill
+the first page of the stack segment, expand the segment down by one
+page.
+
+Now, admittedly some odd application might _want_ the stack to grow down
+into the preceding memory mapping, and so we may at some point need to
+make this a process tunable (some people might also want to have more
+than a single page of guarding), but let's try the minimal approach
+first.
+
+Tested with a trivial application that maps a single page just below the
+stack, and then starts recursing. Without this, we will get a SIGSEGV
+_after_ the stack has smashed the mapping. With this patch, we'll get a
+nice SIGBUS just as the stack touches the page just above the mapping.
+
+Requested-by: Keith Packard <keithp@keithp.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/memory.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2760,6 +2760,26 @@ out_release:
+ }
+
+ /*
++ * This is like a special single-page "expand_downwards()",
++ * except we must first make sure that 'address-PAGE_SIZE'
++ * doesn't hit another vma.
++ *
++ * The "find_vma()" will do the right thing even if we wrap
++ */
++static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
++{
++ address &= PAGE_MASK;
++ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
++ address -= PAGE_SIZE;
++ if (find_vma(vma->vm_mm, address) != vma)
++ return -ENOMEM;
++
++ expand_stack(vma, address);
++ }
++ return 0;
++}
++
++/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2772,6 +2792,9 @@ static int do_anonymous_page(struct mm_s
+ spinlock_t *ptl;
+ pte_t entry;
+
++ if (check_stack_guard_page(vma, address) < 0)
++ return VM_FAULT_SIGBUS;
++
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
xen-do-not-suspend-ipi-irqs.patch
crypto-testmgr-add-an-option-to-disable-cryptoalgos-self-tests.patch
ext4-fix-freeze-deadlock-under-io.patch
+mm-keep-a-guard-page-below-a-grow-down-stack-segment.patch