--- /dev/null
+From 75e1c70fc31490ef8a373ea2a4bea2524099b478 Mon Sep 17 00:00:00 2001
+From: Jeff Moyer <jmoyer@redhat.com>
+Date: Fri, 10 Sep 2010 14:16:00 -0700
+Subject: aio: check for multiplication overflow in do_io_submit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit 75e1c70fc31490ef8a373ea2a4bea2524099b478 upstream.
+
+Tavis Ormandy pointed out that do_io_submit does not do proper bounds
+checking on the passed-in iocb array:
+
+ if (unlikely(nr < 0))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(iocbpp)))))
+ return -EFAULT; ^^^^^^^^^^^^^^^^^^
+
+The attached patch checks for overflow, and if it is detected, the
+number of iocbs submitted is scaled down to a number that will fit in
+the long. This is an ok thing to do, as sys_io_submit is documented as
+returning the number of iocbs submitted, so callers should handle a
+return value of less than the 'nr' argument passed in.
+
+Reported-by: Tavis Ormandy <taviso@cmpxchg8b.com>
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/aio.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1677,6 +1677,9 @@ SYSCALL_DEFINE3(io_submit, aio_context_t
+ if (unlikely(nr < 0))
+ return -EINVAL;
+
++ if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
++ nr = LONG_MAX/sizeof(*iocbpp);
++
+ if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
+ return -EFAULT;
+
--- /dev/null
+From 8ca3eb08097f6839b2206e2242db4179aee3cfb3 Mon Sep 17 00:00:00 2001
+From: Luck, Tony <tony.luck@intel.com>
+Date: Tue, 24 Aug 2010 11:44:18 -0700
+Subject: guard page for stacks that grow upwards
+
+From: Luck, Tony <tony.luck@intel.com>
+
+commit 8ca3eb08097f6839b2206e2242db4179aee3cfb3 upstream.
+
+pa-risc and ia64 have stacks that grow upwards. Check that
+they do not run into other mappings. By making VM_GROWSUP
+0x0 on architectures that do not ever use it, we can avoid
+some unpleasant #ifdefs in check_stack_guard_page().
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: dann frazier <dannf@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/mm.h | 8 +++++++-
+ mm/memory.c | 15 +++++++++++----
+ mm/mmap.c | 3 ---
+ 3 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -87,7 +87,11 @@ extern unsigned int kobjsize(const void
+ #define VM_MAYSHARE 0x00000080
+
+ #define VM_GROWSDOWN 0x00000100 /* general info on the segment */
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
+ #define VM_GROWSUP 0x00000200
++#else
++#define VM_GROWSUP 0x00000000
++#endif
+ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
+ #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+
+@@ -1181,8 +1185,10 @@ unsigned long max_sane_readahead(unsigne
+
+ /* Do stack extension */
+ extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+-#ifdef CONFIG_IA64
++#if VM_GROWSUP
+ extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
++#else
++ #define expand_upwards(vma, address) do { } while (0)
+ #endif
+ extern int expand_stack_downwards(struct vm_area_struct *vma,
+ unsigned long address);
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2396,11 +2396,9 @@ out_nomap:
+ }
+
+ /*
+- * This is like a special single-page "expand_downwards()",
+- * except we must first make sure that 'address-PAGE_SIZE'
++ * This is like a special single-page "expand_{down|up}wards()",
++ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+- *
+- * The "find_vma()" will do the right thing even if we wrap
+ */
+ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+ {
+@@ -2412,6 +2410,15 @@ static inline int check_stack_guard_page
+
+ expand_stack(vma, address);
+ }
++ if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
++ struct vm_area_struct *next = vma->vm_next;
++
++ /* As VM_GROWSDOWN but s/below/above/ */
++ if (next && next->vm_start == address + PAGE_SIZE)
++ return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
++
++ expand_upwards(vma, address + PAGE_SIZE);
++ }
+ return 0;
+ }
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1589,9 +1589,6 @@ static int acct_stack_growth(struct vm_a
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end. Have to extend vma.
+ */
+-#ifndef CONFIG_IA64
+-static inline
+-#endif
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+ int error;
--- /dev/null
+aio-check-for-multiplication-overflow-in-do_io_submit.patch
+x86-add-smp_mb-before-sending-invalidate_tlb_vector.patch
+guard-page-for-stacks-that-grow-upwards.patch
--- /dev/null
+From d6f0f39b7d05e62b347c4352d070e4afb3ade4b5 Mon Sep 17 00:00:00 2001
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Tue, 4 Nov 2008 13:53:04 -0800
+Subject: x86: add smp_mb() before sending INVALIDATE_TLB_VECTOR
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit d6f0f39b7d05e62b347c4352d070e4afb3ade4b5 upstream.
+
+Impact: fix rare x2apic hang
+
+On x86, x2apic mode accesses for sending IPI's don't have serializing
+semantics. If the IPI receiver refers (in lock-free fashion) to some
+memory setup by the sender, the need for smp_mb() before sending the
+IPI becomes critical in x2apic mode.
+
+Add the smp_mb() in native_flush_tlb_others() before sending the IPI.
+
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Thomas Renninger <trenn@novell.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/tlb_32.c | 6 ++++++
+ arch/x86/kernel/tlb_64.c | 5 +++++
+ 2 files changed, 11 insertions(+)
+
+--- a/arch/x86/kernel/tlb_32.c
++++ b/arch/x86/kernel/tlb_32.c
+@@ -154,6 +154,12 @@ void native_flush_tlb_others(const cpuma
+ flush_mm = mm;
+ flush_va = va;
+ cpus_or(flush_cpumask, cpumask, flush_cpumask);
++
++ /*
++ * Make the above memory operations globally visible before
++ * sending the IPI.
++ */
++ smp_mb();
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+--- a/arch/x86/kernel/tlb_64.c
++++ b/arch/x86/kernel/tlb_64.c
+@@ -183,6 +183,11 @@ void native_flush_tlb_others(const cpuma
+ cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+
+ /*
++ * Make the above memory operations globally visible before
++ * sending the IPI.
++ */
++ smp_mb();
++ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+ */