--- /dev/null
+From e787ec1376e862fcea1bfd523feb7c5fb43ecdb9 Mon Sep 17 00:00:00 2001
+From: Tim Bird <tim.bird@am.sony.com>
+Date: Wed, 2 May 2012 22:55:39 +0100
+Subject: ARM: 7410/1: Add extra clobber registers for assembly in kernel_execve
+
+From: Tim Bird <tim.bird@am.sony.com>
+
+commit e787ec1376e862fcea1bfd523feb7c5fb43ecdb9 upstream.
+
+The inline assembly in kernel_execve() uses r8 and r9. Since this
+code sequence does not return, it usually doesn't matter if the
+register clobber list is accurate. However, I saw a case where a
+particular version of gcc used r8 as an intermediate for the value
+eventually passed to r9. Because r8 is used in the inline
+assembly, and not mentioned in the clobber list, r9 was set
+to an incorrect value.
+
+This resulted in a kernel panic on execution of the first user-space
+program in the system. r9 is used in ret_to_user as the thread_info
+pointer, and if it's wrong, bad things happen.
+
+Signed-off-by: Tim Bird <tim.bird@am.sony.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/sys_arm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/kernel/sys_arm.c
++++ b/arch/arm/kernel/sys_arm.c
+@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
+ "Ir" (THREAD_START_SP - sizeof(regs)),
+ "r" (&regs),
+ "Ir" (sizeof(regs))
+- : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
++ : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
+
+ out:
+ return ret;
--- /dev/null
+From fde165b2a29673aabf18ceff14dea1f1cfb0daad Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Sat, 5 May 2012 20:58:13 +0100
+Subject: ARM: 7414/1: SMP: prevent use of the console when using idmap_pgd
+
+From: Colin Cross <ccross@android.com>
+
+commit fde165b2a29673aabf18ceff14dea1f1cfb0daad upstream.
+
+Commit 4e8ee7de227e3ab9a72040b448ad728c5428a042 (ARM: SMP: use
+idmap_pgd for mapping MMU enable during secondary booting)
+switched secondary boot to use idmap_pgd, which is initialized
+during early_initcall, instead of a page table initialized during
+__cpu_up. This causes idmap_pgd to contain the static mappings
+but be missing all dynamic mappings.
+
+If a console is registered that creates a dynamic mapping, the
+printk in secondary_start_kernel will trigger a data abort on
+the missing mapping before the exception handlers have been
+initialized, leading to a hang. Initial boot is not affected
+because no consoles have been registered, and resume is usually
+not affected because the offending console is suspended.
+Onlining a cpu with hotplug triggers the problem.
+
+A workaround is to move the printk in secondary_start_kernel until
+after the page tables have been switched back to init_mm.
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/smp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -279,8 +279,6 @@ asmlinkage void __cpuinit secondary_star
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+- printk("CPU%u: Booted secondary processor\n", cpu);
+-
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+@@ -292,6 +290,8 @@ asmlinkage void __cpuinit secondary_star
+ enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
+
++ printk("CPU%u: Booted secondary processor\n", cpu);
++
+ cpu_init();
+ preempt_disable();
+ trace_hardirqs_off();
--- /dev/null
+From f5c2347ee20a8d6964d6a6b1ad04f200f8d4dfa7 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+Date: Thu, 26 Apr 2012 11:45:16 -0700
+Subject: asm-generic: Use __BITS_PER_LONG in statfs.h
+
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+
+commit f5c2347ee20a8d6964d6a6b1ad04f200f8d4dfa7 upstream.
+
+<asm-generic/statfs.h> is exported to userspace, so using
+BITS_PER_LONG is invalid. We need to use __BITS_PER_LONG instead.
+
+This is kernel bugzilla 43165.
+
+Reported-by: H.J. Lu <hjl.tools@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Link: http://lkml.kernel.org/r/1335465916-16965-1-git-send-email-hpa@linux.intel.com
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/asm-generic/statfs.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/asm-generic/statfs.h
++++ b/include/asm-generic/statfs.h
+@@ -15,7 +15,7 @@ typedef __kernel_fsid_t fsid_t;
+ * with a 10' pole.
+ */
+ #ifndef __statfs_word
+-#if BITS_PER_LONG == 64
++#if __BITS_PER_LONG == 64
+ #define __statfs_word long
+ #else
+ #define __statfs_word __u32
--- /dev/null
+From 2f624278626677bfaf73fef97f86b37981621f5c Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 4 May 2012 14:46:02 -0700
+Subject: Fix __read_seqcount_begin() to use ACCESS_ONCE for sequence value read
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 2f624278626677bfaf73fef97f86b37981621f5c upstream.
+
+We really need to use a ACCESS_ONCE() on the sequence value read in
+__read_seqcount_begin(), because otherwise the compiler might end up
+reloading the value in between the test and the return of it. As a
+result, it might end up returning an odd value (which means that a write
+is in progress).
+
+If the reader is then fast enough that that odd value is still the
+current one when the read_seqcount_retry() is done, we might end up with
+a "successful" read sequence, even despite the concurrent write being
+active.
+
+In practice this probably never really happens - there just isn't
+anything else going on around the read of the sequence count, and the
+common case is that we end up having a read barrier immediately
+afterwards.
+
+So the code sequence in which gcc might decide to reload from memory is
+small, and there's no reason to believe it would ever actually do the
+reload. But if the compiler ever were to decide to do so, it would be
+incredibly annoying to debug. Let's just make sure.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/seqlock.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -141,7 +141,7 @@ static inline unsigned __read_seqcount_b
+ unsigned ret;
+
+ repeat:
+- ret = s->sequence;
++ ret = ACCESS_ONCE(s->sequence);
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
--- /dev/null
+From d5e28005a1d2e67833852f4c9ea8ec206ea3ff85 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 27 Apr 2012 10:54:35 -0700
+Subject: percpu, x86: don't use PMD_SIZE as embedded atom_size on 32bit
+
+From: Tejun Heo <tj@kernel.org>
+
+commit d5e28005a1d2e67833852f4c9ea8ec206ea3ff85 upstream.
+
+With the embed percpu first chunk allocator, x86 uses either PAGE_SIZE
+or PMD_SIZE for atom_size. PMD_SIZE is used when CPU supports PSE so
+that percpu areas are aligned to PMD mappings and possibly allow using
+PMD mappings in vmalloc areas in the future. Using larger atom_size
+doesn't waste actual memory; however, it does require larger vmalloc
+space allocation later on for !first chunks.
+
+With reasonably sized vmalloc area, PMD_SIZE shouldn't be a problem
+but x86_32 at this point is anything but reasonable in terms of
+address space and using larger atom_size reportedly leads to frequent
+percpu allocation failures on certain setups.
+
+As there is no reason to not use PMD_SIZE on x86_64 as vmalloc space
+is aplenty and most x86_64 configurations support PSE, fix the issue
+by always using PMD_SIZE on x86_64 and PAGE_SIZE on x86_32.
+
+v2: drop cpu_has_pse test and make x86_64 always use PMD_SIZE and
+ x86_32 PAGE_SIZE as suggested by hpa.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Yanmin Zhang <yanmin.zhang@intel.com>
+Reported-by: ShuoX Liu <shuox.liu@intel.com>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+LKML-Reference: <4F97BA98.6010001@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/setup_percpu.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
+ #endif
+ rc = -EINVAL;
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+- const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
+ const size_t dyn_size = PERCPU_MODULE_RESERVE +
+ PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
++ size_t atom_size;
+
++ /*
++ * On 64bit, use PMD_SIZE for atom_size so that embedded
++ * percpu areas are aligned to PMD. This, in the future,
++ * can also allow using PMD mappings in vmalloc area. Use
++ * PAGE_SIZE on 32bit as vmalloc space is highly contended
++ * and large vmalloc area allocs can easily fail.
++ */
++#ifdef CONFIG_X86_64
++ atom_size = PMD_SIZE;
++#else
++ atom_size = PAGE_SIZE;
++#endif
+ rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+ dyn_size, atom_size,
+ pcpu_cpu_distance,
smsc95xx-mark-link-down-on-startup-and-let-phy-interrupt.patch
xen-pte-fix-crashes-when-trying-to-see-non-existent-pgd-pmd-pud-ptes.patch
xen-pci-don-t-use-pci-bios-service-for-configuration-space-accesses.patch
+x86-relocs-remove-an-unused-variable.patch
+percpu-x86-don-t-use-pmd_size-as-embedded-atom_size-on-32bit.patch
+asm-generic-use-__bits_per_long-in-statfs.h.patch
+fix-__read_seqcount_begin-to-use-access_once-for-sequence-value-read.patch
+arm-7410-1-add-extra-clobber-registers-for-assembly-in-kernel_execve.patch
+arm-7414-1-smp-prevent-use-of-the-console-when-using-idmap_pgd.patch
--- /dev/null
+From 7c77cda0fe742ed07622827ce80963bbeebd1e3f Mon Sep 17 00:00:00 2001
+From: Kusanagi Kouichi <slash@ac.auone-net.jp>
+Date: Sun, 1 Apr 2012 17:29:32 +0900
+Subject: x86, relocs: Remove an unused variable
+
+From: Kusanagi Kouichi <slash@ac.auone-net.jp>
+
+commit 7c77cda0fe742ed07622827ce80963bbeebd1e3f upstream.
+
+sh_symtab is set but not used.
+
+[ hpa: putting this in urgent because of the sheer harmlessness of the patch:
+ it quiets a build warning but does not change any generated code. ]
+
+Signed-off-by: Kusanagi Kouichi <slash@ac.auone-net.jp>
+Link: http://lkml.kernel.org/r/20120401082932.D5E066FC03D@msa105.auone-net.jp
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/compressed/relocs.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/x86/boot/compressed/relocs.c
++++ b/arch/x86/boot/compressed/relocs.c
+@@ -402,13 +402,11 @@ static void print_absolute_symbols(void)
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+- Elf32_Sym *sh_symtab;
+ int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+ }
+- sh_symtab = sec->symtab;
+ sym_strtab = sec->link->strtab;
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
+ Elf32_Sym *sym;