--- /dev/null
+From bf3a1eb85967dcbaae42f4fcb53c2392cec32677 Mon Sep 17 00:00:00 2001
+From: Florian Fainelli <florian@openwrt.org>
+Date: Sun, 27 Feb 2011 19:53:53 +0100
+Subject: MIPS: MTX-1: Make au1000_eth probe all PHY addresses
+
+From: Florian Fainelli <florian@openwrt.org>
+
+commit bf3a1eb85967dcbaae42f4fcb53c2392cec32677 upstream.
+
+When au1000_eth probes the MII bus for PHY addresses, if we do not set
+phy_search_highest_addr in the au1000_eth platform data, the MII probing
+logic exits early and assumes a valid PHY is found at address 0. For
+MTX-1 the PHY is at address 31, so without this patch link
+detection/speed/duplex would not work correctly.
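+
+For illustration, here is a simplified sketch of the search behaviour the
+flag enables (not the actual au1000_eth MII probe code; phy_present() is
+a hypothetical helper):
+
+	static int mii_probe_addr(int phy_search_highest_addr)
+	{
+		int addr;
+
+		if (phy_search_highest_addr) {
+			/* scan downwards so a PHY at address 31 is found */
+			for (addr = 31; addr >= 0; addr--)
+				if (phy_present(addr))
+					return addr;
+			return -1;
+		}
+		/* default: exit early, assume a PHY at address 0 */
+		return 0;
+	}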
+
+Signed-off-by: Florian Fainelli <florian@openwrt.org>
+To: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/2111/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/mips/alchemy/mtx-1/platform.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/mips/alchemy/mtx-1/platform.c
++++ b/arch/mips/alchemy/mtx-1/platform.c
+@@ -28,6 +28,8 @@
+ #include <linux/mtd/physmap.h>
+ #include <mtd/mtd-abi.h>
+
++#include <asm/mach-au1x00/au1xxx_eth.h>
++
+ static struct gpio_keys_button mtx1_gpio_button[] = {
+ {
+ .gpio = 207,
+@@ -140,10 +142,17 @@ static struct __initdata platform_device
+ &mtx1_mtd,
+ };
+
++static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
++ .phy_search_highest_addr = 1,
++ .phy1_search_mac0 = 1,
++};
++
+ static int __init mtx1_register_devices(void)
+ {
+ int rc;
+
++ au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
++
+ rc = gpio_request(mtx1_gpio_button[0].gpio,
+ mtx1_gpio_button[0].desc);
+ if (rc < 0) {
--- /dev/null
+From a7bd1dafdcc13ec7add4aafc927eb5e3a8d597e6 Mon Sep 17 00:00:00 2001
+From: Naga Chumbalkar <nagananda.chumbalkar@hp.com>
+Date: Fri, 25 Feb 2011 20:31:55 +0000
+Subject: x86: Don't check for BIOS corruption in first 64K when there's no need to
+
+From: Naga Chumbalkar <nagananda.chumbalkar@hp.com>
+
+commit a7bd1dafdcc13ec7add4aafc927eb5e3a8d597e6 upstream.
+
+Due to commit 781c5a67f152c17c3e4a9ed9647f8c0be6ea5ae9 it is likely
+that the number of areas to scan for BIOS corruption is 0, especially
+when the first 64K is already reserved (X86_RESERVE_LOW is 64K by
+default).
+
+If that's the case then don't set up the scan.
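+
+As a rough illustration (assumed defaults, not code from this patch): the
+low reservation and the corruption-check window are both 64K in the
+default configuration, so no scan areas get registered at all:
+
+	/* illustrative constants only */
+	#define X86_RESERVE_LOW_DEFAULT		(64 * 1024)
+	#define CORRUPTION_CHECK_SIZE_DEFAULT	(64 * 1024)
+	/* the reserved region covers the whole window => num_scan_areas == 0 */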
+
+Signed-off-by: Naga Chumbalkar <nagananda.chumbalkar@hp.com>
+LKML-Reference: <20110225202838.2229.71011.sendpatchset@nchumbalkar.americas.hpqcorp.net>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/check.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/check.c
++++ b/arch/x86/kernel/check.c
+@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(
+ addr += size;
+ }
+
+- printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
+- num_scan_areas);
++ if (num_scan_areas)
++ printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
+ }
+
+
+@@ -143,12 +143,12 @@ static void check_corruption(struct work
+ {
+ check_for_bios_corruption();
+ schedule_delayed_work(&bios_check_work,
+- round_jiffies_relative(corruption_check_period*HZ));
++ round_jiffies_relative(corruption_check_period*HZ));
+ }
+
+ static int start_periodic_check_for_corruption(void)
+ {
+- if (!memory_corruption_check || corruption_check_period == 0)
++ if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
+ return 0;
+
+ printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
--- /dev/null
+From a79e53d85683c6dd9f99c90511028adc2043031f Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 16 Feb 2011 15:45:22 -0800
+Subject: x86/mm: Fix pgd_lock deadlock
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit a79e53d85683c6dd9f99c90511028adc2043031f upstream.
+
+It's forbidden to take the page_table_lock with irqs disabled:
+if there is contention, the IPIs (for TLB flushes) sent while the
+page_table_lock is held will never be serviced, leading to a deadlock.
+
+Nobody takes the pgd_lock from irq context, so the _irqsave can be
+removed.
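+
+A minimal sketch of the interleaving described above (illustrative only,
+not code from this patch):
+
+	/*
+	 * CPU B: spin_lock(&mm->page_table_lock);
+	 * CPU B: flush_tlb_others(...);  <- sends IPIs, waits for every
+	 *                                   other CPU to acknowledge them
+	 * CPU A: spin_lock_irqsave(&pgd_lock, flags);  <- irqs now disabled
+	 * CPU A: spin_lock(&mm->page_table_lock);      <- spins on CPU B
+	 *
+	 * CPU A cannot service CPU B's IPI while it spins with irqs
+	 * disabled, so CPU B never finishes the flush, never releases the
+	 * page_table_lock, and both CPUs are stuck.
+	 */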
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Acked-by: Rik van Riel <riel@redhat.com>
+Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+LKML-Reference: <201102162345.p1GNjMjm021738@imap1.linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/fault.c | 7 +++----
+ arch/x86/mm/init_64.c | 6 +++---
+ arch/x86/mm/pageattr.c | 18 ++++++++----------
+ arch/x86/mm/pgtable.c | 11 ++++-------
+ arch/x86/xen/mmu.c | 10 ++++------
+ 5 files changed, 22 insertions(+), 30 deletions(-)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -229,15 +229,14 @@ void vmalloc_sync_all(void)
+ for (address = VMALLOC_START & PMD_MASK;
+ address >= TASK_SIZE && address < FIXADDR_TOP;
+ address += PMD_SIZE) {
+-
+- unsigned long flags;
+ struct page *page;
+
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+ list_for_each_entry(page, &pgd_list, lru) {
+ spinlock_t *pgt_lock;
+ pmd_t *ret;
+
++ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+@@ -247,7 +246,7 @@ void vmalloc_sync_all(void)
+ if (!ret)
+ break;
+ }
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+ }
+ }
+
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long star
+
+ for (address = start; address <= end; address += PGDIR_SIZE) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
+- unsigned long flags;
+ struct page *page;
+
+ if (pgd_none(*pgd_ref))
+ continue;
+
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ spinlock_t *pgt_lock;
+
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ spin_lock(pgt_lock);
+
+@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long star
+
+ spin_unlock(pgt_lock);
+ }
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+ }
+ }
+
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -56,12 +56,10 @@ static unsigned long direct_pages_count[
+
+ void update_page_count(int level, unsigned long pages)
+ {
+- unsigned long flags;
+-
+ /* Protect against CPA */
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+ direct_pages_count[level] += pages;
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+ }
+
+ static void split_page_count(int level)
+@@ -391,7 +389,7 @@ static int
+ try_preserve_large_page(pte_t *kpte, unsigned long address,
+ struct cpa_data *cpa)
+ {
+- unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
++ unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
+ pte_t new_pte, old_pte, *tmp;
+ pgprot_t old_prot, new_prot;
+ int i, do_split = 1;
+@@ -400,7 +398,7 @@ try_preserve_large_page(pte_t *kpte, uns
+ if (cpa->force_split)
+ return 1;
+
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+ /*
+ * Check for races, another CPU might have split this page
+ * up already:
+@@ -495,14 +493,14 @@ try_preserve_large_page(pte_t *kpte, uns
+ }
+
+ out_unlock:
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+
+ return do_split;
+ }
+
+ static int split_large_page(pte_t *kpte, unsigned long address)
+ {
+- unsigned long flags, pfn, pfninc = 1;
++ unsigned long pfn, pfninc = 1;
+ unsigned int i, level;
+ pte_t *pbase, *tmp;
+ pgprot_t ref_prot;
+@@ -516,7 +514,7 @@ static int split_large_page(pte_t *kpte,
+ if (!base)
+ return -ENOMEM;
+
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+ /*
+ * Check for races, another CPU might have split this page
+ * up for us already:
+@@ -588,7 +586,7 @@ out_unlock:
+ */
+ if (base)
+ __free_page(base);
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+
+ return 0;
+ }
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *m
+
+ static void pgd_dtor(pgd_t *pgd)
+ {
+- unsigned long flags; /* can be called from interrupt context */
+-
+ if (SHARED_KERNEL_PMD)
+ return;
+
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+ pgd_list_del(pgd);
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+ }
+
+ /*
+@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ pgd_t *pgd;
+ pmd_t *pmds[PREALLOCATED_PMDS];
+- unsigned long flags;
+
+ pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+
+@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ * respect to anything walking the pgd_list, so that they
+ * never see a partially populated pgd.
+ */
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+
+ pgd_ctor(mm, pgd);
+ pgd_prepopulate_pmd(mm, pgd, pmds);
+
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+
+ return pgd;
+
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1362,10 +1362,9 @@ static void xen_pgd_pin(struct mm_struct
+ */
+ void xen_mm_pin_all(void)
+ {
+- unsigned long flags;
+ struct page *page;
+
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+
+ list_for_each_entry(page, &pgd_list, lru) {
+ if (!PagePinned(page)) {
+@@ -1374,7 +1373,7 @@ void xen_mm_pin_all(void)
+ }
+ }
+
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+ }
+
+ /*
+@@ -1475,10 +1474,9 @@ static void xen_pgd_unpin(struct mm_stru
+ */
+ void xen_mm_unpin_all(void)
+ {
+- unsigned long flags;
+ struct page *page;
+
+- spin_lock_irqsave(&pgd_lock, flags);
++ spin_lock(&pgd_lock);
+
+ list_for_each_entry(page, &pgd_list, lru) {
+ if (PageSavePinned(page)) {
+@@ -1488,7 +1486,7 @@ void xen_mm_unpin_all(void)
+ }
+ }
+
+- spin_unlock_irqrestore(&pgd_lock, flags);
++ spin_unlock(&pgd_lock);
+ }
+
+ void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
--- /dev/null
+From f86268549f424f83b9eb0963989270e14fbfc3de Mon Sep 17 00:00:00 2001
+From: Andrey Vagin <avagin@openvz.org>
+Date: Wed, 9 Mar 2011 15:22:23 -0800
+Subject: x86/mm: Handle mm_fault_error() in kernel space
+
+From: Andrey Vagin <avagin@openvz.org>
+
+commit f86268549f424f83b9eb0963989270e14fbfc3de upstream.
+
+mm_fault_error() should not invoke the oom-killer if the page fault
+occurred in kernel space, e.g. in copy_from_user()/copy_to_user().
+
+This would happen if we hit OOM on a copy_to_user(), or on a
+copy_from_user() which faults.
+
+Without this patch the kernel hangs in copy_from_user(): the OOM
+killer sends SIGKILL to the current process, but the process cannot
+handle the signal while it is in the syscall, so the kernel returns
+to copy_from_user(), re-executes the faulting access and triggers the
+page fault again.
+
+With this patch the kernel returns -EFAULT from copy_from_user().
+
+The code that checks whether the page fault occurred in kernel space
+has been copied from do_sigbus().
+
+This situation is handled the same way on powerpc, xtensa, tile, ...
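+
+For illustration (a hedged sketch, not code from this patch; the function
+name is made up), the user-visible effect is that a syscall copying from
+an unbacked user buffer under OOM now fails cleanly instead of
+re-faulting forever:
+
+	static long example_copy_in(void __user *ubuf, size_t len)
+	{
+		char kbuf[64];
+
+		if (len > sizeof(kbuf))
+			return -EINVAL;
+		if (copy_from_user(kbuf, ubuf, len))
+			/* fault resolved via no_context()/exception tables */
+			return -EFAULT;
+		return len;
+	}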
+
+Signed-off-by: Andrey Vagin <avagin@openvz.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+LKML-Reference: <201103092322.p29NMNPH001682@imap1.linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/fault.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -827,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, uns
+ unsigned long address, unsigned int fault)
+ {
+ if (fault & VM_FAULT_OOM) {
++ /* Kernel mode? Handle exceptions or die: */
++ if (!(error_code & PF_USER)) {
++			up_read(&current->mm->mmap_sem);
++ no_context(regs, error_code, address);
++ return;
++ }
++
+ out_of_memory(regs, error_code, address);
+ } else {
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|