--- /dev/null
+From e2dbe12557d85d81f4527879499f55681c3cca4f Mon Sep 17 00:00:00 2001
+From: Amerigo Wang <amwang@redhat.com>
+Date: Wed, 1 Jul 2009 01:06:26 -0400
+Subject: elf: fix one check-after-use
+
+From: Amerigo Wang <amwang@redhat.com>
+
+commit e2dbe12557d85d81f4527879499f55681c3cca4f upstream.
+
+Check before use it.
+
+Signed-off-by: WANG Cong <amwang@redhat.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: David Howells <dhowells@redhat.com>
+Acked-by: Roland McGrath <roland@redhat.com>
+Acked-by: James Morris <jmorris@namei.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/binfmt_elf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1518,11 +1518,11 @@ static int fill_note_info(struct elfhdr
+ info->thread = NULL;
+
+ psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
+- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+-
+ if (psinfo == NULL)
+ return 0;
+
++ fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
++
+ /*
+ * Figure out how many notes we're going to need for each thread.
+ */
--- /dev/null
+From 025dc740d01f99ccba945df1f9ef9e06b1c15d96 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jirislaby@gmail.com>
+Date: Sat, 11 Jul 2009 13:42:37 +0200
+Subject: hwmon: (max6650) Fix lock imbalance
+
+From: Jiri Slaby <jirislaby@gmail.com>
+
+commit 025dc740d01f99ccba945df1f9ef9e06b1c15d96 upstream.
+
+Add omitted update_lock to one switch/case in set_div.
+
+Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
+Acked-by: Hans J. Koch <hjk@linutronix.de>
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/max6650.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/hwmon/max6650.c
++++ b/drivers/hwmon/max6650.c
+@@ -407,6 +407,7 @@ static ssize_t set_div(struct device *de
+ data->count = 3;
+ break;
+ default:
++ mutex_unlock(&data->update_lock);
+ dev_err(&client->dev,
+ "illegal value for fan divider (%d)\n", div);
+ return -EINVAL;
--- /dev/null
+From c8236db9cd7aa492dcfcdcca702638e704abed49 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@redhat.com>
+Date: Sun, 5 Jul 2009 12:08:18 -0700
+Subject: mm: mark page accessed before we write_end()
+
+From: Josef Bacik <josef@redhat.com>
+
+commit c8236db9cd7aa492dcfcdcca702638e704abed49 upstream.
+
+In testing a backport of the write_begin/write_end AOPs, a 10% re-read
+regression was noticed when running iozone. This regression was
+introduced because the old AOPs would always do a mark_page_accessed(page)
+after the commit_write, but when the new AOPs where introduced, the only
+place this was kept was in pagecache_write_end().
+
+This patch does the same thing in the generic case as what is done in
+pagecache_write_end(), which is just to mark the page accessed before we
+do write_end().
+
+Signed-off-by: Josef Bacik <jbacik@redhat.com>
+Acked-by: Nick Piggin <npiggin@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/filemap.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2249,6 +2249,7 @@ again:
+ pagefault_enable();
+ flush_dcache_page(page);
+
++ mark_page_accessed(page);
+ status = a_ops->write_end(file, mapping, pos, bytes, copied,
+ page, fsdata);
+ if (unlikely(status < 0))
--- /dev/null
+From e82a3b75127188f20c7780bec580e148beb29da7 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 16 Jun 2009 20:51:48 +0000
+Subject: parisc: ensure broadcast tlb purge runs single threaded
+
+From: Helge Deller <deller@gmx.de>
+
+commit e82a3b75127188f20c7780bec580e148beb29da7 upstream.
+
+The TLB flushing functions on hppa, which cause PxTLB broadcasts on the system
+bus, need to be protected by irq-safe spinlocks to prevent irq handlers from
+deadlocking the kernel. The deadlocks only happened during I/O intensive loads and triggered
+pretty seldom, which is why this bug went so long unnoticed.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+[edited to use spin_lock_irqsave on UP as well since we'd been locking there
+ all this time anyway, --kyle]
+Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/parisc/include/asm/tlbflush.h | 14 +++++++-------
+ arch/parisc/kernel/cache.c | 23 +++++++++++++++--------
+ arch/parisc/kernel/pci-dma.c | 12 ++++++++----
+ 3 files changed, 30 insertions(+), 19 deletions(-)
+
+--- a/arch/parisc/include/asm/tlbflush.h
++++ b/arch/parisc/include/asm/tlbflush.h
+@@ -12,14 +12,12 @@
+ * N class systems, only one PxTLB inter processor broadcast can be
+ * active at any one time on the Merced bus. This tlb purge
+ * synchronisation is fairly lightweight and harmless so we activate
+- * it on all SMP systems not just the N class. We also need to have
+- * preemption disabled on uniprocessor machines, and spin_lock does that
+- * nicely.
++ * it on all systems not just the N class.
+ */
+ extern spinlock_t pa_tlb_lock;
+
+-#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
+-#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
++#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
++#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)
+
+ extern void flush_tlb_all(void);
+ extern void flush_tlb_all_local(void *);
+@@ -63,14 +61,16 @@ static inline void flush_tlb_mm(struct m
+ static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+ {
++ unsigned long flags;
++
+ /* For one page, it's not worth testing the split_tlb variable */
+
+ mb();
+ mtsp(vma->vm_mm->context,1);
+- purge_tlb_start();
++ purge_tlb_start(flags);
+ pdtlb(addr);
+ pitlb(addr);
+- purge_tlb_end();
++ purge_tlb_end(flags);
+ }
+
+ void __flush_tlb_range(unsigned long sid,
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -398,12 +398,13 @@ EXPORT_SYMBOL(flush_kernel_icache_range_
+
+ void clear_user_page_asm(void *page, unsigned long vaddr)
+ {
++ unsigned long flags;
+ /* This function is implemented in assembly in pacache.S */
+ extern void __clear_user_page_asm(void *page, unsigned long vaddr);
+
+- purge_tlb_start();
++ purge_tlb_start(flags);
+ __clear_user_page_asm(page, vaddr);
+- purge_tlb_end();
++ purge_tlb_end(flags);
+ }
+
+ #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
+@@ -444,20 +445,24 @@ extern void clear_user_page_asm(void *pa
+
+ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+ {
++ unsigned long flags;
++
+ purge_kernel_dcache_page((unsigned long)page);
+- purge_tlb_start();
++ purge_tlb_start(flags);
+ pdtlb_kernel(page);
+- purge_tlb_end();
++ purge_tlb_end(flags);
+ clear_user_page_asm(page, vaddr);
+ }
+ EXPORT_SYMBOL(clear_user_page);
+
+ void flush_kernel_dcache_page_addr(void *addr)
+ {
++ unsigned long flags;
++
+ flush_kernel_dcache_page_asm(addr);
+- purge_tlb_start();
++ purge_tlb_start(flags);
+ pdtlb_kernel(addr);
+- purge_tlb_end();
++ purge_tlb_end(flags);
+ }
+ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+@@ -490,8 +495,10 @@ void __flush_tlb_range(unsigned long sid
+ if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
+ flush_tlb_all();
+ else {
++ unsigned long flags;
++
+ mtsp(sid, 1);
+- purge_tlb_start();
++ purge_tlb_start(flags);
+ if (split_tlb) {
+ while (npages--) {
+ pdtlb(start);
+@@ -504,7 +511,7 @@ void __flush_tlb_range(unsigned long sid
+ start += PAGE_SIZE;
+ }
+ }
+- purge_tlb_end();
++ purge_tlb_end(flags);
+ }
+ }
+
+--- a/arch/parisc/kernel/pci-dma.c
++++ b/arch/parisc/kernel/pci-dma.c
+@@ -90,12 +90,14 @@ static inline int map_pte_uncached(pte_t
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
++ unsigned long flags;
++
+ if (!pte_none(*pte))
+ printk(KERN_ERR "map_pte_uncached: page already exists\n");
+ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
+- purge_tlb_start();
++ purge_tlb_start(flags);
+ pdtlb_kernel(orig_vaddr);
+- purge_tlb_end();
++ purge_tlb_end(flags);
+ vaddr += PAGE_SIZE;
+ orig_vaddr += PAGE_SIZE;
+ (*paddr_ptr) += PAGE_SIZE;
+@@ -168,11 +170,13 @@ static inline void unmap_uncached_pte(pm
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
++ unsigned long flags;
+ pte_t page = *pte;
++
+ pte_clear(&init_mm, vaddr, pte);
+- purge_tlb_start();
++ purge_tlb_start(flags);
+ pdtlb_kernel(orig_vaddr);
+- purge_tlb_end();
++ purge_tlb_end(flags);
+ vaddr += PAGE_SIZE;
+ orig_vaddr += PAGE_SIZE;
+ pte++;
--- /dev/null
+From 7d17e2763129ea307702fcdc91f6e9d114b65c2d Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Thu, 30 Apr 2009 21:39:45 +0000
+Subject: parisc: fix ldcw inline assembler
+
+From: Helge Deller <deller@gmx.de>
+
+commit 7d17e2763129ea307702fcdc91f6e9d114b65c2d upstream.
+
+There are two reasons to expose the memory *a in the asm:
+
+1) To prevent the compiler from discarding a preceding write to *a, and
+2) to prevent it from caching *a in a register over the asm.
+
+The change has had a few days testing with a SMP build of 2.6.22.19
+running on a rp3440.
+
+This patch is about the correctness of the __ldcw() macro itself.
+The use of the macro should be confined to small inline functions
+to try to limit the effect of clobbering memory on GCC's optimization
+of loads and stores.
+
+Signed-off-by: Dave Anglin <dave.anglin@nrc-cnrc.gc.ca>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/parisc/include/asm/system.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/include/asm/system.h
++++ b/arch/parisc/include/asm/system.h
+@@ -168,8 +168,8 @@ static inline void set_eiem(unsigned lon
+ /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+ #define __ldcw(a) ({ \
+ unsigned __ret; \
+- __asm__ __volatile__(__LDCW " 0(%1),%0" \
+- : "=r" (__ret) : "r" (a)); \
++ __asm__ __volatile__(__LDCW " 0(%2),%0" \
++ : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
+ __ret; \
+ })
+
--- /dev/null
+From 5a2642f620eb6e40792822fa0eafe23046fbb55e Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Mon, 22 Jun 2009 16:47:59 +0000
+Subject: powerpc/mpic: Fix mapping of "DCR" based MPIC variants
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 5a2642f620eb6e40792822fa0eafe23046fbb55e upstream.
+
+Commit 31207dab7d2e63795eb15823947bd2f7025b08e2
+"Fix incorrect allocation of interrupt rev-map"
+introduced a regression crashing on boot on machines using
+a "DCR" based MPIC, such as the Cell blades.
+
+The reason is that the irq host data structure is initialized
+much later as a result of that patch, causing our calls to
+mpic_map() do be done before we have a host setup.
+
+Unfortunately, this breaks _mpic_map_dcr() which uses the
+mpic->irqhost to get to the device node.
+
+This fixes it by, instead, passing the device node explicitly
+to mpic_map().
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Acked-by: Akira Tsukamoto <akirat@rd.scei.sony.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/sysdev/mpic.c | 29 ++++++++++++++++-------------
+ 1 file changed, 16 insertions(+), 13 deletions(-)
+
+--- a/arch/powerpc/sysdev/mpic.c
++++ b/arch/powerpc/sysdev/mpic.c
+@@ -279,28 +279,29 @@ static void _mpic_map_mmio(struct mpic *
+ }
+
+ #ifdef CONFIG_PPC_DCR
+-static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
++static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
++ struct mpic_reg_bank *rb,
+ unsigned int offset, unsigned int size)
+ {
+ const u32 *dbasep;
+
+- dbasep = of_get_property(mpic->irqhost->of_node, "dcr-reg", NULL);
++ dbasep = of_get_property(node, "dcr-reg", NULL);
+
+- rb->dhost = dcr_map(mpic->irqhost->of_node, *dbasep + offset, size);
++ rb->dhost = dcr_map(node, *dbasep + offset, size);
+ BUG_ON(!DCR_MAP_OK(rb->dhost));
+ }
+
+-static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr,
+- struct mpic_reg_bank *rb, unsigned int offset,
+- unsigned int size)
++static inline void mpic_map(struct mpic *mpic, struct device_node *node,
++ phys_addr_t phys_addr, struct mpic_reg_bank *rb,
++ unsigned int offset, unsigned int size)
+ {
+ if (mpic->flags & MPIC_USES_DCR)
+- _mpic_map_dcr(mpic, rb, offset, size);
++ _mpic_map_dcr(mpic, node, rb, offset, size);
+ else
+ _mpic_map_mmio(mpic, phys_addr, rb, offset, size);
+ }
+ #else /* CONFIG_PPC_DCR */
+-#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
++#define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
+ #endif /* !CONFIG_PPC_DCR */
+
+
+@@ -1150,8 +1151,8 @@ struct mpic * __init mpic_alloc(struct d
+ }
+
+ /* Map the global registers */
+- mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+- mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
++ mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
++ mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
+
+ /* Reset */
+ if (flags & MPIC_WANTS_RESET) {
+@@ -1192,7 +1193,7 @@ struct mpic * __init mpic_alloc(struct d
+
+ /* Map the per-CPU registers */
+ for (i = 0; i < mpic->num_cpus; i++) {
+- mpic_map(mpic, paddr, &mpic->cpuregs[i],
++ mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
+ MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
+ 0x1000);
+ }
+@@ -1200,7 +1201,7 @@ struct mpic * __init mpic_alloc(struct d
+ /* Initialize main ISU if none provided */
+ if (mpic->isu_size == 0) {
+ mpic->isu_size = mpic->num_sources;
+- mpic_map(mpic, paddr, &mpic->isus[0],
++ mpic_map(mpic, node, paddr, &mpic->isus[0],
+ MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+ }
+ mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
+@@ -1254,8 +1255,10 @@ void __init mpic_assign_isu(struct mpic
+
+ BUG_ON(isu_num >= MPIC_MAX_ISU);
+
+- mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
++ mpic_map(mpic, mpic->irqhost->of_node,
++ paddr, &mpic->isus[isu_num], 0,
+ MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
++
+ if ((isu_first + mpic->isu_size) > mpic->num_sources)
+ mpic->num_sources = isu_first + mpic->isu_size;
+ }
x86-pci-insert-ioapic-resource-before-assigning-unassigned-resources.patch
sched-fix-nr_uninterruptible-accounting-of-frozen-tasks-really.patch
dm-raid1-wake-kmirrord-when-requeueing-delayed-bios-after-remote-recovery.patch
+x86-geode-mark-mfgpt-irq-irqf_timer-to-prevent-resume-failure.patch
+x86-fix-movq-immediate-operand-constraints-in-uaccess_64.h.patch
+x86-fix-movq-immediate-operand-constraints-in-uaccess.h.patch
+x86-add-quirk-for-intel-dg45id-board-to-avoid-low-memory-corruption.patch
+x86-64-fix-bad_srat-to-clear-all-state.patch
+parisc-ensure-broadcast-tlb-purge-runs-single-threaded.patch
+parisc-fix-ldcw-inline-assembler.patch
+x86-setup-fix-80x34-and-80x60-console-modes.patch
+staging-rt2870-add-usb-id-for-sitecom-wl-608.patch
+vmscan-do-not-unconditionally-treat-zones-that-fail-zone_reclaim-as-full.patch
+x86-don-t-use-access_ok-as-a-range-check-in-get_user_pages_fast.patch
+mm-mark-page-accessed-before-we-write_end.patch
+elf-fix-one-check-after-use.patch
+hwmon-fix-lock-imbalance.patch
+powerpc-mpic-fix-mapping-of-dcr-based-mpic-variants.patch
--- /dev/null
+From 8dfb00571819ce491ce1760523d50e85bcd2185f Mon Sep 17 00:00:00 2001
+From: Jorrit Schippers <jorrit@ncode.nl>
+Date: Wed, 10 Jun 2009 15:34:26 +0200
+Subject: Staging: rt2870: Add USB ID for Sitecom WL-608
+
+From: Jorrit Schippers <jorrit@ncode.nl>
+
+commit 8dfb00571819ce491ce1760523d50e85bcd2185f upstream.
+
+Add the USB id 0x0DF6,0x003F to the rt2870.h file such that the
+Sitecom WL-608 device will be recognized by this driver.
+
+Signed-off-by: Jorrit Schippers <jorrit@ncode.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/staging/rt2870/rt2870.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/staging/rt2870/rt2870.h
++++ b/drivers/staging/rt2870/rt2870.h
+@@ -97,6 +97,7 @@
+ {USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \
+ {USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \
+ {USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \
++ {USB_DEVICE(0x0DF6,0x003F)}, /* Sitecom WL-608 */ \
+ {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \
+ {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \
+ {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \
--- /dev/null
+From mel@csn.ul.ie Tue Jul 28 11:08:24 2009
+From: Mel Gorman <mel@csn.ul.ie>
+Date: Wed, 1 Jul 2009 09:26:25 +0100
+Subject: vmscan: do not unconditionally treat zones that fail zone_reclaim() as full
+To: Greg KH <greg@kroah.com>
+Cc: akpm@linux-foundation.org, torvalds@linux-foundation.org, riel@redhat.com, cl@linux-foundation.org, kosaki.motohiro@jp.fujitsu.com, fengguang.wu@intel.com, stable@kernel.org
+Message-ID: <20090701082625.GA16355@csn.ul.ie>
+Content-Disposition: inline
+
+From: Mel Gorman <mel@csn.ul.ie>
+
+commit fa5e084e43eb14c14942027e1e2e894aeed96097 upstream.
+
+vmscan: do not unconditionally treat zones that fail zone_reclaim() as full
+
+On NUMA machines, the administrator can configure zone_reclaim_mode that
+is a more targetted form of direct reclaim. On machines with large NUMA
+distances for example, a zone_reclaim_mode defaults to 1 meaning that
+clean unmapped pages will be reclaimed if the zone watermarks are not
+being met. The problem is that zone_reclaim() failing at all means the
+zone gets marked full.
+
+This can cause situations where a zone is usable, but is being skipped
+because it has been considered full. Take a situation where a large tmpfs
+mount is occupying a large percentage of memory overall. The pages do not
+get cleaned or reclaimed by zone_reclaim(), but the zone gets marked full
+and the zonelist cache considers them not worth trying in the future.
+
+This patch makes zone_reclaim() return more fine-grained information about
+what occurred when zone_reclaim() failed. The zone only gets marked full
+if it really is unreclaimable. If it's a case that the scan did not occur
+or if enough pages were not reclaimed with the limited reclaim_mode, then
+the zone is simply skipped.
+
+There is a side-effect to this patch. Currently, if zone_reclaim()
+successfully reclaimed SWAP_CLUSTER_MAX, an allocation attempt would go
+ahead. With this patch applied, zone watermarks are rechecked after
+zone_reclaim() does some work.
+
+This bug was introduced by commit 9276b1bc96a132f4068fdee00983c532f43d3a26
+("memory page_alloc zonelist caching speedup") way back in 2.6.19 when the
+zonelist_cache was introduced. It was not intended that zone_reclaim()
+aggressively consider the zone to be full when it failed as full direct
+reclaim can still be an option. Due to the age of the bug, it should be
+considered a -stable candidate.
+
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Christoph Lameter <cl@linux-foundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/internal.h | 4 ++++
+ mm/page_alloc.c | 26 ++++++++++++++++++++++----
+ mm/vmscan.c | 11 ++++++-----
+ 3 files changed, 32 insertions(+), 9 deletions(-)
+
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -284,4 +284,8 @@ int __get_user_pages(struct task_struct
+ unsigned long start, int len, int flags,
+ struct page **pages, struct vm_area_struct **vmas);
+
++#define ZONE_RECLAIM_NOSCAN -2
++#define ZONE_RECLAIM_FULL -1
++#define ZONE_RECLAIM_SOME 0
++#define ZONE_RECLAIM_SUCCESS 1
+ #endif
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1420,20 +1420,38 @@ zonelist_scan:
+
+ if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
+ unsigned long mark;
++ int ret;
+ if (alloc_flags & ALLOC_WMARK_MIN)
+ mark = zone->pages_min;
+ else if (alloc_flags & ALLOC_WMARK_LOW)
+ mark = zone->pages_low;
+ else
+ mark = zone->pages_high;
+- if (!zone_watermark_ok(zone, order, mark,
+- classzone_idx, alloc_flags)) {
+- if (!zone_reclaim_mode ||
+- !zone_reclaim(zone, gfp_mask, order))
++
++ if (zone_watermark_ok(zone, order, mark,
++ classzone_idx, alloc_flags))
++ goto try_this_zone;
++
++ if (zone_reclaim_mode == 0)
++ goto this_zone_full;
++
++ ret = zone_reclaim(zone, gfp_mask, order);
++ switch (ret) {
++ case ZONE_RECLAIM_NOSCAN:
++ /* did not scan */
++ goto try_next_zone;
++ case ZONE_RECLAIM_FULL:
++ /* scanned but unreclaimable */
++ goto this_zone_full;
++ default:
++ /* did we reclaim enough */
++ if (!zone_watermark_ok(zone, order, mark,
++ classzone_idx, alloc_flags))
+ goto this_zone_full;
+ }
+ }
+
++try_this_zone:
+ page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
+ if (page)
+ break;
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2426,16 +2426,16 @@ int zone_reclaim(struct zone *zone, gfp_
+ */
+ if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
+ zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
+- return 0;
++ return ZONE_RECLAIM_FULL;
+
+ if (zone_is_all_unreclaimable(zone))
+- return 0;
++ return ZONE_RECLAIM_FULL;
+
+ /*
+ * Do not scan if the allocation should not be delayed.
+ */
+ if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
+- return 0;
++ return ZONE_RECLAIM_NOSCAN;
+
+ /*
+ * Only run zone reclaim on the local zone or on zones that do not
+@@ -2445,10 +2445,11 @@ int zone_reclaim(struct zone *zone, gfp_
+ */
+ node_id = zone_to_nid(zone);
+ if (node_state(node_id, N_CPU) && node_id != numa_node_id())
+- return 0;
++ return ZONE_RECLAIM_NOSCAN;
+
+ if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+- return 0;
++ return ZONE_RECLAIM_NOSCAN;
++
+ ret = __zone_reclaim(zone, gfp_mask, order);
+ zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+
--- /dev/null
+From 429b2b319af3987e808c18f6b81313104caf782c Mon Sep 17 00:00:00 2001
+From: Andi Kleen <andi@firstfloor.org>
+Date: Sat, 18 Jul 2009 08:56:57 +0200
+Subject: x86-64: Fix bad_srat() to clear all state
+
+From: Andi Kleen <andi@firstfloor.org>
+
+commit 429b2b319af3987e808c18f6b81313104caf782c upstream.
+
+Need to clear both nodes and nodes_add state for start/end.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+LKML-Reference: <20090718065657.GA2898@basil.fritz.box>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/srat_64.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/srat_64.c
++++ b/arch/x86/mm/srat_64.c
+@@ -89,8 +89,10 @@ static __init void bad_srat(void)
+ found_add_area = 0;
+ for (i = 0; i < MAX_LOCAL_APIC; i++)
+ apicid_to_node[i] = NUMA_NO_NODE;
+- for (i = 0; i < MAX_NUMNODES; i++)
+- nodes_add[i].start = nodes[i].end = 0;
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ nodes[i].start = nodes[i].end = 0;
++ nodes_add[i].start = nodes_add[i].end = 0;
++ }
+ remove_all_active_ranges();
+ }
+
--- /dev/null
+From 6aa542a694dc9ea4344a8a590d2628c33d1b9431 Mon Sep 17 00:00:00 2001
+From: Alexey Fisher <bug-track@fisher-privat.net>
+Date: Wed, 15 Jul 2009 14:16:09 +0200
+Subject: x86: Add quirk for Intel DG45ID board to avoid low memory corruption
+
+From: Alexey Fisher <bug-track@fisher-privat.net>
+
+commit 6aa542a694dc9ea4344a8a590d2628c33d1b9431 upstream.
+
+AMI BIOS with low memory corruption was found on Intel DG45ID
+board (Bug 13710). Add this board to the blacklist - in the
+(somewhat optimistic) hope of future boards/BIOSes from Intel
+not having this bug.
+
+Also see:
+
+ http://bugzilla.kernel.org/show_bug.cgi?id=13736
+
+Signed-off-by: Alexey Fisher <bug-track@fisher-privat.net>
+Cc: ykzhao <yakui.zhao@intel.com>
+Cc: alan@lxorguk.ukuu.org.uk
+Cc: <stable@kernel.org>
+LKML-Reference: <1247660169-4503-1-git-send-email-bug-track@fisher-privat.net>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/setup.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -650,6 +650,19 @@ static struct dmi_system_id __initdata b
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
+ },
+ },
++ {
++ /*
++ * AMI BIOS with low memory corruption was found on Intel DG45ID board.
++ * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
++ * match only DMI_BOARD_NAME and see if there is more bad products
++ * with this vendor.
++ */
++ .callback = dmi_low_memory_corruption,
++ .ident = "AMI BIOS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
++ },
++ },
+ #endif
+ {}
+ };
--- /dev/null
+From torvalds@linux-foundation.org Tue Jul 28 11:13:51 2009
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 22 Jun 2009 10:25:25 -0700 (PDT)
+Subject: x86: don't use 'access_ok()' as a range check in get_user_pages_fast()
+To: Greg KH <gregkh@suse.de>
+Cc: Ingo Molnar <mingo@elte.hu>, Andrew Morton <akpm@linux-foundation.org>, Hugh Dickins <hugh.dickins@tiscali.co.uk>, Chris Wright <chrisw@sous-sol.org>, Nick Piggin <npiggin@suse.de>, "H. Peter Anvin" <hpa@zytor.com>, Thomas Gleixner <tglx@linutronix.de>, Alan Cox <alan@lxorguk.ukuu.org.uk>, Peter Zijlstra <a.p.zijlstra@chello.nl>
+Message-ID: <alpine.LFD.2.01.0906221024140.3240@localhost.localdomain>
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 7f8189068726492950bf1a2dcfd9b51314560abf - modified
+ for stable to not use the sloppy __VIRTUAL_MASK_SHIFT ]
+
+It's really not right to use 'access_ok()', since that is meant for the
+normal "get_user()" and "copy_from/to_user()" accesses, which are done
+through the TLB, rather than through the page tables.
+
+Why? access_ok() does both too few, and too many checks. Too many,
+because it is meant for regular kernel accesses that will not honor the
+'user' bit in the page tables, and because it honors the USER_DS vs
+KERNEL_DS distinction that we shouldn't care about in GUP. And too few,
+because it doesn't do the 'canonical' check on the address on x86-64,
+since the TLB will do that for us.
+
+So instead of using a function that isn't meant for this, and does
+something else and much more complicated, just do the real rules: we
+don't want the range to overflow, and on x86-64, we want it to be a
+canonical low address (on 32-bit, all addresses are canonical).
+
+Acked-by: Ingo Molnar <mingo@elte.hu>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/gup.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -247,10 +247,15 @@ int get_user_pages_fast(unsigned long st
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
++
+ end = start + len;
+- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+- (void __user *)start, len)))
++ if (end < start)
++ goto slow_irqon;
++
++#ifdef CONFIG_X86_64
++ if (end >> 47)
+ goto slow_irqon;
++#endif
+
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency
--- /dev/null
+From ebe119cd0929df4878f758ebf880cb435e4dcaaf Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin <hpa@zytor.com>
+Date: Mon, 20 Jul 2009 23:27:39 -0700
+Subject: x86: Fix movq immediate operand constraints in uaccess.h
+
+From: H. Peter Anvin <hpa@zytor.com>
+
+commit ebe119cd0929df4878f758ebf880cb435e4dcaaf upstream.
+
+The movq instruction, generated by __put_user_asm() when used for
+64-bit data, takes a sign-extended immediate ("e") not a zero-extended
+immediate ("Z").
+
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Uros Bizjak <ubizjak@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/uaccess.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -212,9 +212,9 @@ extern int __get_user_bad(void);
+ : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+ #else
+ #define __put_user_asm_u64(x, ptr, retval, errret) \
+- __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
++ __put_user_asm(x, ptr, retval, "q", "", "er", errret)
+ #define __put_user_asm_ex_u64(x, addr) \
+- __put_user_asm_ex(x, addr, "q", "", "Zr")
++ __put_user_asm_ex(x, addr, "q", "", "er")
+ #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
+ #endif
+
--- /dev/null
+From 155b73529583c38f30fd394d692b15a893960782 Mon Sep 17 00:00:00 2001
+From: Uros Bizjak <ubizjak@gmail.com>
+Date: Sun, 19 Jul 2009 18:06:35 +0200
+Subject: x86: Fix movq immediate operand constraints in uaccess_64.h
+
+From: Uros Bizjak <ubizjak@gmail.com>
+
+commit 155b73529583c38f30fd394d692b15a893960782 upstream.
+
+arch/x86/include/asm/uaccess_64.h uses wrong asm operand constraint
+("ir") for movq insn. Since movq sign-extends its immediate operand,
+"er" constraint should be used instead.
+
+Attached patch changes all uses of __put_user_asm in uaccess_64.h to use
+"er" when "q" insn suffix is involved.
+
+Patch was compile tested on x86_64 with defconfig.
+
+Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/uaccess_64.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, con
+ ret, "l", "k", "ir", 4);
+ return ret;
+ case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+- ret, "q", "", "ir", 8);
++ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+- ret, "q", "", "ir", 10);
++ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+@@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, con
+ return ret;
+ case 16:
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+- ret, "q", "", "ir", 16);
++ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+ __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+- ret, "q", "", "ir", 8);
++ ret, "q", "", "er", 8);
+ return ret;
+ default:
+ return copy_user_generic((__force void *)dst, src, size);
+@@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, con
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+- ret, "q", "", "ir", 8);
++ ret, "q", "", "er", 8);
+ return ret;
+ }
+ default:
--- /dev/null
+From d6c585a4342a2ff627a29f9aea77c5ed4cd76023 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 24 Jul 2009 08:34:59 +0200
+Subject: x86: geode: Mark mfgpt irq IRQF_TIMER to prevent resume failure
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit d6c585a4342a2ff627a29f9aea77c5ed4cd76023 upstream.
+
+Timer interrupts are excluded from being disabled during suspend. The
+clock events code manages the disabling of clock events on its own
+because the timer interrupt needs to be functional before the resume
+code reenables the device interrupts.
+
+The mfgpt timer request its interrupt without setting the IRQF_TIMER
+flag so suspend_device_irqs() disables it as well which results in a
+fatal resume failure.
+
+Adding IRQF_TIMER to the interrupt flags when requesting the mfgpt
+timer interrupt solves the problem.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+LKML-Reference: <new-submission>
+Cc: Andres Salomon <dilinger@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/mfgpt_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/mfgpt_32.c
++++ b/arch/x86/kernel/mfgpt_32.c
+@@ -347,7 +347,7 @@ static irqreturn_t mfgpt_tick(int irq, v
+
+ static struct irqaction mfgptirq = {
+ .handler = mfgpt_tick,
+- .flags = IRQF_DISABLED | IRQF_NOBALANCING,
++ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
+ .name = "mfgpt-timer"
+ };
+
--- /dev/null
+From root@ualberta.ca Tue Jul 28 11:04:31 2009
+From: Marc Aurele La France <root@ualberta.ca>
+Date: Mon, 29 Jun 2009 18:07:02 -0600 (MDT)
+Subject: x86, setup (2.6.30-stable) fix 80x34 and 80x60 console modes
+To: stable@kernel.org
+Cc: "H. Peter Anvin" <hpa@linux.intel.com>, x86@kernel.org, linux-kernel@vger.kernel.org, Research.Support@ualberta.ca
+Message-ID: <alpine.BSO.1.10.0906291746410.14658@fanir.ucs.ualberta.ca>
+
+From: Marc Aurele La France <root@ualberta.ca>
+
+Note: this is not in upstream since upstream is not affected due to the
+ new "BIOS glovebox" subsystem.
+
+As coded, most INT10 calls in video-vga.c allow the compiler to assume
+EAX remains unchanged across them, which is not always the case. This
+triggers an optimisation issue that causes vga_set_vertical_end() to be
+called with an incorrect number of scanlines. Fix this by beefing up
+the asm constraints on these calls.
+
+Reported-by: Marc Aurele La France <tsi@xfree86.org>
+Signed-off-by: Marc Aurele La France <tsi@xfree86.org>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/boot/video-vga.c | 44 +++++++++++++++++++++++++++++++-------------
+ 1 file changed, 31 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/boot/video-vga.c
++++ b/arch/x86/boot/video-vga.c
+@@ -45,8 +45,10 @@ static u8 vga_set_basic_mode(void)
+
+ #ifdef CONFIG_VIDEO_400_HACK
+ if (adapter >= ADAPTER_VGA) {
++ ax = 0x1202;
+ asm volatile(INT10
+- : : "a" (0x1202), "b" (0x0030)
++ : "+a" (ax)
++ : "b" (0x0030)
+ : "ecx", "edx", "esi", "edi");
+ }
+ #endif
+@@ -81,44 +83,59 @@ static u8 vga_set_basic_mode(void)
+
+ static void vga_set_8font(void)
+ {
++ u16 ax;
++
+ /* Set 8x8 font - 80x43 on EGA, 80x50 on VGA */
+
+ /* Set 8x8 font */
+- asm volatile(INT10 : : "a" (0x1112), "b" (0));
++ ax = 0x1112;
++ asm volatile(INT10 : "+a" (ax) : "b" (0));
+
+ /* Use alternate print screen */
+- asm volatile(INT10 : : "a" (0x1200), "b" (0x20));
++ ax = 0x1200;
++ asm volatile(INT10 : "+a" (ax) : "b" (0x20));
+
+ /* Turn off cursor emulation */
+- asm volatile(INT10 : : "a" (0x1201), "b" (0x34));
++ ax = 0x1201;
++ asm volatile(INT10 : "+a" (ax) : "b" (0x34));
+
+ /* Cursor is scan lines 6-7 */
+- asm volatile(INT10 : : "a" (0x0100), "c" (0x0607));
++ ax = 0x0100;
++ asm volatile(INT10 : "+a" (ax) : "c" (0x0607));
+ }
+
+ static void vga_set_14font(void)
+ {
++ u16 ax;
++
+ /* Set 9x14 font - 80x28 on VGA */
+
+ /* Set 9x14 font */
+- asm volatile(INT10 : : "a" (0x1111), "b" (0));
++ ax = 0x1111;
++ asm volatile(INT10 : "+a" (ax) : "b" (0));
+
+ /* Turn off cursor emulation */
+- asm volatile(INT10 : : "a" (0x1201), "b" (0x34));
++ ax = 0x1201;
++ asm volatile(INT10 : "+a" (ax) : "b" (0x34));
+
+ /* Cursor is scan lines 11-12 */
+- asm volatile(INT10 : : "a" (0x0100), "c" (0x0b0c));
++ ax = 0x0100;
++ asm volatile(INT10 : "+a" (ax) : "c" (0x0b0c));
+ }
+
+ static void vga_set_80x43(void)
+ {
++ u16 ax;
++
+ /* Set 80x43 mode on VGA (not EGA) */
+
+ /* Set 350 scans */
+- asm volatile(INT10 : : "a" (0x1201), "b" (0x30));
++ ax = 0x1201;
++ asm volatile(INT10 : "+a" (ax) : "b" (0x30));
+
+ /* Reset video mode */
+- asm volatile(INT10 : : "a" (0x0003));
++ ax = 0x0003;
++ asm volatile(INT10 : "+a" (ax));
+
+ vga_set_8font();
+ }
+@@ -225,7 +242,7 @@ static int vga_set_mode(struct mode_info
+ */
+ static int vga_probe(void)
+ {
+- u16 ega_bx;
++ u16 ax, ega_bx;
+
+ static const char *card_name[] = {
+ "CGA/MDA/HGC", "EGA", "VGA"
+@@ -242,9 +259,10 @@ static int vga_probe(void)
+ };
+ u8 vga_flag;
+
++ ax = 0x1200;
+ asm(INT10
+- : "=b" (ega_bx)
+- : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */
++ : "+a" (ax), "=b" (ega_bx)
++ : "b" (0x10) /* Check EGA/VGA */
+ : "ecx", "edx", "esi", "edi");
+
+ #ifndef _WAKEUP