--- /dev/null
+From 731eb1a03a8445cde2cb23ecfb3580c6fa7bb690 Mon Sep 17 00:00:00 2001
+From: Akinobu Mita <akinobu.mita@gmail.com>
+Date: Wed, 3 Mar 2010 23:55:01 -0500
+Subject: ext4: consolidate in_range() definitions
+
+From: Akinobu Mita <akinobu.mita@gmail.com>
+
+commit 731eb1a03a8445cde2cb23ecfb3580c6fa7bb690 upstream.
+
+There are duplicate macro definitions of in_range() in mballoc.h and
+balloc.c. This consolidates these two definitions into ext4.h, and
+changes extents.c to use in_range() as well.
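+
+As a quick sanity check of the macro's semantics -- the range is a start
+block plus a length, covering [first, first + len - 1] inclusive -- here
+is a minimal user-space sketch (a hypothetical test harness, not part of
+this patch):
+
+#include <assert.h>
+
+#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+
+int main(void)
+{
+	/* a 4-block range starting at block 100 covers blocks 100..103 */
+	assert(in_range(100, 100, 4));
+	assert(in_range(103, 100, 4));
+	assert(!in_range(99, 100, 4));	/* one before the start */
+	assert(!in_range(104, 100, 4));	/* one past the end */
+	return 0;
+}
+
+Note that the macro evaluates its arguments more than once, so callers
+are expected to pass plain variables rather than expressions with side
+effects.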
+
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Cc: Andreas Dilger <adilger@sun.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/balloc.c | 3 ---
+ fs/ext4/ext4.h | 2 ++
+ fs/ext4/extents.c | 4 ++--
+ fs/ext4/mballoc.h | 2 --
+ 4 files changed, 4 insertions(+), 7 deletions(-)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -189,9 +189,6 @@ unsigned ext4_init_block_bitmap(struct s
+ * when a file system is mounted (see ext4_fill_super).
+ */
+
+-
+-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+-
+ /**
+ * ext4_get_group_desc() -- load group descriptor from disk
+ * @sb: super block
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1888,6 +1888,8 @@ static inline void set_bitmap_uptodate(s
+ set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
+ }
+
++#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _EXT4_H */
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1948,7 +1948,7 @@ ext4_ext_in_cache(struct inode *inode, e
+
+ BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
+ cex->ec_type != EXT4_EXT_CACHE_EXTENT);
+- if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
++ if (in_range(block, cex->ec_block, cex->ec_len)) {
+ ex->ee_block = cpu_to_le32(cex->ec_block);
+ ext4_ext_store_pblock(ex, cex->ec_start);
+ ex->ee_len = cpu_to_le16(cex->ec_len);
+@@ -3302,7 +3302,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ */
+ ee_len = ext4_ext_get_actual_len(ex);
+ /* if found extent covers block, simply return it */
+- if (iblock >= ee_block && iblock < ee_block + ee_len) {
++ if (in_range(iblock, ee_block, ee_len)) {
+ newblock = iblock - ee_block + ee_start;
+ /* number of remaining blocks in the extent */
+ allocated = ee_len - (iblock - ee_block);
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -221,8 +221,6 @@ struct ext4_buddy {
+ #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
+ #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
+
+-#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+-
+ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
+ struct ext4_free_extent *fex)
+ {
--- /dev/null
+From a7c55cbee0c1bae9bf5a15a08300e91d88706e45 Mon Sep 17 00:00:00 2001
+From: Josh Hunt <johunt@akamai.com>
+Date: Wed, 4 Aug 2010 20:27:05 -0400
+Subject: oprofile: add support for Intel processor model 30
+
+From: Josh Hunt <johunt@akamai.com>
+
+commit a7c55cbee0c1bae9bf5a15a08300e91d88706e45 upstream.
+
+Newer Intel processors identifying themselves as model 30 are not recognized by
+oprofile.
+
+<cpuinfo snippet>
+model : 30
+model name : Intel(R) Xeon(R) CPU X3470 @ 2.93GHz
+</cpuinfo snippet>
+
+Running oprofile on these machines gives the following:
++ opcontrol --init
++ opcontrol --list-events
+oprofile: available events for CPU type "Intel Architectural Perfmon"
+
+See Intel 64 and IA-32 Architectures Software Developer's Manual
+Volume 3B (Document 253669) Chapter 18 for architectural perfmon events
+This is a limited set of fallback events because oprofile doesn't know your CPU
+CPU_CLK_UNHALTED: (counter: all)
+ Clock cycles when not halted (min count: 6000)
+INST_RETIRED: (counter: all)
+ number of instructions retired (min count: 6000)
+LLC_MISSES: (counter: all)
+ Last level cache demand requests from this core that missed the LLC
+(min count: 6000)
+ Unit masks (default 0x41)
+ ----------
+ 0x41: No unit mask
+LLC_REFS: (counter: all)
+ Last level cache demand requests from this core (min count: 6000)
+ Unit masks (default 0x4f)
+ ----------
+ 0x4f: No unit mask
+BR_MISS_PRED_RETIRED: (counter: all)
+ number of mispredicted branches retired (precise) (min count: 500)
++ opcontrol --shutdown
+
+Tested using oprofile 0.9.6.
+
+Signed-off-by: Josh Hunt <johunt@akamai.com>
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/oprofile/nmi_int.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -618,6 +618,7 @@ static int __init ppro_init(char **cpu_t
+ *cpu_type = "i386/core_2";
+ break;
+ case 0x1a:
++ case 0x1e:
+ case 0x2e:
+ spec = &op_arch_perfmon_spec;
+ *cpu_type = "i386/core_i7";
--- /dev/null
+From 45c34e05c4e3d36e7c44e790241ea11a1d90d54e Mon Sep 17 00:00:00 2001
+From: John Villalovos <sodarock@gmail.com>
+Date: Fri, 7 May 2010 12:41:40 -0400
+Subject: Oprofile: Change CPUIDS from decimal to hex, and add some comments
+
+From: John Villalovos <sodarock@gmail.com>
+
+commit 45c34e05c4e3d36e7c44e790241ea11a1d90d54e upstream.
+
+Back when the patch was submitted for "Add Xeon 7500 series support to
+oprofile", Robert Richter had asked for a followon patch that
+converted all the CPU ID values to hex.
+
+I have done that here for the "i386/core_i7" and "i386/atom" class
+processors in the ppro_init() function and also added some comments on
+where to find documentation on the Intel processors.
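+
+For illustration, the hex values match CPUID's DisplayFamily_DisplayModel
+encoding directly: leaf 1 returns the base model in EAX[7:4] and, for
+family 6, the extended model in EAX[19:16] as the high nibble. A minimal
+user-space sketch of that decoding (hypothetical, GCC/clang on x86, not
+part of this patch):
+
+#include <stdio.h>
+#include <cpuid.h>
+
+int main(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+		return 1;
+
+	unsigned int family = (eax >> 8) & 0xf;
+	unsigned int model  = (eax >> 4) & 0xf;
+
+	/* family 6 and 15 prepend the extended model bits */
+	if (family == 6 || family == 15)
+		model |= ((eax >> 16) & 0xf) << 4;
+
+	/* e.g. a Nehalem Core i7 reports 0x1a (decimal 26) */
+	printf("family %u, model 0x%x (%u)\n", family, model, model);
+	return 0;
+}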
+
+Signed-off-by: John L. Villalovos <john.l.villalovos@intel.com>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/oprofile/nmi_int.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -584,6 +584,18 @@ static int __init ppro_init(char **cpu_t
+ if (force_arch_perfmon && cpu_has_arch_perfmon)
+ return 0;
+
++ /*
++ * Documentation on identifying Intel processors by CPU family
++ * and model can be found in the Intel Software Developer's
++ * Manuals (SDM):
++ *
++ * http://www.intel.com/products/processor/manuals/
++ *
++ * As of May 2010 the documentation for this was in the:
++ * "Intel 64 and IA-32 Architectures Software Developer's
++ * Manual Volume 3B: System Programming Guide", "Table B-1
++ * CPUID Signature Values of DisplayFamily_DisplayModel".
++ */
+ switch (cpu_model) {
+ case 0 ... 2:
+ *cpu_type = "i386/ppro";
+@@ -605,12 +617,12 @@ static int __init ppro_init(char **cpu_t
+ case 15: case 23:
+ *cpu_type = "i386/core_2";
+ break;
++ case 0x1a:
+ case 0x2e:
+- case 26:
+ spec = &op_arch_perfmon_spec;
+ *cpu_type = "i386/core_i7";
+ break;
+- case 28:
++ case 0x1c:
+ *cpu_type = "i386/atom";
+ break;
+ default:
--- /dev/null
+From 127c03cdbad9bd5af5d7f33bd31a1015a90cb77f Mon Sep 17 00:00:00 2001
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+Date: Tue, 3 Aug 2010 09:33:45 +0200
+Subject: pcmcia: avoid buffer overflow in pcmcia_setup_isa_irq
+
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+
+commit 127c03cdbad9bd5af5d7f33bd31a1015a90cb77f upstream.
+
+NR_IRQS may be as low as 16, causing a (harmless?) buffer overflow in
+pcmcia_setup_isa_irq():
+
+static u8 pcmcia_used_irq[NR_IRQS];
+
+...
+
+ if ((try < 32) && pcmcia_used_irq[irq])
+ continue;
+
+This is read-only, so if this address were non-zero, it would just
+mean we would not attempt an IRQ >= NR_IRQS -- which would fail anyway!
+And as request_irq() fails for an irq >= NR_IRQS, the setting code path:
+
+ pcmcia_used_irq[irq]++;
+
+is never reached as well.
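+
+To make the overflow concrete, the probe loop indexes the array with
+try % 32, i.e. values 0..31, while the array only has NR_IRQS entries.
+A minimal user-space sketch of the pattern (hypothetical, for
+illustration only -- not the driver code):
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define NR_IRQS 16			/* may be this small on some configs */
+
+static uint8_t pcmcia_used_irq[NR_IRQS];
+
+int main(void)
+{
+	int try, irq;
+
+	for (try = 0; try < 64; try++) {
+		irq = try % 32;	/* 0..31 */
+		/* the old bound check (try < 32) still allows irq >= NR_IRQS */
+		if (try < 32 && irq >= NR_IRQS)
+			printf("try %2d reads pcmcia_used_irq[%d], past the end\n",
+			       try, irq);
+	}
+	return 0;
+}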
+
+Reported-by: Christoph Fritz <chf.fritz@googlemail.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Christoph Fritz <chf.fritz@googlemail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pcmcia/pcmcia_resource.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/pcmcia/pcmcia_resource.c
++++ b/drivers/pcmcia/pcmcia_resource.c
+@@ -39,7 +39,7 @@ module_param(io_speed, int, 0444);
+ #ifdef CONFIG_PCMCIA_PROBE
+ #include <asm/irq.h>
+ /* mask of IRQs already reserved by other cards, we should avoid using them */
+-static u8 pcmcia_used_irq[NR_IRQS];
++static u8 pcmcia_used_irq[32];
+ #endif
+
+
+@@ -719,6 +719,9 @@ int pcmcia_request_irq(struct pcmcia_dev
+ for (try = 0; try < 64; try++) {
+ irq = try % 32;
+
++ if (irq > NR_IRQS)
++ continue;
++
+ /* marked as available by driver, and not blocked by userspace? */
+ if (!((mask >> irq) & 1))
+ continue;
can-add-limit-for-nframes-and-clean-up-signed-unsigned-variables.patch
isdn-fix-information-leak.patch
act_nat-the-checksum-of-icmp-doesn-t-have-pseudo-header.patch
+vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch
+pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch
+ext4-consolidate-in_range-definitions.patch
+oprofile-change-cpuids-from-decimal-to-hex-and-add-some-comments.patch
+oprofile-add-support-for-intel-processor-model-30.patch
--- /dev/null
+From e31f3698cd3499e676f6b0ea12e3528f569c4fa3 Mon Sep 17 00:00:00 2001
+From: Wu Fengguang <fengguang.wu@intel.com>
+Date: Mon, 9 Aug 2010 17:20:01 -0700
+Subject: vmscan: raise the bar to PAGEOUT_IO_SYNC stalls
+
+From: Wu Fengguang <fengguang.wu@intel.com>
+
+commit e31f3698cd3499e676f6b0ea12e3528f569c4fa3 upstream.
+
+Fix "system goes unresponsive under memory pressure and lots of
+dirty/writeback pages" bug.
+
+ http://lkml.org/lkml/2010/4/4/86
+
+In the above thread, Andreas Mohr described that
+
+ Invoking any command locked up for minutes (note that I'm
+ talking about attempted additional I/O to the _other_,
+ _unaffected_ main system HDD - such as loading some shell
+ binaries -, NOT the external SSD18M!!).
+
+This happens when the two conditions are both met:
+- under memory pressure
+- writing heavily to a slow device
+
+OOM also happens in Andreas' system. The OOM trace shows that 3 processes
+are stuck in wait_on_page_writeback() in the direct reclaim path. One in
+do_fork() and the other two in unix_stream_sendmsg(). They are blocked on
+this condition:
+
+ (sc->order && priority < DEF_PRIORITY - 2)
+
+which was introduced in commit 78dc583d (vmscan: low order lumpy reclaim
+also should use PAGEOUT_IO_SYNC) one year ago. That condition may be too
+permissive. In Andreas' case, 512MB/1024 = 512KB. If the direct reclaim
+for the order-1 fork() allocation runs into a 512KB range of
+hard-to-reclaim LRU pages, it will stall.
+
+It's a severe problem in three ways.
+
+Firstly, it can easily happen in daily desktop usage. vmscan priority can
+easily go below (DEF_PRIORITY - 2) on _local_ memory pressure. Even if
+the system has 50% globally reclaimable pages, it can still easily
+contain 0.1%-sized hard-to-reclaim ranges. For example, a
+simple dd can easily create a big range (up to 20%) of dirty pages in the
+LRU lists. And order-1 to order-3 allocations are more than common with
+SLUB. Try "grep -v '1 :' /proc/slabinfo" to get the list of high order
+slab caches. For example, the order-1 radix_tree_node slab cache may
+stall applications at swap-in time; the order-3 inode cache on most
+filesystems may stall applications when trying to read some file; the
+order-2 proc_inode_cache may stall applications when trying to open a
+/proc file.
+
+Secondly, once triggered, it will stall unrelated processes (not doing IO
+at all) in the system. This "one slow USB device stalls the whole system"
+avalanching effect is very bad.
+
+Thirdly, once stalled, the stall time can be intolerably long for the
+user. When there are 20MB of queued writeback pages and a USB 1.1 device
+is writing them out at 1MB/s, wait_on_page_writeback() can be stuck for
+up to 20 seconds. Not to mention it may be called multiple times.
+
+So raise the bar to only enable PAGEOUT_IO_SYNC when priority goes below
+DEF_PRIORITY/3, or 6.25% LRU size. As the default dirty throttle ratio is
+20%, it will hardly be triggered by pure dirty pages. We'd better treat
+PAGEOUT_IO_SYNC as a last-resort workaround -- its stall time is
+uncomfortably long (it easily goes beyond 1s).
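+
+For a concrete feel of the new threshold, a pass at scan priority p
+looks at roughly lru_size >> p pages, so DEF_PRIORITY / 3 = 4 means a
+1/16 (6.25%) LRU window. A trivial user-space sketch of that arithmetic
+(illustration only, assuming the usual lru_size >> priority convention):
+
+#include <stdio.h>
+
+#define DEF_PRIORITY 12
+
+int main(void)
+{
+	int p = DEF_PRIORITY / 3;	/* threshold for non-costly orders */
+
+	/* scan window at priority p is roughly lru_size >> p */
+	printf("stall at priority <= %d, i.e. a 1/%d (%.2f%%) LRU window\n",
+	       p, 1 << p, 100.0 / (1 << p));
+	return 0;
+}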
+
+The bar is only raised for (order < PAGE_ALLOC_COSTLY_ORDER) allocations,
+which are easy to satisfy in 1TB memory boxes. So, although 6.25% of
+memory could be an awful lot of pages to scan on a system with 1TB of
+memory, it won't really have to busy scan that much.
+
+Andreas tested an older version of this patch and reported that it mostly
+fixed his problem. Mel Gorman helped improve it and KOSAKI Motohiro will
+fix it further in the next patch.
+
+Reported-by: Andreas Mohr <andi@lisas.de>
+Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
+Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
+Cc: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/vmscan.c | 53 +++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 45 insertions(+), 8 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1083,6 +1083,48 @@ static int too_many_isolated(struct zone
+ }
+
+ /*
++ * Returns true if the caller should wait to clean dirty/writeback pages.
++ *
++ * If we are direct reclaiming for contiguous pages and we do not reclaim
++ * everything in the list, try again and wait for writeback IO to complete.
++ * This will stall high-order allocations noticeably. Only do that when
++ * we really need to free the pages under high memory pressure.
++ */
++static inline bool should_reclaim_stall(unsigned long nr_taken,
++ unsigned long nr_freed,
++ int priority,
++ int lumpy_reclaim,
++ struct scan_control *sc)
++{
++ int lumpy_stall_priority;
++
++ /* kswapd should not stall on sync IO */
++ if (current_is_kswapd())
++ return false;
++
++ /* Only stall on lumpy reclaim */
++ if (!lumpy_reclaim)
++ return false;
++
++	/* If we have reclaimed everything on the isolated list, no stall */
++ if (nr_freed == nr_taken)
++ return false;
++
++ /*
++ * For high-order allocations, there are two stall thresholds.
++	 * High-cost allocations stall immediately whereas lower
++ * order allocations such as stacks require the scanning
++ * priority to be much higher before stalling.
++ */
++ if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
++ lumpy_stall_priority = DEF_PRIORITY;
++ else
++ lumpy_stall_priority = DEF_PRIORITY / 3;
++
++ return priority <= lumpy_stall_priority;
++}
++
++/*
+ * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
+ * of reclaimed pages
+ */
+@@ -1176,14 +1218,9 @@ static unsigned long shrink_inactive_lis
+ nr_scanned += nr_scan;
+ nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+
+- /*
+- * If we are direct reclaiming for contiguous pages and we do
+- * not reclaim everything in the list, try again and wait
+- * for IO to complete. This will stall high-order allocations
+- * but that should be acceptable to the caller
+- */
+- if (nr_freed < nr_taken && !current_is_kswapd() &&
+- lumpy_reclaim) {
++	/* Check if we should synchronously wait for writeback */
++ if (should_reclaim_stall(nr_taken, nr_freed, priority,
++ lumpy_reclaim, sc)) {
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+ /*