From: Greg Kroah-Hartman
Date: Mon, 23 Aug 2010 22:46:07 +0000 (-0700)
Subject: .34 patches
X-Git-Tag: v2.6.32.21~26
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4ffc47bc57b496b414e6962f878644893d596974;p=thirdparty%2Fkernel%2Fstable-queue.git

.34 patches
---

diff --git a/queue-2.6.34/isdn-gigaset-add-missing-unlock.patch b/queue-2.6.34/isdn-gigaset-add-missing-unlock.patch
new file mode 100644
index 00000000000..a58716446f7
--- /dev/null
+++ b/queue-2.6.34/isdn-gigaset-add-missing-unlock.patch
@@ -0,0 +1,31 @@
+From 7e27a0aeb98d53539bdc38384eee899d6db62617 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter
+Date: Thu, 5 Aug 2010 22:23:23 +0000
+Subject: isdn: gigaset: add missing unlock
+
+From: Dan Carpenter
+
+commit 7e27a0aeb98d53539bdc38384eee899d6db62617 upstream.
+
+We should unlock here.  This is the only place where we return from the
+function with the lock held.  The caller isn't expecting it.
+
+Signed-off-by: Dan Carpenter
+Signed-off-by: David S. Miller
+Signed-off-by: Tilman Schmidt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/isdn/gigaset/capi.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/isdn/gigaset/capi.c
++++ b/drivers/isdn/gigaset/capi.c
+@@ -1055,6 +1055,7 @@ static inline void remove_appl_from_chan
+ 	do {
+ 		if (bcap->bcnext == ap) {
+ 			bcap->bcnext = bcap->bcnext->bcnext;
++			spin_unlock_irqrestore(&bcs->aplock, flags);
+ 			return;
+ 		}
+ 		bcap = bcap->bcnext;
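The rule this fix enforces: every exit path out of a critical section must drop the lock taken on entry, including an early return buried inside a loop. A minimal standalone sketch of the pattern, using simplified hypothetical names rather than the actual gigaset structures:

#include <linux/spinlock.h>

struct appl {
	struct appl *bcnext;
};

struct channel {
	spinlock_t aplock;
	struct appl *ap;	/* head of singly linked application list */
};

/* Unlink ap from ch's list; note every return path releases aplock. */
static void remove_appl(struct channel *ch, struct appl *ap)
{
	struct appl *cur;
	unsigned long flags;

	spin_lock_irqsave(&ch->aplock, flags);
	for (cur = ch->ap; cur; cur = cur->bcnext) {
		if (cur->bcnext == ap) {
			cur->bcnext = ap->bcnext;
			/*
			 * The class of bug fixed above: an early return
			 * that skips this unlock leaves aplock held and
			 * deadlocks the next acquirer.
			 */
			spin_unlock_irqrestore(&ch->aplock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ch->aplock, flags);
}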
diff --git a/queue-2.6.34/isdn-gigaset-reduce-syslog-spam.patch b/queue-2.6.34/isdn-gigaset-reduce-syslog-spam.patch
new file mode 100644
index 00000000000..50a2721608f
--- /dev/null
+++ b/queue-2.6.34/isdn-gigaset-reduce-syslog-spam.patch
@@ -0,0 +1,90 @@
+From 7d060ed2877ff6d00e7238226edbaf91493d6d0b Mon Sep 17 00:00:00 2001
+From: Tilman Schmidt
+Date: Mon, 5 Jul 2010 14:19:14 +0000
+Subject: isdn/gigaset: reduce syslog spam
+
+From: Tilman Schmidt
+
+commit 7d060ed2877ff6d00e7238226edbaf91493d6d0b upstream.
+
+Downgrade some error messages which occur frequently during
+normal operation to debug messages.
+
+Impact: logging
+Signed-off-by: Tilman Schmidt
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/isdn/gigaset/capi.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/isdn/gigaset/capi.c
++++ b/drivers/isdn/gigaset/capi.c
+@@ -389,13 +389,13 @@ void gigaset_skb_sent(struct bc_state *b
+ 	++bcs->trans_up;
+
+ 	if (!ap) {
+-		dev_err(cs->dev, "%s: no application\n", __func__);
++		gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
+ 		return;
+ 	}
+
+ 	/* don't send further B3 messages if disconnected */
+ 	if (bcs->apconnstate < APCONN_ACTIVE) {
+-		gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
++		gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
+ 		return;
+ 	}
+
+@@ -433,13 +433,14 @@ void gigaset_skb_rcvd(struct bc_state *b
+ 	bcs->trans_down++;
+
+ 	if (!ap) {
+-		dev_err(cs->dev, "%s: no application\n", __func__);
++		gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
++		dev_kfree_skb_any(skb);
+ 		return;
+ 	}
+
+ 	/* don't send further B3 messages if disconnected */
+ 	if (bcs->apconnstate < APCONN_ACTIVE) {
+-		gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
++		gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
+ 		dev_kfree_skb_any(skb);
+ 		return;
+ 	}
+@@ -758,7 +759,7 @@ void gigaset_isdn_connD(struct bc_state
+ 	ap = bcs->ap;
+ 	if (!ap) {
+ 		spin_unlock_irqrestore(&bcs->aplock, flags);
+-		dev_err(cs->dev, "%s: no application\n", __func__);
++		gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
+ 		return;
+ 	}
+ 	if (bcs->apconnstate == APCONN_NONE) {
+@@ -854,7 +855,7 @@ void gigaset_isdn_connB(struct bc_state
+ 	ap = bcs->ap;
+ 	if (!ap) {
+ 		spin_unlock_irqrestore(&bcs->aplock, flags);
+-		dev_err(cs->dev, "%s: no application\n", __func__);
++		gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
+ 		return;
+ 	}
+ 	if (!bcs->apconnstate) {
+@@ -912,13 +913,12 @@ void gigaset_isdn_connB(struct bc_state
+ */
+ void gigaset_isdn_hupB(struct bc_state *bcs)
+ {
+-	struct cardstate *cs = bcs->cs;
+ 	struct gigaset_capi_appl *ap = bcs->ap;
+
+ 	/* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
+
+ 	if (!ap) {
+-		dev_err(cs->dev, "%s: no application\n", __func__);
++		gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
+ 		return;
+ 	}
+
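The downgraded messages fire on normal per-packet events, so at error level they flood the log. The usual remedy, sketched below with a hypothetical macro (not the driver's actual gig_dbg implementation), is to gate such messages behind a runtime debug mask so they cost almost nothing unless explicitly enabled:

#include <linux/printk.h>
#include <linux/moduleparam.h>

#define MY_DEBUG_CMD	0x00020000	/* hypothetical category bits */
#define MY_DEBUG_MCMD	0x00040000

static unsigned int my_debuglevel;	/* off by default */
module_param(my_debuglevel, uint, 0644);

#define my_dbg(level, fmt, ...)						\
	do {								\
		if (my_debuglevel & (level))				\
			printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);	\
	} while (0)

With the mask left at zero, a per-packet my_dbg(MY_DEBUG_MCMD, ...) is a cheap test-and-skip instead of a syslog line.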
diff --git a/queue-2.6.34/oprofile-add-support-for-intel-processor-model-30.patch b/queue-2.6.34/oprofile-add-support-for-intel-processor-model-30.patch
new file mode 100644
index 00000000000..f8e0ff966a9
--- /dev/null
+++ b/queue-2.6.34/oprofile-add-support-for-intel-processor-model-30.patch
@@ -0,0 +1,65 @@
+From a7c55cbee0c1bae9bf5a15a08300e91d88706e45 Mon Sep 17 00:00:00 2001
+From: Josh Hunt
+Date: Wed, 4 Aug 2010 20:27:05 -0400
+Subject: oprofile: add support for Intel processor model 30
+
+From: Josh Hunt
+
+commit a7c55cbee0c1bae9bf5a15a08300e91d88706e45 upstream.
+
+Newer Intel processors identifying themselves as model 30 are not recognized by
+oprofile.
+
+model		: 30
+model name	: Intel(R) Xeon(R) CPU X3470 @ 2.93GHz
+
+Running oprofile on these machines gives the following:
++ opcontrol --init
++ opcontrol --list-events
+oprofile: available events for CPU type "Intel Architectural Perfmon"
+
+See Intel 64 and IA-32 Architectures Software Developer's Manual
+Volume 3B (Document 253669) Chapter 18 for architectural perfmon events
+This is a limited set of fallback events because oprofile doesn't know your CPU
+CPU_CLK_UNHALTED: (counter: all)
+	Clock cycles when not halted (min count: 6000)
+INST_RETIRED: (counter: all)
+	number of instructions retired (min count: 6000)
+LLC_MISSES: (counter: all)
+	Last level cache demand requests from this core that missed the LLC
+(min count: 6000)
+	Unit masks (default 0x41)
+	----------
+	0x41: No unit mask
+LLC_REFS: (counter: all)
+	Last level cache demand requests from this core (min count: 6000)
+	Unit masks (default 0x4f)
+	----------
+	0x4f: No unit mask
+BR_MISS_PRED_RETIRED: (counter: all)
+	number of mispredicted branches retired (precise) (min count: 500)
++ opcontrol --shutdown
+
+Tested using oprofile 0.9.6.
+
+Signed-off-by: Josh Hunt
+Reviewed-by: Andi Kleen
+Signed-off-by: Robert Richter
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/oprofile/nmi_int.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -618,6 +618,7 @@ static int __init ppro_init(char **cpu_t
+ 		*cpu_type = "i386/core_2";
+ 		break;
+ 	case 0x1a:
++	case 0x1e:
+ 	case 0x2e:
+ 		spec = &op_arch_perfmon_spec;
+ 		*cpu_type = "i386/core_i7";
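Both this patch and the next one key off the CPUID "display model". As a rough illustration (a standalone sketch, not oprofile code), the model numbers tested in ppro_init()'s switch come from combining the 4-bit model field of CPUID leaf 1 with the extended-model field:

#include <stdio.h>

/* Decode the display model from the EAX value returned by CPUID leaf 1. */
static unsigned int display_model(unsigned int eax)
{
	unsigned int family = (eax >> 8) & 0xf;
	unsigned int model = (eax >> 4) & 0xf;
	unsigned int ext_model = (eax >> 16) & 0xf;

	/* extended model only applies to families 6 and 15 */
	if (family == 0x6 || family == 0xf)
		model |= ext_model << 4;
	return model;
}

int main(void)
{
	/*
	 * 0x000106e5 is the leaf-1 EAX signature of a Lynnfield part such
	 * as the Xeon X3470: family 6, extended model 1, model 0xe,
	 * giving display model 0x1e (decimal 30) -- the case added above.
	 */
	printf("display model 0x%x\n", display_model(0x000106e5));
	return 0;
}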
diff --git a/queue-2.6.34/oprofile-change-cpuids-from-decimal-to-hex-and-add-some-comments.patch b/queue-2.6.34/oprofile-change-cpuids-from-decimal-to-hex-and-add-some-comments.patch
new file mode 100644
index 00000000000..b7444ac2a9a
--- /dev/null
+++ b/queue-2.6.34/oprofile-change-cpuids-from-decimal-to-hex-and-add-some-comments.patch
@@ -0,0 +1,61 @@
+From 45c34e05c4e3d36e7c44e790241ea11a1d90d54e Mon Sep 17 00:00:00 2001
+From: John Villalovos
+Date: Fri, 7 May 2010 12:41:40 -0400
+Subject: Oprofile: Change CPUIDS from decimal to hex, and add some comments
+
+From: John Villalovos
+
+commit 45c34e05c4e3d36e7c44e790241ea11a1d90d54e upstream.
+
+Back when the patch was submitted for "Add Xeon 7500 series support to
+oprofile", Robert Richter had asked for a follow-on patch that
+converted all the CPU ID values to hex.
+
+I have done that here for the "i386/core_i7" and "i386/atom" class
+processors in the ppro_init() function and also added some comments on
+where to find documentation on the Intel processors.
+
+Signed-off-by: John L. Villalovos
+Signed-off-by: Robert Richter
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/oprofile/nmi_int.c |   16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -584,6 +584,18 @@ static int __init ppro_init(char **cpu_t
+ 	if (force_arch_perfmon && cpu_has_arch_perfmon)
+ 		return 0;
+
++	/*
++	 * Documentation on identifying Intel processors by CPU family
++	 * and model can be found in the Intel Software Developer's
++	 * Manuals (SDM):
++	 *
++	 *  http://www.intel.com/products/processor/manuals/
++	 *
++	 * As of May 2010 the documentation for this was in the:
++	 * "Intel 64 and IA-32 Architectures Software Developer's
++	 * Manual Volume 3B: System Programming Guide", "Table B-1
++	 * CPUID Signature Values of DisplayFamily_DisplayModel".
++	 */
+ 	switch (cpu_model) {
+ 	case 0 ... 2:
+ 		*cpu_type = "i386/ppro";
+@@ -605,12 +617,12 @@ static int __init ppro_init(char **cpu_t
+ 	case 15: case 23:
+ 		*cpu_type = "i386/core_2";
+ 		break;
++	case 0x1a:
+ 	case 0x2e:
+-	case 26:
+ 		spec = &op_arch_perfmon_spec;
+ 		*cpu_type = "i386/core_i7";
+ 		break;
+-	case 28:
++	case 0x1c:
+ 		*cpu_type = "i386/atom";
+ 		break;
+ 	default:
diff --git a/queue-2.6.34/pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch b/queue-2.6.34/pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch
new file mode 100644
index 00000000000..60eedc7b47d
--- /dev/null
+++ b/queue-2.6.34/pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch
@@ -0,0 +1,57 @@
+From 127c03cdbad9bd5af5d7f33bd31a1015a90cb77f Mon Sep 17 00:00:00 2001
+From: Dominik Brodowski
+Date: Tue, 3 Aug 2010 09:33:45 +0200
+Subject: pcmcia: avoid buffer overflow in pcmcia_setup_isa_irq
+
+From: Dominik Brodowski
+
+commit 127c03cdbad9bd5af5d7f33bd31a1015a90cb77f upstream.
+
+NR_IRQS may be as low as 16, causing a (harmless?) buffer overflow in
+pcmcia_setup_isa_irq():
+
+static u8 pcmcia_used_irq[NR_IRQS];
+
+...
+
+		if ((try < 32) && pcmcia_used_irq[irq])
+			continue;
+
+This is read-only, so if this address were non-zero, it would just
+mean we would not attempt an IRQ >= NR_IRQS -- which would fail anyway!
+And as request_irq() fails for an irq >= NR_IRQS, the setting code path:
+
+			pcmcia_used_irq[irq]++;
+
+is never reached as well.
+
+Reported-by: Christoph Fritz
+Signed-off-by: Dominik Brodowski
+Signed-off-by: Christoph Fritz
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/pcmcia/pcmcia_resource.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/pcmcia/pcmcia_resource.c
++++ b/drivers/pcmcia/pcmcia_resource.c
+@@ -41,7 +41,7 @@ module_param(io_speed, int, 0444);
+ #ifdef CONFIG_PCMCIA_PROBE
+ #include <asm/irq.h>
+ /* mask of IRQs already reserved by other cards, we should avoid using them */
+-static u8 pcmcia_used_irq[NR_IRQS];
++static u8 pcmcia_used_irq[32];
+ #endif
+
+ static int pcmcia_adjust_io_region(struct resource *res, unsigned long start,
+@@ -768,6 +768,9 @@ int pcmcia_request_irq(struct pcmcia_dev
+ 	for (try = 0; try < 64; try++) {
+ 		irq = try % 32;
+
++		if (irq > NR_IRQS)
++			continue;
++
+ 		/* marked as available by driver, and not blocked by userspace? */
+ 		if (!((mask >> irq) & 1))
+ 			continue;
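The overflow and its guard are easy to see in reduced form. A simplified standalone sketch (hypothetical names, and using an inclusive >= bounds check): the usage array is sized to the full 32-value probe range instead of NR_IRQS, and out-of-range IRQs are skipped before any array access:

#define MY_NR_IRQS 16			/* NR_IRQS can be this small */

static unsigned char used_irq[32];	/* full probe range, not MY_NR_IRQS */

/* Return the first usable IRQ in mask, or -1; never indexes past used_irq. */
static int pick_irq(unsigned int mask)
{
	int try, irq;

	for (try = 0; try < 64; try++) {
		irq = try % 32;
		if (irq >= MY_NR_IRQS)	/* request_irq() would fail anyway */
			continue;
		if (!((mask >> irq) & 1))
			continue;
		if (try < 32 && used_irq[irq])
			continue;
		return irq;
	}
	return -1;
}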
diff --git a/queue-2.6.34/series b/queue-2.6.34/series
index 73eda4b9a3c..b43acefc152 100644
--- a/queue-2.6.34/series
+++ b/queue-2.6.34/series
@@ -59,3 +59,9 @@ net-fix-napi_gro_frags-vs-netpoll-path.patch
 net-fix-a-memmove-bug-in-dev_gro_receive.patch
 pkt_sched-fix-sch_sfq-vs-tcf_bind_filter-oops.patch
 pkt_sched-fix-sch_sfq-vs-tc_modify_qdisc-oops.patch
+vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch
+pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch
+isdn-gigaset-reduce-syslog-spam.patch
+isdn-gigaset-add-missing-unlock.patch
+oprofile-change-cpuids-from-decimal-to-hex-and-add-some-comments.patch
+oprofile-add-support-for-intel-processor-model-30.patch
diff --git a/queue-2.6.34/vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch b/queue-2.6.34/vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch
new file mode 100644
index 00000000000..6e1090d9bbf
--- /dev/null
+++ b/queue-2.6.34/vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch
@@ -0,0 +1,161 @@
+From e31f3698cd3499e676f6b0ea12e3528f569c4fa3 Mon Sep 17 00:00:00 2001
+From: Wu Fengguang
+Date: Mon, 9 Aug 2010 17:20:01 -0700
+Subject: vmscan: raise the bar to PAGEOUT_IO_SYNC stalls
+
+From: Wu Fengguang
+
+commit e31f3698cd3499e676f6b0ea12e3528f569c4fa3 upstream.
+
+Fix "system goes unresponsive under memory pressure and lots of
+dirty/writeback pages" bug.
+
+	http://lkml.org/lkml/2010/4/4/86
+
+In the above thread, Andreas Mohr described that
+
+	Invoking any command locked up for minutes (note that I'm
+	talking about attempted additional I/O to the _other_,
+	_unaffected_ main system HDD - such as loading some shell
+	binaries -, NOT the external SSD18M!!).
+
+This happens when the two conditions are both met:
+- under memory pressure
+- writing heavily to a slow device
+
+OOM also happens in Andreas' system.  The OOM trace shows that 3 processes
+are stuck in wait_on_page_writeback() in the direct reclaim path.  One in
+do_fork() and the other two in unix_stream_sendmsg().  They are blocked on
+this condition:
+
+	(sc->order && priority < DEF_PRIORITY - 2)
+
+which was introduced in commit 78dc583d (vmscan: low order lumpy reclaim
+also should use PAGEOUT_IO_SYNC) one year ago.  That condition may be too
+permissive.  In Andreas' case, 512MB/1024 = 512KB.  If the direct reclaim
+for the order-1 fork() allocation runs into a range of 512KB
+hard-to-reclaim LRU pages, it will be stalled.
+
+It's a severe problem in three ways.
+
+Firstly, it can easily happen in daily desktop usage.  vmscan priority can
+easily go below (DEF_PRIORITY - 2) on _local_ memory pressure.  Even if
+the system has 50% globally reclaimable pages, it still has good
+opportunity to have 0.1% sized hard-to-reclaim ranges.  For example, a
+simple dd can easily create a big range (up to 20%) of dirty pages in the
+LRU lists.  And order-1 to order-3 allocations are more than common with
+SLUB.  Try "grep -v '1 :' /proc/slabinfo" to get the list of high order
+slab caches.  For example, the order-1 radix_tree_node slab cache may
+stall applications at swap-in time; the order-3 inode cache on most
+filesystems may stall applications when trying to read some file; the
+order-2 proc_inode_cache may stall applications when trying to open a
+/proc file.
+
+Secondly, once triggered, it will stall unrelated processes (not doing IO
+at all) in the system.  This "one slow USB device stalls the whole system"
+avalanching effect is very bad.
+
+Thirdly, once stalled, the stall time could be intolerably long for the
+users.  When there are 20MB queued writeback pages and USB 1.1 is writing
+them at 1MB/s, wait_on_page_writeback() will be stuck for up to 20 seconds.
+Not to mention it may be called multiple times.
+
+So raise the bar to only enable PAGEOUT_IO_SYNC when priority goes below
+DEF_PRIORITY/3, or 6.25% LRU size.  As the default dirty throttle ratio is
+20%, it will hardly be triggered by pure dirty pages.  We'd better treat
+PAGEOUT_IO_SYNC as some last resort workaround -- its stall time is so
+uncomfortably long (easily goes beyond 1s).
+
+The bar is only raised for (order < PAGE_ALLOC_COSTLY_ORDER) allocations,
+which are easy to satisfy in 1TB memory boxes.  So, although 6.25% of
+memory could be an awful lot of pages to scan on a system with 1TB of
+memory, it won't really have to busy scan that much.
+
+Andreas tested an older version of this patch and reported that it mostly
+fixed his problem.  Mel Gorman helped improve it and KOSAKI Motohiro will
+fix it further in the next patch.
+
+Reported-by: Andreas Mohr
+Reviewed-by: Minchan Kim
+Reviewed-by: KOSAKI Motohiro
+Signed-off-by: Mel Gorman
+Signed-off-by: Wu Fengguang
+Cc: Rik van Riel
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Jiri Slaby
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/vmscan.c |   53 +++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 45 insertions(+), 8 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1118,6 +1118,48 @@ static int too_many_isolated(struct zone
+ }
+
+ /*
++ * Returns true if the caller should wait to clean dirty/writeback pages.
++ *
++ * If we are direct reclaiming for contiguous pages and we do not reclaim
++ * everything in the list, try again and wait for writeback IO to complete.
++ * This will stall high-order allocations noticeably. Only do that when we
++ * really need to free the pages under high memory pressure.
++ */
++static inline bool should_reclaim_stall(unsigned long nr_taken,
++					unsigned long nr_freed,
++					int priority,
++					int lumpy_reclaim,
++					struct scan_control *sc)
++{
++	int lumpy_stall_priority;
++
++	/* kswapd should not stall on sync IO */
++	if (current_is_kswapd())
++		return false;
++
++	/* Only stall on lumpy reclaim */
++	if (!lumpy_reclaim)
++		return false;
++
++	/* If we have reclaimed everything on the isolated list, no stall */
++	if (nr_freed == nr_taken)
++		return false;
++
++	/*
++	 * For high-order allocations, there are two stall thresholds.
++	 * High-cost allocations stall immediately whereas lower
++	 * order allocations such as stacks require the scanning
++	 * priority to be much higher before stalling.
++	 */
++	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
++		lumpy_stall_priority = DEF_PRIORITY;
++	else
++		lumpy_stall_priority = DEF_PRIORITY / 3;
++
++	return priority <= lumpy_stall_priority;
++}
++
++/*
+ * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
+ * of reclaimed pages
+ */
+@@ -1209,14 +1251,9 @@ static unsigned long shrink_inactive_lis
+ 		nr_scanned += nr_scan;
+ 		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+
+-		/*
+-		 * If we are direct reclaiming for contiguous pages and we do
+-		 * not reclaim everything in the list, try again and wait
+-		 * for IO to complete.  This will stall high-order allocations
+-		 * but that should be acceptable to the caller
+-		 */
+-		if (nr_freed < nr_taken && !current_is_kswapd() &&
+-		    lumpy_reclaim) {
++		/* Check if we should synchronously wait for writeback */
++		if (should_reclaim_stall(nr_taken, nr_freed, priority,
++					lumpy_reclaim, sc)) {
+ 			congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+			/*