.33 patches
author    Greg Kroah-Hartman <gregkh@suse.de>
          Mon, 29 Mar 2010 21:32:17 +0000 (14:32 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
          Mon, 29 Mar 2010 21:32:17 +0000 (14:32 -0700)
queue-2.6.33/ath9k-fix-bug_on-triggered-by-pae-frames.patch [new file with mode: 0644]
queue-2.6.33/cpuset-fix-the-problem-that-cpuset_mem_spread_node-returns-an-offline-node.patch [new file with mode: 0644]
queue-2.6.33/genirq-prevent-oneshot-irq-thread-race.patch [new file with mode: 0644]
queue-2.6.33/nilfs2-fix-hang-up-of-cleaner-after-log-writer-returned-with-error.patch [new file with mode: 0644]
queue-2.6.33/series
queue-2.6.33/softlockup-stop-spurious-softlockup-messages-due-to-overflow.patch [new file with mode: 0644]

diff --git a/queue-2.6.33/ath9k-fix-bug_on-triggered-by-pae-frames.patch b/queue-2.6.33/ath9k-fix-bug_on-triggered-by-pae-frames.patch
new file mode 100644
index 0000000..380baa1
--- /dev/null
@@ -0,0 +1,86 @@
+From 4fdec031b9169b3c17938b9c4168f099f457169c Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@openwrt.org>
+Date: Fri, 12 Mar 2010 04:02:43 +0100
+Subject: ath9k: fix BUG_ON triggered by PAE frames
+
+From: Felix Fietkau <nbd@openwrt.org>
+
+commit 4fdec031b9169b3c17938b9c4168f099f457169c upstream.
+
+When I initially stumbled upon sequence number problems with PAE frames
+in ath9k, I submitted a patch to remove all special cases for PAE
+frames and let them go through the normal transmit path.
+Out of concern about crypto incompatibility issues, this change was
+merged instead:
+
+commit 6c8afef551fef87a3bf24f8a74c69a7f2f72fc82
+Author: Sujith <Sujith.Manoharan@atheros.com>
+Date:   Tue Feb 9 10:07:00 2010 +0530
+
+    ath9k: Fix sequence numbers for PAE frames
+
+After a lot of testing, I'm able to reliably trigger a driver crash on
+rekeying with current versions with this change in place.
+It seems that the driver does not support sending out regular MPDUs with
+the same TID while an A-MPDU session is active.
+This leads to duplicate entries in the TID Tx buffer, which hits the
+following BUG_ON in ath_tx_addto_baw():
+
+    index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
+    cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+
+    BUG_ON(tid->tx_buf[cindex] != NULL);
+
+I believe that until we actually have a reproducible case of an
+incompatibility with another AP with these PAE special cases removed,
+we should simply get rid of this mess.
+
+This patch completely fixes my crash issues in STA mode and makes it
+stay connected without throughput drops or connectivity issues even
+when the AP is configured to a very short group rekey interval.
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/xmit.c |   21 +--------------------
+ 1 file changed, 1 insertion(+), 20 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1353,25 +1353,6 @@ static enum ath9k_pkt_type get_hw_packet
+       return htype;
+ }
+-static bool is_pae(struct sk_buff *skb)
+-{
+-      struct ieee80211_hdr *hdr;
+-      __le16 fc;
+-
+-      hdr = (struct ieee80211_hdr *)skb->data;
+-      fc = hdr->frame_control;
+-
+-      if (ieee80211_is_data(fc)) {
+-              if (ieee80211_is_nullfunc(fc) ||
+-                  /* Port Access Entity (IEEE 802.1X) */
+-                  (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+-                      return true;
+-              }
+-      }
+-
+-      return false;
+-}
+-
+ static int get_hw_crypto_keytype(struct sk_buff *skb)
+ {
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+@@ -1701,7 +1682,7 @@ static void ath_tx_start_dma(struct ath_
+                       goto tx_done;
+               }
+-              if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
++              if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+                       /*
+                        * Try aggregation if it's a unicast data frame
+                        * and the destination is HT capable.
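
For readers who want to see the collision concretely, the sketch below is a
toy userspace model, not ath9k code. The constants mirror the 2.6.33 macros
(ATH_TID_MAX_BUFS = 64, a 4096-entry sequence space), and the duplicate
sequence number stands in for what happens when a PAE frame bypasses the
A-MPDU path mid-session:

    #include <assert.h>
    #include <stddef.h>

    #define ATH_TID_MAX_BUFS 64
    #define ATH_BA_INDEX(start, seqno) (((seqno) - (start)) & 4095)

    struct ath_buf { int bf_seqno; };

    static struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];

    static void tx_addto_baw(int seq_start, int baw_head, struct ath_buf *bf)
    {
            int index  = ATH_BA_INDEX(seq_start, bf->bf_seqno);
            int cindex = (baw_head + index) & (ATH_TID_MAX_BUFS - 1);

            /* models the BUG_ON(tid->tx_buf[cindex] != NULL) quoted above */
            assert(tx_buf[cindex] == NULL);
            tx_buf[cindex] = bf;
    }

    int main(void)
    {
            struct ath_buf ampdu_frame = { .bf_seqno = 10 };
            struct ath_buf pae_frame   = { .bf_seqno = 10 }; /* duplicate */

            tx_addto_baw(0, 0, &ampdu_frame);
            tx_addto_baw(0, 0, &pae_frame); /* second insert trips the assert */
            return 0;
    }
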
diff --git a/queue-2.6.33/cpuset-fix-the-problem-that-cpuset_mem_spread_node-returns-an-offline-node.patch b/queue-2.6.33/cpuset-fix-the-problem-that-cpuset_mem_spread_node-returns-an-offline-node.patch
new file mode 100644
index 0000000..d5c0925
--- /dev/null
@@ -0,0 +1,106 @@
+From 5ab116c9349ef52d6fbd2e2917a53f13194b048e Mon Sep 17 00:00:00 2001
+From: Miao Xie <miaox@cn.fujitsu.com>
+Date: Tue, 23 Mar 2010 13:35:34 -0700
+Subject: cpuset: fix the problem that cpuset_mem_spread_node() returns an offline node
+
+From: Miao Xie <miaox@cn.fujitsu.com>
+
+commit 5ab116c9349ef52d6fbd2e2917a53f13194b048e upstream.
+
+cpuset_mem_spread_node() returns an offline node, and causes an oops.
+
+This patch fixes it by initializing task->mems_allowed to
+node_states[N_HIGH_MEMORY], and updating task->mems_allowed when doing
+memory hotplug.
+
+Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Reported-by: Nick Piggin <npiggin@suse.de>
+Tested-by: Nick Piggin <npiggin@suse.de>
+Cc: Paul Menage <menage@google.com>
+Cc: Li Zefan <lizf@cn.fujitsu.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ init/main.c      |    2 +-
+ kernel/cpuset.c  |   20 ++++++++++++--------
+ kernel/kthread.c |    2 +-
+ 3 files changed, 14 insertions(+), 10 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -846,7 +846,7 @@ static int __init kernel_init(void * unu
+       /*
+        * init can allocate pages on any node
+        */
+-      set_mems_allowed(node_possible_map);
++      set_mems_allowed(node_states[N_HIGH_MEMORY]);
+       /*
+        * init can run on any cpu.
+        */
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -920,9 +920,6 @@ static int update_cpumask(struct cpuset
+  *    call to guarantee_online_mems(), as we know no one is changing
+  *    our task's cpuset.
+  *
+- *    Hold callback_mutex around the two modifications of our tasks
+- *    mems_allowed to synchronize with cpuset_mems_allowed().
+- *
+  *    While the mm_struct we are migrating is typically from some
+  *    other task, the task_struct mems_allowed that we are hacking
+  *    is for our current task, which must allocate new pages for that
+@@ -1391,11 +1388,10 @@ static void cpuset_attach(struct cgroup_
+       if (cs == &top_cpuset) {
+               cpumask_copy(cpus_attach, cpu_possible_mask);
+-              to = node_possible_map;
+       } else {
+               guarantee_online_cpus(cs, cpus_attach);
+-              guarantee_online_mems(cs, &to);
+       }
++      guarantee_online_mems(cs, &to);
+       /* do per-task migration stuff possibly for each in the threadgroup */
+       cpuset_attach_task(tsk, &to, cs);
+@@ -2090,15 +2086,23 @@ static int cpuset_track_online_cpus(stru
+ static int cpuset_track_online_nodes(struct notifier_block *self,
+                               unsigned long action, void *arg)
+ {
++      nodemask_t oldmems;
++
+       cgroup_lock();
+       switch (action) {
+       case MEM_ONLINE:
+-      case MEM_OFFLINE:
++              oldmems = top_cpuset.mems_allowed;
+               mutex_lock(&callback_mutex);
+               top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+               mutex_unlock(&callback_mutex);
+-              if (action == MEM_OFFLINE)
+-                      scan_for_empty_cpusets(&top_cpuset);
++              update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
++              break;
++      case MEM_OFFLINE:
++              /*
++               * needn't update top_cpuset.mems_allowed explicitly because
++               * scan_for_empty_cpusets() will update it.
++               */
++              scan_for_empty_cpusets(&top_cpuset);
+               break;
+       default:
+               break;
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -219,7 +219,7 @@ int kthreadd(void *unused)
+       set_task_comm(tsk, "kthreadd");
+       ignore_signals(tsk);
+       set_cpus_allowed_ptr(tsk, cpu_all_mask);
+-      set_mems_allowed(node_possible_map);
++      set_mems_allowed(node_states[N_HIGH_MEMORY]);
+       current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
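
The core of this fix is the choice of nodemask. As a toy model (plain
userspace C with made-up bitmasks, not kernel code): node_possible_map
covers every node that could ever be hot-plugged, while
node_states[N_HIGH_MEMORY] covers only nodes that currently have memory, so
spreading allocations over the former can select a memoryless node.

    #include <stdio.h>

    int main(void)
    {
            /* nodes 0-3 exist in the machine description ... */
            unsigned int node_possible_map = 0x0f;
            /* ... but only nodes 0-1 currently have memory online */
            unsigned int node_states_high_memory = 0x03;

            for (int node = 0; node < 4; node++) {
                    int possible = !!(node_possible_map & (1u << node));
                    int has_mem  = !!(node_states_high_memory & (1u << node));

                    if (possible && !has_mem)
                            printf("node %d: possible but memoryless; "
                                   "pre-patch spreading could pick it\n",
                                   node);
            }
            return 0;
    }
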
diff --git a/queue-2.6.33/genirq-prevent-oneshot-irq-thread-race.patch b/queue-2.6.33/genirq-prevent-oneshot-irq-thread-race.patch
new file mode 100644
index 0000000..42b06ee
--- /dev/null
@@ -0,0 +1,165 @@
+From 0b1adaa031a55e44f5dd942f234bf09d28e8a0d6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2010 19:45:54 +0100
+Subject: genirq: Prevent oneshot irq thread race
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 0b1adaa031a55e44f5dd942f234bf09d28e8a0d6 upstream.
+
+Lars-Peter pointed out that the oneshot threaded interrupt handler
+code has the following race:
+
+ CPU0                            CPU1
+ handle_level_irq(irq X)
+   mask_ack_irq(irq X)
+   handle_IRQ_event(irq X)
+     wake_up(thread_handler)
+                                 thread handler(irq X) runs
+                                 finalize_oneshot(irq X)
+                                 does not unmask due to
+                                 !(desc->status & IRQ_MASKED)
+
+ return from irq
+ does not unmask due to
+ (desc->status & IRQ_ONESHOT)
+
+This leaves the interrupt line masked forever.
+
+The reason for this is the inconsistent handling of the IRQ_MASKED
+flag. Instead of setting it in the mask function the oneshot support
+sets the flag after waking up the irq thread.
+
+The solution for this is to set/clear the IRQ_MASKED status whenever
+we mask/unmask an interrupt line. That's the easy part, but that
+cleanup opens another race:
+
+ CPU0                            CPU1
+ handle_level_irq(irq)
+   mask_ack_irq(irq)
+   handle_IRQ_event(irq)
+     wake_up(thread_handler)
+                                 thread handler(irq) runs
+                                 finalize_oneshot_irq(irq)
+                                 unmask(irq)
+     irq triggers again
+     handle_level_irq(irq)
+       mask_ack_irq(irq)
+     return from irq due to IRQ_INPROGRESS
+
+ return from irq
+ does not unmask due to
+ (desc->status & IRQ_ONESHOT)
+
+This requires that we synchronize finalize_oneshot_irq() with the
+primary handler. If IRQ_INPROGRESS is set, we wait until the primary
+handler on the other CPU has returned before unmasking the interrupt
+line again.
+
+We probably have never seen that problem because it does not happen on
+UP and on SMP the irqbalancer protects us by pinning the primary
+handler and the thread to the same CPU.
+
+Reported-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
+               if (desc->chip->ack)
+                       desc->chip->ack(irq);
+       }
++      desc->status |= IRQ_MASKED;
++}
++
++static inline void mask_irq(struct irq_desc *desc, int irq)
++{
++      if (desc->chip->mask) {
++              desc->chip->mask(irq);
++              desc->status |= IRQ_MASKED;
++      }
++}
++
++static inline void unmask_irq(struct irq_desc *desc, int irq)
++{
++      if (desc->chip->unmask) {
++              desc->chip->unmask(irq);
++              desc->status &= ~IRQ_MASKED;
++      }
+ }
+ /*
+@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
+       raw_spin_lock(&desc->lock);
+       desc->status &= ~IRQ_INPROGRESS;
+-      if (unlikely(desc->status & IRQ_ONESHOT))
+-              desc->status |= IRQ_MASKED;
+-      else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+-              desc->chip->unmask(irq);
++      if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
++              unmask_irq(desc, irq);
+ out_unlock:
+       raw_spin_unlock(&desc->lock);
+ }
+@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
+       action = desc->action;
+       if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
+               desc->status |= IRQ_PENDING;
+-              if (desc->chip->mask)
+-                      desc->chip->mask(irq);
++              mask_irq(desc, irq);
+               goto out;
+       }
+@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
+               irqreturn_t action_ret;
+               if (unlikely(!action)) {
+-                      desc->chip->mask(irq);
++                      mask_irq(desc, irq);
+                       goto out_unlock;
+               }
+@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
+               if (unlikely((desc->status &
+                              (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
+                             (IRQ_PENDING | IRQ_MASKED))) {
+-                      desc->chip->unmask(irq);
+-                      desc->status &= ~IRQ_MASKED;
++                      unmask_irq(desc, irq);
+               }
+               desc->status &= ~IRQ_PENDING;
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index eb6078c..69a3d7b 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -483,8 +483,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
+  */
+ static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+ {
++again:
+       chip_bus_lock(irq, desc);
+       raw_spin_lock_irq(&desc->lock);
++
++      /*
++       * Implausible though it may be we need to protect us against
++       * the following scenario:
++       *
++       * The thread is faster done than the hard interrupt handler
++       * on the other CPU. If we unmask the irq line then the
++       * interrupt can come in again and masks the line, leaves due
++       * to IRQ_INPROGRESS and the irq line is masked forever.
++       */
++      if (unlikely(desc->status & IRQ_INPROGRESS)) {
++              raw_spin_unlock_irq(&desc->lock);
++              chip_bus_sync_unlock(irq, desc);
++              cpu_relax();
++              goto again;
++      }
++
+       if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
+               desc->status &= ~IRQ_MASKED;
+               desc->chip->unmask(irq);
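
The first race above can be replayed as a toy model: plain function calls in
the racy interleaving instead of real CPUs and interrupts, with hw_masked
standing in for the hardware mask bit. The flag names follow the kernel; the
rest is made up.

    #include <stdio.h>

    #define IRQ_MASKED  0x1
    #define IRQ_ONESHOT 0x2

    static unsigned int status;
    static int hw_masked;

    /* hard-irq entry: ack and mask the line */
    static void mask_ack_irq(int patched)
    {
            hw_masked = 1;
            if (patched)            /* the patch: keep IRQ_MASKED in sync */
                    status |= IRQ_MASKED;
    }

    /* irq thread finishing before the hard irq handler has returned */
    static void finalize_oneshot(void)
    {
            if (status & IRQ_MASKED) {
                    status &= ~IRQ_MASKED;
                    hw_masked = 0;  /* chip->unmask(irq) */
            }
    }

    static void run(int patched)
    {
            status = IRQ_ONESHOT;
            hw_masked = 0;
            mask_ack_irq(patched);
            finalize_oneshot();
            /* hard-irq exit skips the unmask because IRQ_ONESHOT is set */
            printf("%s: line left %s\n",
                   patched ? "post-patch" : "pre-patch",
                   hw_masked ? "masked (stuck)" : "unmasked");
    }

    int main(void)
    {
            run(0);
            run(1);
            return 0;
    }
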
diff --git a/queue-2.6.33/nilfs2-fix-hang-up-of-cleaner-after-log-writer-returned-with-error.patch b/queue-2.6.33/nilfs2-fix-hang-up-of-cleaner-after-log-writer-returned-with-error.patch
new file mode 100644
index 0000000..617e757
--- /dev/null
@@ -0,0 +1,41 @@
+From 110d735a0ae69bdd11af9acb6ea3b979137eb118 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Date: Mon, 22 Mar 2010 21:36:06 +0900
+Subject: nilfs2: fix hang-up of cleaner after log writer returned with error
+
+From: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+
+commit 110d735a0ae69bdd11af9acb6ea3b979137eb118 upstream.
+
+According to the report from Andreas Beckmann (Message-ID:
+<4BA54677.3090902@abeckmann.de>), nilfs in 2.6.33 kernel got stuck
+after a disk full error.
+
+This turned out to be a regression by log writer updates merged at
+kernel 2.6.33.  nilfs_segctor_abort_construction, which is a cleanup
+function for erroneous cases, was skipping writeback completion for
+some logs.
+
+This fixes the bug and would resolve the hang issue.
+
+Reported-by: Andreas Beckmann <debian@abeckmann.de>
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nilfs2/segment.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1902,8 +1902,7 @@ static void nilfs_segctor_abort_construc
+       list_splice_tail_init(&sci->sc_write_logs, &logs);
+       ret = nilfs_wait_on_logs(&logs);
+-      if (ret)
+-              nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
++      nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err);
+       list_splice_tail_init(&sci->sc_segbufs, &logs);
+       nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
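
One detail of the replacement line worth spelling out: "ret ? : err" is the
GCC conditional with an omitted middle operand, equivalent to
"ret ? ret : err" -- prefer the error from nilfs_wait_on_logs(), fall back
to the log writer's original error. A minimal userspace demonstration with
made-up values:

    #include <stdio.h>

    int main(void)
    {
            int err = -28;   /* original log-writer error, e.g. -ENOSPC */
            int ret = 0;     /* nilfs_wait_on_logs() itself succeeded */

            int abort_err = ret ? : err; /* GCC: same as ret ? ret : err */

            /* post-patch, nilfs_abort_logs() runs unconditionally */
            printf("abort logs with error %d\n", abort_err);
            return 0;
    }
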
diff --git a/queue-2.6.33/series b/queue-2.6.33/series
index a21b242974749c45d757cfa99ad942598b7ff700..2227b6512bf3a72856de396716cf6e936bd82944 100644
--- a/queue-2.6.33/series
+++ b/queue-2.6.33/series
@@ -90,3 +90,8 @@ pci-fix-return-value-from-pcix_get_max_mmrbc.patch
 pci-fix-access-of-pci_x_cmd-by-pcix-get-and-set-mmrbc-functions.patch
 pci-cleanup-error-return-for-pcix-get-and-set-mmrbc-functions.patch
 pci-quirk-rs780-rs880-work-around-missing-msi-initialization.patch
+ath9k-fix-bug_on-triggered-by-pae-frames.patch
+cpuset-fix-the-problem-that-cpuset_mem_spread_node-returns-an-offline-node.patch
+nilfs2-fix-hang-up-of-cleaner-after-log-writer-returned-with-error.patch
+genirq-prevent-oneshot-irq-thread-race.patch
+softlockup-stop-spurious-softlockup-messages-due-to-overflow.patch
diff --git a/queue-2.6.33/softlockup-stop-spurious-softlockup-messages-due-to-overflow.patch b/queue-2.6.33/softlockup-stop-spurious-softlockup-messages-due-to-overflow.patch
new file mode 100644
index 0000000..5a11623
--- /dev/null
@@ -0,0 +1,41 @@
+From 8c2eb4805d422bdbf60ba00ff233c794d23c3c00 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Fri, 19 Mar 2010 10:28:02 +0000
+Subject: softlockup: Stop spurious softlockup messages due to overflow
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 8c2eb4805d422bdbf60ba00ff233c794d23c3c00 upstream.
+
+Ensure additions on touch_ts do not overflow.  This can occur
+when the top 32 bits of the TSC reach 0xffffffff causing
+additions to touch_ts to overflow and this in turn generates
+spurious softlockup warnings.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+LKML-Reference: <1268994482.1798.6.camel@lenovo>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/softlockup.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/softlockup.c
++++ b/kernel/softlockup.c
+@@ -155,11 +155,11 @@ void softlockup_tick(void)
+        * Wake up the high-prio watchdog task twice per
+        * threshold timespan.
+        */
+-      if (now > touch_ts + softlockup_thresh/2)
++      if (time_after(now - softlockup_thresh/2, touch_ts))
+               wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
+       /* Warn about unreasonable delays: */
+-      if (now <= (touch_ts + softlockup_thresh))
++      if (time_before_eq(now - softlockup_thresh, touch_ts))
+               return;
+       per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
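
The wraparound is easy to reproduce in userspace. In the sketch below
(made-up values; time_after() simplified from the kernel's jiffies macro,
without the type checking), "touch_ts + thresh" wraps to a small number, so
the naive comparison fires even though the watchdog was touched moments ago,
while the signed-difference comparison stays quiet:

    #include <stdio.h>

    typedef unsigned int u32;

    #define time_after(a, b) ((int)((b) - (a)) < 0)

    int main(void)
    {
            u32 thresh   = 0x80;
            u32 touch_ts = 0xffffffc0u;     /* touched just below the wrap */
            u32 now      = touch_ts + 0x10; /* 0x10 ticks later: not hung */

            if (now > touch_ts + thresh)    /* pre-patch check, wraps */
                    printf("pre-patch:  spurious softlockup warning\n");

            if (time_after(now - thresh, touch_ts))
                    printf("post-patch: warning\n");
            else
                    printf("post-patch: correctly quiet\n");

            return 0;
    }
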