3.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 16 May 2013 21:49:09 +0000 (17:49 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 16 May 2013 21:49:09 +0000 (17:49 -0400)
added patches:
ath9k-fix-key-allocation-error-handling-for-powersave-keys.patch
b43-handle-dma-rx-descriptor-underrun.patch
drm-don-t-check-modeset-locks-in-panic-handler.patch
drm-mm-fix-dump-table-bug.patch
ext4-limit-group-search-loop-for-non-extent-files.patch
mwifiex-clear-is_suspended-flag-when-interrupt-is-received-early.patch
mwifiex-fix-setting-of-multicast-filter.patch
powerpc-bring-all-threads-online-prior-to-migration-hibernation.patch
powerpc-kexec-fix-kexec-when-using-vmx-optimised-memcpy.patch
tick-cleanup-nohz-per-cpu-data-on-cpu-down.patch
timer-don-t-reinitialize-the-cpu-base-lock-during-cpu_up_prepare.patch
tracing-fix-leaks-of-filter-preds.patch

13 files changed:
queue-3.4/ath9k-fix-key-allocation-error-handling-for-powersave-keys.patch [new file with mode: 0644]
queue-3.4/b43-handle-dma-rx-descriptor-underrun.patch [new file with mode: 0644]
queue-3.4/drm-don-t-check-modeset-locks-in-panic-handler.patch [new file with mode: 0644]
queue-3.4/drm-mm-fix-dump-table-bug.patch [new file with mode: 0644]
queue-3.4/ext4-limit-group-search-loop-for-non-extent-files.patch [new file with mode: 0644]
queue-3.4/mwifiex-clear-is_suspended-flag-when-interrupt-is-received-early.patch [new file with mode: 0644]
queue-3.4/mwifiex-fix-setting-of-multicast-filter.patch [new file with mode: 0644]
queue-3.4/powerpc-bring-all-threads-online-prior-to-migration-hibernation.patch [new file with mode: 0644]
queue-3.4/powerpc-kexec-fix-kexec-when-using-vmx-optimised-memcpy.patch [new file with mode: 0644]
queue-3.4/series
queue-3.4/tick-cleanup-nohz-per-cpu-data-on-cpu-down.patch [new file with mode: 0644]
queue-3.4/timer-don-t-reinitialize-the-cpu-base-lock-during-cpu_up_prepare.patch [new file with mode: 0644]
queue-3.4/tracing-fix-leaks-of-filter-preds.patch [new file with mode: 0644]

diff --git a/queue-3.4/ath9k-fix-key-allocation-error-handling-for-powersave-keys.patch b/queue-3.4/ath9k-fix-key-allocation-error-handling-for-powersave-keys.patch
new file mode 100644
index 0000000..e83fe91
--- /dev/null
+++ b/queue-3.4/ath9k-fix-key-allocation-error-handling-for-powersave-keys.patch
@@ -0,0 +1,52 @@
+From 4ef69d0394cba8caa9f75d3f2e53429bfb8b3045 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@openwrt.org>
+Date: Sat, 27 Apr 2013 11:47:01 +0200
+Subject: ath9k: fix key allocation error handling for powersave keys
+
+From: Felix Fietkau <nbd@openwrt.org>
+
+commit 4ef69d0394cba8caa9f75d3f2e53429bfb8b3045 upstream.
+
+If no keycache slots are available, ath_key_config can return -ENOSPC.
+If the key index is not checked for errors, it can lead to logspam that
+looks like this: "ath: wiphy0: keyreset: keycache entry 228 out of range"
+This can cause follow-up errors if the invalid keycache index gets
+used for tx.
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath9k/main.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1711,6 +1711,7 @@ static int ath9k_sta_add(struct ieee8021
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_node *an = (struct ath_node *) sta->drv_priv;
+       struct ieee80211_key_conf ps_key = { };
++      int key;
+       ath_node_attach(sc, sta, vif);
+@@ -1718,7 +1719,9 @@ static int ath9k_sta_add(struct ieee8021
+           vif->type != NL80211_IFTYPE_AP_VLAN)
+               return 0;
+-      an->ps_key = ath_key_config(common, vif, sta, &ps_key);
++      key = ath_key_config(common, vif, sta, &ps_key);
++      if (key > 0)
++              an->ps_key = key;
+       return 0;
+ }
+@@ -1735,6 +1738,7 @@ static void ath9k_del_ps_key(struct ath_
+           return;
+       ath_key_delete(common, &ps_key);
++      an->ps_key = 0;
+ }
+ static int ath9k_sta_remove(struct ieee80211_hw *hw,
diff --git a/queue-3.4/b43-handle-dma-rx-descriptor-underrun.patch b/queue-3.4/b43-handle-dma-rx-descriptor-underrun.patch
new file mode 100644
index 0000000..26b45da
--- /dev/null
+++ b/queue-3.4/b43-handle-dma-rx-descriptor-underrun.patch
@@ -0,0 +1,149 @@
+From 73b82bf0bfbf58e6ff328d3726934370585f6e78 Mon Sep 17 00:00:00 2001
+From: Thommy Jakobsson <thommyj@gmail.com>
+Date: Tue, 23 Apr 2013 21:45:11 +0200
+Subject: B43: Handle DMA RX descriptor underrun
+
+From: Thommy Jakobsson <thommyj@gmail.com>
+
+commit 73b82bf0bfbf58e6ff328d3726934370585f6e78 upstream.
+
+Add handling of rx descriptor underflow. This fixes a fault that could
+happen on slow machines, where data is received faster than the CPU can
+handle. In such a case the device will use up all rx descriptors and
+refuse to send any more data before confirming that it is ok. This
+patch enables necessary interrupt to discover such a situation and will
+handle them by dropping everything in the ring buffer.
+
+Reviewed-by: Michael Buesch <m@bues.ch>
+Signed-off-by: Thommy Jakobsson <thommyj@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/b43/dma.c  |   19 +++++++++++++++++
+ drivers/net/wireless/b43/dma.h  |    4 ++-
+ drivers/net/wireless/b43/main.c |   43 ++++++++++++++++------------------------
+ 3 files changed, 40 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1733,6 +1733,25 @@ drop_recycle_buffer:
+       sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
+ }
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
++{
++      int current_slot, previous_slot;
++
++      B43_WARN_ON(ring->tx);
++
++      /* Device has filled all buffers, drop all packets and let TCP
++       * decrease speed.
++       * Decrement RX index by one will let the device to see all slots
++       * as free again
++       */
++      /*
++      *TODO: How to increase rx_drop in mac80211?
++      */
++      current_slot = ring->ops->get_current_rxslot(ring);
++      previous_slot = prev_slot(ring, current_slot);
++      ring->ops->set_current_rxslot(ring, previous_slot);
++}
++
+ void b43_dma_rx(struct b43_dmaring *ring)
+ {
+       const struct b43_dma_ops *ops = ring->ops;
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -9,7 +9,7 @@
+ /* DMA-Interrupt reasons. */
+ #define B43_DMAIRQ_FATALMASK  ((1 << 10) | (1 << 11) | (1 << 12) \
+                                        | (1 << 14) | (1 << 15))
+-#define B43_DMAIRQ_NONFATALMASK       (1 << 13)
++#define B43_DMAIRQ_RDESC_UFLOW                (1 << 13)
+ #define B43_DMAIRQ_RX_DONE            (1 << 16)
+ /*** 32-bit DMA Engine. ***/
+@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
+ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+                            const struct b43_txstatus *status);
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
++
+ void b43_dma_rx(struct b43_dmaring *ring);
+ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -1895,30 +1895,18 @@ static void b43_do_interrupt_thread(stru
+               }
+       }
+-      if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
+-                                        B43_DMAIRQ_NONFATALMASK))) {
+-              if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
+-                      b43err(dev->wl, "Fatal DMA error: "
+-                             "0x%08X, 0x%08X, 0x%08X, "
+-                             "0x%08X, 0x%08X, 0x%08X\n",
+-                             dma_reason[0], dma_reason[1],
+-                             dma_reason[2], dma_reason[3],
+-                             dma_reason[4], dma_reason[5]);
+-                      b43err(dev->wl, "This device does not support DMA "
++      if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
++              b43err(dev->wl,
++                      "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
++                      dma_reason[0], dma_reason[1],
++                      dma_reason[2], dma_reason[3],
++                      dma_reason[4], dma_reason[5]);
++              b43err(dev->wl, "This device does not support DMA "
+                              "on your system. It will now be switched to PIO.\n");
+-                      /* Fall back to PIO transfers if we get fatal DMA errors! */
+-                      dev->use_pio = true;
+-                      b43_controller_restart(dev, "DMA error");
+-                      return;
+-              }
+-              if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
+-                      b43err(dev->wl, "DMA error: "
+-                             "0x%08X, 0x%08X, 0x%08X, "
+-                             "0x%08X, 0x%08X, 0x%08X\n",
+-                             dma_reason[0], dma_reason[1],
+-                             dma_reason[2], dma_reason[3],
+-                             dma_reason[4], dma_reason[5]);
+-              }
++              /* Fall back to PIO transfers if we get fatal DMA errors! */
++              dev->use_pio = true;
++              b43_controller_restart(dev, "DMA error");
++              return;
+       }
+       if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
+@@ -1937,6 +1925,11 @@ static void b43_do_interrupt_thread(stru
+               handle_irq_noise(dev);
+       /* Check the DMA reason registers for received data. */
++      if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
++              if (B43_DEBUG)
++                      b43warn(dev->wl, "RX descriptor underrun\n");
++              b43_dma_handle_rx_overflow(dev->dma.rx_ring);
++      }
+       if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
+               if (b43_using_pio_transfers(dev))
+                       b43_pio_rx(dev->pio.rx_queue);
+@@ -1994,7 +1987,7 @@ static irqreturn_t b43_do_interrupt(stru
+               return IRQ_NONE;
+       dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
+-          & 0x0001DC00;
++          & 0x0001FC00;
+       dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
+           & 0x0000DC00;
+       dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
+@@ -3122,7 +3115,7 @@ static int b43_chip_init(struct b43_wlde
+               b43_write32(dev, 0x018C, 0x02000000);
+       }
+       b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
+-      b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
++      b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
+       b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
+       b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
+       b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
diff --git a/queue-3.4/drm-don-t-check-modeset-locks-in-panic-handler.patch b/queue-3.4/drm-don-t-check-modeset-locks-in-panic-handler.patch
new file mode 100644
index 0000000..9b7a5b6
--- /dev/null
+++ b/queue-3.4/drm-don-t-check-modeset-locks-in-panic-handler.patch
@@ -0,0 +1,36 @@
+From a9b054e8ab06504c2afa0e307ee78d3778993a1d Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Thu, 2 May 2013 09:43:05 +0200
+Subject: drm: don't check modeset locks in panic handler
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit a9b054e8ab06504c2afa0e307ee78d3778993a1d upstream.
+
+Since we know that locking is broken in that case and it's more
+important to not flood the dmesg with random gunk.
+
+References: http://lkml.kernel.org/r/20130502000206.GH15623@pd.tnic
+Cc: Dave Airlie <airlied@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Reported-and-tested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_crtc.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -973,6 +973,10 @@ int drm_mode_group_init_legacy_group(str
+       if ((ret = drm_mode_group_init(dev, group)))
+               return ret;
++      /* Locking is currently fubar in the panic handler. */
++      if (oops_in_progress)
++              return;
++
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               group->id_list[group->num_crtcs++] = crtc->base.id;
diff --git a/queue-3.4/drm-mm-fix-dump-table-bug.patch b/queue-3.4/drm-mm-fix-dump-table-bug.patch
new file mode 100644
index 0000000..1441469
--- /dev/null
+++ b/queue-3.4/drm-mm-fix-dump-table-bug.patch
@@ -0,0 +1,92 @@
+From 3a359f0b21ab218c1bf7a6a1b638b6fd143d0b99 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Sat, 20 Apr 2013 12:08:11 +0200
+Subject: drm/mm: fix dump table BUG
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 3a359f0b21ab218c1bf7a6a1b638b6fd143d0b99 upstream.
+
+In
+
+commit 9e8944ab564f2e3dde90a518cd32048c58918608
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date:   Thu Nov 15 11:32:17 2012 +0000
+
+    drm: Introduce an iterator over holes in the drm_mm range manager
+
+helpers and iterators for hole handling have been introduced with some
+debug BUG_ONs sprinkled over. Unfortunately this broke the mm dumper
+which unconditionally tried to compute the size of the very first
+hole.
+
+While at it unify the code a bit with the hole dumping in the loop.
+
+v2: Extract a hole dump helper.
+
+Reported-by: Christopher Harvey <charvey@matrox.com>
+Cc: Christopher Harvey <charvey@matrox.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Acked-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_mm.c |   34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/drm_mm.c
++++ b/drivers/gpu/drm/drm_mm.c
+@@ -680,33 +680,35 @@ void drm_mm_debug_table(struct drm_mm *m
+ EXPORT_SYMBOL(drm_mm_debug_table);
+ #if defined(CONFIG_DEBUG_FS)
+-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
++static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+ {
+-      struct drm_mm_node *entry;
+-      unsigned long total_used = 0, total_free = 0, total = 0;
+       unsigned long hole_start, hole_end, hole_size;
+-      hole_start = drm_mm_hole_node_start(&mm->head_node);
+-      hole_end = drm_mm_hole_node_end(&mm->head_node);
+-      hole_size = hole_end - hole_start;
+-      if (hole_size)
++      if (entry->hole_follows) {
++              hole_start = drm_mm_hole_node_start(entry);
++              hole_end = drm_mm_hole_node_end(entry);
++              hole_size = hole_end - hole_start;
+               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+                               hole_start, hole_end, hole_size);
+-      total_free += hole_size;
++              return hole_size;
++      }
++
++      return 0;
++}
++
++int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
++{
++      struct drm_mm_node *entry;
++      unsigned long total_used = 0, total_free = 0, total = 0;
++
++      total_free += drm_mm_dump_hole(m, &mm->head_node);
+       drm_mm_for_each_node(entry, mm) {
+               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+                               entry->start, entry->start + entry->size,
+                               entry->size);
+               total_used += entry->size;
+-              if (entry->hole_follows) {
+-                      hole_start = drm_mm_hole_node_start(entry);
+-                      hole_end = drm_mm_hole_node_end(entry);
+-                      hole_size = hole_end - hole_start;
+-                      seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+-                                      hole_start, hole_end, hole_size);
+-                      total_free += hole_size;
+-              }
++              total_free += drm_mm_dump_hole(m, entry);
+       }
+       total = total_free + total_used;
diff --git a/queue-3.4/ext4-limit-group-search-loop-for-non-extent-files.patch b/queue-3.4/ext4-limit-group-search-loop-for-non-extent-files.patch
new file mode 100644
index 0000000..8b6fb61
--- /dev/null
+++ b/queue-3.4/ext4-limit-group-search-loop-for-non-extent-files.patch
@@ -0,0 +1,55 @@
+From e6155736ad76b2070652745f9e54cdea3f0d8567 Mon Sep 17 00:00:00 2001
+From: Lachlan McIlroy <lmcilroy@redhat.com>
+Date: Sun, 5 May 2013 23:10:00 -0400
+Subject: ext4: limit group search loop for non-extent files
+
+From: Lachlan McIlroy <lmcilroy@redhat.com>
+
+commit e6155736ad76b2070652745f9e54cdea3f0d8567 upstream.
+
+In the case where we are allocating for a non-extent file,
+we must limit the groups we allocate from to those below
+2^32 blocks, and ext4_mb_regular_allocator() attempts to
+do this initially by putting a cap on ngroups for the
+subsequent search loop.
+
+However, the initial target group comes in from the
+allocation context (ac), and it may already be beyond
+the artificially limited ngroups.  In this case,
+the limit
+
+       if (group == ngroups)
+               group = 0;
+
+at the top of the loop is never true, and the loop will
+run away.
+
+Catch this case inside the loop and reset the search to
+start at group 0.
+
+[sandeen@redhat.com: add commit msg & comments]
+
+Signed-off-by: Lachlan McIlroy <lmcilroy@redhat.com>
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/mballoc.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1980,7 +1980,11 @@ repeat:
+               group = ac->ac_g_ex.fe_group;
+               for (i = 0; i < ngroups; group++, i++) {
+-                      if (group == ngroups)
++                      /*
++                       * Artificially restricted ngroups for non-extent
++                       * files makes group > ngroups possible on first loop.
++                       */
++                      if (group >= ngroups)
+                               group = 0;
+                       /* This now checks without needing the buddy page */
diff --git a/queue-3.4/mwifiex-clear-is_suspended-flag-when-interrupt-is-received-early.patch b/queue-3.4/mwifiex-clear-is_suspended-flag-when-interrupt-is-received-early.patch
new file mode 100644
index 0000000..1c501c6
--- /dev/null
+++ b/queue-3.4/mwifiex-clear-is_suspended-flag-when-interrupt-is-received-early.patch
@@ -0,0 +1,60 @@
+From 48795424acff7215d5eac0b52793a2c1eb3a6283 Mon Sep 17 00:00:00 2001
+From: Bing Zhao <bzhao@marvell.com>
+Date: Mon, 6 May 2013 19:46:53 -0700
+Subject: mwifiex: clear is_suspended flag when interrupt is received early
+
+From: Bing Zhao <bzhao@marvell.com>
+
+commit 48795424acff7215d5eac0b52793a2c1eb3a6283 upstream.
+
+When the XO-4 with 8787 wireless is woken up due to wake-on-WLAN
+mwifiex is often flooded with "not allowed while suspended" messages
+and the interface is unusable.
+
+[  202.171609] int: sdio_ireg = 0x1
+[  202.180700] info: mwifiex_process_hs_config: auto cancelling host
+               sleep since there is interrupt from the firmware
+[  202.201880] event: wakeup device...
+[  202.211452] event: hs_deactivated
+[  202.514638] info: --- Rx: Data packet ---
+[  202.514753] data: 4294957544 BSS(0-0): Data <= kernel
+[  202.514825] PREP_CMD: device in suspended state
+[  202.514839] data: dequeuing the packet ec7248c0 ec4869c0
+[  202.514886] mwifiex_write_data_sync: not allowed while suspended
+[  202.514886] host_to_card, write iomem (1) failed: -1
+[  202.514917] mwifiex_write_data_sync: not allowed while suspended
+[  202.514936] host_to_card, write iomem (2) failed: -1
+[  202.514949] mwifiex_write_data_sync: not allowed while suspended
+[  202.514965] host_to_card, write iomem (3) failed: -1
+[  202.514976] mwifiex_write_data_async failed: 0xFFFFFFFF
+
+This can be readily reproduced when putting the XO-4 in a loop where
+it goes to sleep due to inactivity, but then wakes up due to an
+incoming ping. The error is hit within an hour or two.
+
+This issue happens when an interrupt comes in early while host sleep
+is still activated. Driver handles this case by auto cancelling host
+sleep. However is_suspended flag is still set which prevents any cmd
+or data from being sent to firmware. Fix it by clearing is_suspended
+flag in this path.
+
+Reported-by: Daniel Drake <dsd@laptop.org>
+Tested-by: Daniel Drake <dsd@laptop.org>
+Signed-off-by: Bing Zhao <bzhao@marvell.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/mwifiex/cmdevt.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireless/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/mwifiex/cmdevt.c
+@@ -1084,6 +1084,7 @@ mwifiex_process_hs_config(struct mwifiex
+       adapter->if_ops.wakeup(adapter);
+       adapter->hs_activated = false;
+       adapter->is_hs_configured = false;
++      adapter->is_suspended = false;
+       mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
+                                                   MWIFIEX_BSS_ROLE_ANY),
+                                  false);
diff --git a/queue-3.4/mwifiex-fix-setting-of-multicast-filter.patch b/queue-3.4/mwifiex-fix-setting-of-multicast-filter.patch
new file mode 100644
index 0000000..9b60d03
--- /dev/null
+++ b/queue-3.4/mwifiex-fix-setting-of-multicast-filter.patch
@@ -0,0 +1,66 @@
+From ccd384b10420ac81ba3fb9b0a7d18272c7173552 Mon Sep 17 00:00:00 2001
+From: Daniel Drake <dsd@laptop.org>
+Date: Wed, 8 May 2013 15:37:19 -0400
+Subject: mwifiex: fix setting of multicast filter
+
+From: Daniel Drake <dsd@laptop.org>
+
+commit ccd384b10420ac81ba3fb9b0a7d18272c7173552 upstream.
+
+A small bug in this code was causing the ALLMULTI filter to be set
+when in fact we were just wanting to program a selective multicast list
+to the hardware.
+
+Fix that bug and remove a redundant if condition in the code that
+follows.
+
+This fixes wakeup behaviour when multicast WOL is enabled. Previously,
+all multicast packets would wake up the system. Now, only those that the
+host intended to receive trigger wakeups.
+
+Signed-off-by: Daniel Drake <dsd@laptop.org>
+Acked-by: Bing Zhao <bzhao@marvell.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/mwifiex/sta_ioctl.c |   21 ++++++---------------
+ 1 file changed, 6 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -105,7 +105,7 @@ int mwifiex_request_set_multicast_list(s
+       } else {
+               /* Multicast */
+               priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
+-              if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
++              if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
+                       dev_dbg(priv->adapter->dev,
+                               "info: Enabling All Multicast!\n");
+                       priv->curr_pkt_filter |=
+@@ -117,20 +117,11 @@ int mwifiex_request_set_multicast_list(s
+                               dev_dbg(priv->adapter->dev,
+                                       "info: Set multicast list=%d\n",
+                                      mcast_list->num_multicast_addr);
+-                              /* Set multicast addresses to firmware */
+-                              if (old_pkt_filter == priv->curr_pkt_filter) {
+-                                      /* Send request to firmware */
+-                                      ret = mwifiex_send_cmd_async(priv,
+-                                              HostCmd_CMD_MAC_MULTICAST_ADR,
+-                                              HostCmd_ACT_GEN_SET, 0,
+-                                              mcast_list);
+-                              } else {
+-                                      /* Send request to firmware */
+-                                      ret = mwifiex_send_cmd_async(priv,
+-                                              HostCmd_CMD_MAC_MULTICAST_ADR,
+-                                              HostCmd_ACT_GEN_SET, 0,
+-                                              mcast_list);
+-                              }
++                              /* Send multicast addresses to firmware */
++                              ret = mwifiex_send_cmd_async(priv,
++                                      HostCmd_CMD_MAC_MULTICAST_ADR,
++                                      HostCmd_ACT_GEN_SET, 0,
++                                      mcast_list);
+                       }
+               }
+       }
diff --git a/queue-3.4/powerpc-bring-all-threads-online-prior-to-migration-hibernation.patch b/queue-3.4/powerpc-bring-all-threads-online-prior-to-migration-hibernation.patch
new file mode 100644
index 0000000..23c545d
--- /dev/null
+++ b/queue-3.4/powerpc-bring-all-threads-online-prior-to-migration-hibernation.patch
@@ -0,0 +1,253 @@
+From 120496ac2d2d60aee68d3123a68169502a85f4b5 Mon Sep 17 00:00:00 2001
+From: Robert Jennings <rcj@linux.vnet.ibm.com>
+Date: Tue, 7 May 2013 04:34:11 +0000
+Subject: powerpc: Bring all threads online prior to migration/hibernation
+
+From: Robert Jennings <rcj@linux.vnet.ibm.com>
+
+commit 120496ac2d2d60aee68d3123a68169502a85f4b5 upstream.
+
+This patch brings online all threads which are present but not online
+prior to migration/hibernation.  After migration/hibernation those
+threads are taken back offline.
+
+During migration/hibernation all online CPUs must call H_JOIN, this is
+required by the hypervisor.  Without this patch, threads that are offline
+(H_CEDE'd) will not be woken to make the H_JOIN call and the OS will be
+deadlocked (all threads either JOIN'd or CEDE'd).
+
+Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/rtas.h          |    2 
+ arch/powerpc/kernel/rtas.c               |  113 +++++++++++++++++++++++++++++++
+ arch/powerpc/platforms/pseries/suspend.c |   22 ++++++
+ 3 files changed, 137 insertions(+)
+
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -262,6 +262,8 @@ extern void rtas_progress(char *s, unsig
+ extern void rtas_initialize(void);
+ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
+ extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
++extern int rtas_online_cpus_mask(cpumask_var_t cpus);
++extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
+ extern int rtas_ibm_suspend_me(struct rtas_args *);
+ struct rtc_time;
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/capability.h>
+ #include <linux/delay.h>
++#include <linux/cpu.h>
+ #include <linux/smp.h>
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+@@ -808,6 +809,95 @@ static void rtas_percpu_suspend_me(void
+       __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
+ }
++enum rtas_cpu_state {
++      DOWN,
++      UP,
++};
++
++#ifndef CONFIG_SMP
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++                              cpumask_var_t cpus)
++{
++      if (!cpumask_empty(cpus)) {
++              cpumask_clear(cpus);
++              return -EINVAL;
++      } else
++              return 0;
++}
++#else
++/* On return cpumask will be altered to indicate CPUs changed.
++ * CPUs with states changed will be set in the mask,
++ * CPUs with status unchanged will be unset in the mask. */
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++                              cpumask_var_t cpus)
++{
++      int cpu;
++      int cpuret = 0;
++      int ret = 0;
++
++      if (cpumask_empty(cpus))
++              return 0;
++
++      for_each_cpu(cpu, cpus) {
++              switch (state) {
++              case DOWN:
++                      cpuret = cpu_down(cpu);
++                      break;
++              case UP:
++                      cpuret = cpu_up(cpu);
++                      break;
++              }
++              if (cpuret) {
++                      pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
++                                      __func__,
++                                      ((state == UP) ? "up" : "down"),
++                                      cpu, cpuret);
++                      if (!ret)
++                              ret = cpuret;
++                      if (state == UP) {
++                              /* clear bits for unchanged cpus, return */
++                              cpumask_shift_right(cpus, cpus, cpu);
++                              cpumask_shift_left(cpus, cpus, cpu);
++                              break;
++                      } else {
++                              /* clear bit for unchanged cpu, continue */
++                              cpumask_clear_cpu(cpu, cpus);
++                      }
++              }
++      }
++
++      return ret;
++}
++#endif
++
++int rtas_online_cpus_mask(cpumask_var_t cpus)
++{
++      int ret;
++
++      ret = rtas_cpu_state_change_mask(UP, cpus);
++
++      if (ret) {
++              cpumask_var_t tmp_mask;
++
++              if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
++                      return ret;
++
++              /* Use tmp_mask to preserve cpus mask from first failure */
++              cpumask_copy(tmp_mask, cpus);
++              rtas_offline_cpus_mask(tmp_mask);
++              free_cpumask_var(tmp_mask);
++      }
++
++      return ret;
++}
++EXPORT_SYMBOL(rtas_online_cpus_mask);
++
++int rtas_offline_cpus_mask(cpumask_var_t cpus)
++{
++      return rtas_cpu_state_change_mask(DOWN, cpus);
++}
++EXPORT_SYMBOL(rtas_offline_cpus_mask);
++
+ int rtas_ibm_suspend_me(struct rtas_args *args)
+ {
+       long state;
+@@ -815,6 +905,8 @@ int rtas_ibm_suspend_me(struct rtas_args
+       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+       struct rtas_suspend_me_data data;
+       DECLARE_COMPLETION_ONSTACK(done);
++      cpumask_var_t offline_mask;
++      int cpuret;
+       if (!rtas_service_present("ibm,suspend-me"))
+               return -ENOSYS;
+@@ -838,11 +930,24 @@ int rtas_ibm_suspend_me(struct rtas_args
+               return 0;
+       }
++      if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++              return -ENOMEM;
++
+       atomic_set(&data.working, 0);
+       atomic_set(&data.done, 0);
+       atomic_set(&data.error, 0);
+       data.token = rtas_token("ibm,suspend-me");
+       data.complete = &done;
++
++      /* All present CPUs must be online */
++      cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
++      cpuret = rtas_online_cpus_mask(offline_mask);
++      if (cpuret) {
++              pr_err("%s: Could not bring present CPUs online.\n", __func__);
++              atomic_set(&data.error, cpuret);
++              goto out;
++      }
++
+       stop_topology_update();
+       /* Call function on all CPUs.  One of us will make the
+@@ -858,6 +963,14 @@ int rtas_ibm_suspend_me(struct rtas_args
+       start_topology_update();
++      /* Take down CPUs not online prior to suspend */
++      cpuret = rtas_offline_cpus_mask(offline_mask);
++      if (cpuret)
++              pr_warn("%s: Could not restore CPUs to offline state.\n",
++                              __func__);
++
++out:
++      free_cpumask_var(offline_mask);
+       return atomic_read(&data.error);
+ }
+ #else /* CONFIG_PPC_PSERIES */
+--- a/arch/powerpc/platforms/pseries/suspend.c
++++ b/arch/powerpc/platforms/pseries/suspend.c
+@@ -16,6 +16,7 @@
+   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+   */
++#include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/suspend.h>
+ #include <linux/stat.h>
+@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct de
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+ {
++      cpumask_var_t offline_mask;
+       int rc;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
++      if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++              return -ENOMEM;
++
+       stream_id = simple_strtoul(buf, NULL, 16);
+       do {
+@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct de
+       } while (rc == -EAGAIN);
+       if (!rc) {
++              /* All present CPUs must be online */
++              cpumask_andnot(offline_mask, cpu_present_mask,
++                              cpu_online_mask);
++              rc = rtas_online_cpus_mask(offline_mask);
++              if (rc) {
++                      pr_err("%s: Could not bring present CPUs online.\n",
++                                      __func__);
++                      goto out;
++              }
++
+               stop_topology_update();
+               rc = pm_suspend(PM_SUSPEND_MEM);
+               start_topology_update();
++
++              /* Take down CPUs not online prior to suspend */
++              if (!rtas_offline_cpus_mask(offline_mask))
++                      pr_warn("%s: Could not restore CPUs to offline "
++                                      "state.\n", __func__);
+       }
+       stream_id = 0;
+       if (!rc)
+               rc = count;
++out:
++      free_cpumask_var(offline_mask);
+       return rc;
+ }
diff --git a/queue-3.4/powerpc-kexec-fix-kexec-when-using-vmx-optimised-memcpy.patch b/queue-3.4/powerpc-kexec-fix-kexec-when-using-vmx-optimised-memcpy.patch
new file mode 100644
index 0000000..833969b
--- /dev/null
+++ b/queue-3.4/powerpc-kexec-fix-kexec-when-using-vmx-optimised-memcpy.patch
@@ -0,0 +1,54 @@
+From 79c66ce8f6448a3295a32efeac88c9debd7f7094 Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@au1.ibm.com>
+Date: Sun, 12 May 2013 15:04:53 +0000
+Subject: powerpc/kexec: Fix kexec when using VMX optimised memcpy
+
+From: Anton Blanchard <anton@au1.ibm.com>
+
+commit 79c66ce8f6448a3295a32efeac88c9debd7f7094 upstream.
+
+commit b3f271e86e5a (powerpc: POWER7 optimised memcpy using VMX and
+enhanced prefetch) uses VMX when it is safe to do so (ie not in
+interrupt). It also looks at the task struct to decide if we have to
+save the current tasks' VMX state.
+
+kexec calls memcpy() at a point where the task struct may have been
+overwritten by the new kexec segments. If it has been overwritten
+then when memcpy -> enable_altivec looks up current->thread.regs->msr
+we get a cryptic oops or lockup.
+
+I also notice we aren't initialising thread_info->cpu, which means
+smp_processor_id is broken. Fix that too.
+
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/machine_kexec_64.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -17,6 +17,7 @@
+ #include <linux/errno.h>
+ #include <linux/kernel.h>
+ #include <linux/cpu.h>
++#include <linux/hardirq.h>
+ #include <asm/page.h>
+ #include <asm/current.h>
+@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage
+       pr_debug("kexec: Starting switchover sequence.\n");
+       /* switch to a staticly allocated stack.  Based on irq stack code.
++       * We setup preempt_count to avoid using VMX in memcpy.
+        * XXX: the task struct will likely be invalid once we do the copy!
+        */
+       kexec_stack.thread_info.task = current_thread_info()->task;
+       kexec_stack.thread_info.flags = 0;
++      kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
++      kexec_stack.thread_info.cpu = current_thread_info()->cpu;
+       /* We need a static PACA, too; copy this CPU's PACA over and switch to
+        * it.  Also poison per_cpu_offset to catch anyone using non-static
diff --git a/queue-3.4/series b/queue-3.4/series
index 11583b6e5746d28392ce43d0582f7c541f007d0d..37e91b6e26bfa2a723c4186d71e0b7b81c1c9e96 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -9,3 +9,15 @@ hp_accel-ignore-the-error-from-lis3lv02d_poweron-at-resume.patch
 xen-vcpu-pvhvm-fix-vcpu-hotplugging-hanging.patch
 scsi-sd-fix-array-cache-flushing-bug-causing-performance-problems.patch
 audit-syscall-rules-are-not-applied-to-existing-processes-on-non-x86.patch
+timer-don-t-reinitialize-the-cpu-base-lock-during-cpu_up_prepare.patch
+tick-cleanup-nohz-per-cpu-data-on-cpu-down.patch
+tracing-fix-leaks-of-filter-preds.patch
+ext4-limit-group-search-loop-for-non-extent-files.patch
+powerpc-bring-all-threads-online-prior-to-migration-hibernation.patch
+powerpc-kexec-fix-kexec-when-using-vmx-optimised-memcpy.patch
+ath9k-fix-key-allocation-error-handling-for-powersave-keys.patch
+mwifiex-clear-is_suspended-flag-when-interrupt-is-received-early.patch
+mwifiex-fix-setting-of-multicast-filter.patch
+b43-handle-dma-rx-descriptor-underrun.patch
+drm-mm-fix-dump-table-bug.patch
+drm-don-t-check-modeset-locks-in-panic-handler.patch
diff --git a/queue-3.4/tick-cleanup-nohz-per-cpu-data-on-cpu-down.patch b/queue-3.4/tick-cleanup-nohz-per-cpu-data-on-cpu-down.patch
new file mode 100644
index 0000000..181d78e
--- /dev/null
+++ b/queue-3.4/tick-cleanup-nohz-per-cpu-data-on-cpu-down.patch
@@ -0,0 +1,38 @@
+From 4b0c0f294f60abcdd20994a8341a95c8ac5eeb96 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 May 2013 15:02:50 +0200
+Subject: tick: Cleanup NOHZ per cpu data on cpu down
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 4b0c0f294f60abcdd20994a8341a95c8ac5eeb96 upstream.
+
+Prarit reported a crash on CPU offline/online. The reason is that on
+CPU down the NOHZ related per cpu data of the dead cpu is not cleaned
+up. If at cpu online an interrupt happens before the per cpu tick
+device is registered the irq_enter() check potentially sees stale data
+and dereferences a NULL pointer.
+
+Cleanup the data after the cpu is dead.
+
+Reported-by: Prarit Bhargava <prarit@redhat.com>
+Cc: Mike Galbraith <bitbucket@online.de>
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1305031451561.2886@ionos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/tick-sched.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -866,7 +866,7 @@ void tick_cancel_sched_timer(int cpu)
+               hrtimer_cancel(&ts->sched_timer);
+ # endif
+-      ts->nohz_mode = NOHZ_MODE_INACTIVE;
++      memset(ts, 0, sizeof(*ts));
+ }
+ #endif
diff --git a/queue-3.4/timer-don-t-reinitialize-the-cpu-base-lock-during-cpu_up_prepare.patch b/queue-3.4/timer-don-t-reinitialize-the-cpu-base-lock-during-cpu_up_prepare.patch
new file mode 100644
index 0000000..f423f30
--- /dev/null
+++ b/queue-3.4/timer-don-t-reinitialize-the-cpu-base-lock-during-cpu_up_prepare.patch
@@ -0,0 +1,85 @@
+From 42a5cf46cd56f46267d2a9fcf2655f4078cd3042 Mon Sep 17 00:00:00 2001
+From: Tirupathi Reddy <tirupath@codeaurora.org>
+Date: Tue, 14 May 2013 13:59:02 +0530
+Subject: timer: Don't reinitialize the cpu base lock during CPU_UP_PREPARE
+
+From: Tirupathi Reddy <tirupath@codeaurora.org>
+
+commit 42a5cf46cd56f46267d2a9fcf2655f4078cd3042 upstream.
+
+An inactive timer's base can refer to a offline cpu's base.
+
+In the current code, cpu_base's lock is blindly reinitialized each
+time a CPU is brought up. If a CPU is brought online during the period
+that another thread is trying to modify an inactive timer on that CPU
+with holding its timer base lock, then the lock will be reinitialized
+under its feet. This leads to following SPIN_BUG().
+
+<0> BUG: spinlock already unlocked on CPU#3, kworker/u:3/1466
+<0> lock: 0xe3ebe000, .magic: dead4ead, .owner: kworker/u:3/1466, .owner_cpu: 1
+<4> [<c0013dc4>] (unwind_backtrace+0x0/0x11c) from [<c026e794>] (do_raw_spin_unlock+0x40/0xcc)
+<4> [<c026e794>] (do_raw_spin_unlock+0x40/0xcc) from [<c076c160>] (_raw_spin_unlock+0x8/0x30)
+<4> [<c076c160>] (_raw_spin_unlock+0x8/0x30) from [<c009b858>] (mod_timer+0x294/0x310)
+<4> [<c009b858>] (mod_timer+0x294/0x310) from [<c00a5e04>] (queue_delayed_work_on+0x104/0x120)
+<4> [<c00a5e04>] (queue_delayed_work_on+0x104/0x120) from [<c04eae00>] (sdhci_msm_bus_voting+0x88/0x9c)
+<4> [<c04eae00>] (sdhci_msm_bus_voting+0x88/0x9c) from [<c04d8780>] (sdhci_disable+0x40/0x48)
+<4> [<c04d8780>] (sdhci_disable+0x40/0x48) from [<c04bf300>] (mmc_release_host+0x4c/0xb0)
+<4> [<c04bf300>] (mmc_release_host+0x4c/0xb0) from [<c04c7aac>] (mmc_sd_detect+0x90/0xfc)
+<4> [<c04c7aac>] (mmc_sd_detect+0x90/0xfc) from [<c04c2504>] (mmc_rescan+0x7c/0x2c4)
+<4> [<c04c2504>] (mmc_rescan+0x7c/0x2c4) from [<c00a6a7c>] (process_one_work+0x27c/0x484)
+<4> [<c00a6a7c>] (process_one_work+0x27c/0x484) from [<c00a6e94>] (worker_thread+0x210/0x3b0)
+<4> [<c00a6e94>] (worker_thread+0x210/0x3b0) from [<c00aad9c>] (kthread+0x80/0x8c)
+<4> [<c00aad9c>] (kthread+0x80/0x8c) from [<c000ea80>] (kernel_thread_exit+0x0/0x8)
+
+As an example, this particular crash occurred when CPU #3 is executing
+mod_timer() on an inactive timer whose base is refered to offlined CPU
+#2.  The code locked the timer_base corresponding to CPU #2. Before it
+could proceed, CPU #2 came online and reinitialized the spinlock
+corresponding to its base. Thus now CPU #3 held a lock which was
+reinitialized. When CPU #3 finally ended up unlocking the old cpu_base
+corresponding to CPU #2, we hit the above SPIN_BUG().
+
+CPU #0         CPU #3                                 CPU #2
+------         -------                                -------
+.....           ......                               <Offline>
+               mod_timer()
+                lock_timer_base
+                  spin_lock_irqsave(&base->lock)
+
+cpu_up(2)       .....                                  ......
+                                                       init_timers_cpu()
+....            .....                                  spin_lock_init(&base->lock)
+.....             spin_unlock_irqrestore(&base->lock)  ......
+                  <spin_bug>
+
+Allocation of per_cpu timer vector bases is done only once under
+"tvec_base_done[]" check. In the current code, spinlock_initialization
+of base->lock isn't under this check. When a CPU is up each time the
+base lock is reinitialized. Move base spinlock initialization under
+the check.
+
+Signed-off-by: Tirupathi Reddy <tirupath@codeaurora.org>
+Link: http://lkml.kernel.org/r/1368520142-4136-1-git-send-email-tirupath@codeaurora.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/timer.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1680,12 +1680,12 @@ static int __cpuinit init_timers_cpu(int
+                       boot_done = 1;
+                       base = &boot_tvec_bases;
+               }
++              spin_lock_init(&base->lock);
+               tvec_base_done[cpu] = 1;
+       } else {
+               base = per_cpu(tvec_bases, cpu);
+       }
+-      spin_lock_init(&base->lock);
+       for (j = 0; j < TVN_SIZE; j++) {
+               INIT_LIST_HEAD(base->tv5.vec + j);
diff --git a/queue-3.4/tracing-fix-leaks-of-filter-preds.patch b/queue-3.4/tracing-fix-leaks-of-filter-preds.patch
new file mode 100644
index 0000000..4b3aea2
--- /dev/null
+++ b/queue-3.4/tracing-fix-leaks-of-filter-preds.patch
@@ -0,0 +1,59 @@
+From 60705c89460fdc7227f2d153b68b3f34814738a4 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Tue, 14 May 2013 15:40:48 -0400
+Subject: tracing: Fix leaks of filter preds
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 60705c89460fdc7227f2d153b68b3f34814738a4 upstream.
+
+Special preds are created when folding a series of preds that
+can be done in serial. These are allocated in an ops field of
+the pred structure. But they were never freed, causing memory
+leaks.
+
+This was discovered using the kmemleak checker:
+
+unreferenced object 0xffff8800797fd5e0 (size 32):
+  comm "swapper/0", pid 1, jiffies 4294690605 (age 104.608s)
+  hex dump (first 32 bytes):
+    00 00 01 00 03 00 05 00 07 00 09 00 0b 00 0d 00  ................
+    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+  backtrace:
+    [<ffffffff814b52af>] kmemleak_alloc+0x73/0x98
+    [<ffffffff8111ff84>] kmemleak_alloc_recursive.constprop.42+0x16/0x18
+    [<ffffffff81120e68>] __kmalloc+0xd7/0x125
+    [<ffffffff810d47eb>] kcalloc.constprop.24+0x2d/0x2f
+    [<ffffffff810d4896>] fold_pred_tree_cb+0xa9/0xf4
+    [<ffffffff810d3781>] walk_pred_tree+0x47/0xcc
+    [<ffffffff810d5030>] replace_preds.isra.20+0x6f8/0x72f
+    [<ffffffff810d50b5>] create_filter+0x4e/0x8b
+    [<ffffffff81b1c30d>] ftrace_test_event_filter+0x5a/0x155
+    [<ffffffff8100028d>] do_one_initcall+0xa0/0x137
+    [<ffffffff81afbedf>] kernel_init_freeable+0x14d/0x1dc
+    [<ffffffff814b24b7>] kernel_init+0xe/0xdb
+    [<ffffffff814d539c>] ret_from_fork+0x7c/0xb0
+    [<ffffffffffffffff>] 0xffffffffffffffff
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Tom Zanussi <tzanussi@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_filter.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -777,7 +777,11 @@ static int filter_set_pred(struct event_
+ static void __free_preds(struct event_filter *filter)
+ {
++      int i;
++
+       if (filter->preds) {
++              for (i = 0; i < filter->n_preds; i++)
++                      kfree(filter->preds[i].ops);
+               kfree(filter->preds);
+               filter->preds = NULL;
+       }