--- /dev/null
+From 6eabb3301b1facee669d9938f7c5a0295c21d71d Mon Sep 17 00:00:00 2001
+From: Jaccon Bastiaansen <jaccon.bastiaansen@gmail.com>
+Date: Mon, 13 May 2013 17:28:27 +0100
+Subject: ARM: 7720/1: ARM v6/v7 cmpxchg64 shouldn't clear upper 32 bits of the old/new value
+
+From: Jaccon Bastiaansen <jaccon.bastiaansen@gmail.com>
+
+commit 6eabb3301b1facee669d9938f7c5a0295c21d71d upstream.
+
+The implementation of cmpxchg64() for the ARM v6 and v7 architectures
+casts parameters 2 and 3 (the old and new 64-bit values) to an unsigned
+long before calling the atomic64_cmpxchg() function. This clears
+the top 32 bits of the old and new values, resulting in the wrong
+values being compare-exchanged. Luckily, this only appears to be used
+for 64-bit sched_clock, which we don't (yet) have on ARM.
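+
+As a purely illustrative sketch (not part of the fix): on a 32-bit
+kernel the old cast truncates the compared value, e.g.
+
+  u64 old = 0x0000000100000001ULL;
+  unsigned long truncated = (unsigned long)old;  /* 0x00000001: upper half lost */
+
+so the comparison is performed against a truncated value and the wrong
+data can be exchanged.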
+
+This bug was introduced by commit 3e0f5a15f500 ("ARM: 7404/1: cmpxchg64:
+use atomic64 and local64 routines for cmpxchg64").
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Jaccon Bastiaansen <jaccon.bastiaansen@gmail.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/cmpxchg.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_lo
+ ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
+ atomic64_t, \
+ counter), \
+- (unsigned long)(o), \
+- (unsigned long)(n)))
++ (unsigned long long)(o), \
++ (unsigned long long)(n)))
+
+ #define cmpxchg64_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
+ local64_t, \
+ a), \
+- (unsigned long)(o), \
+- (unsigned long)(n)))
++ (unsigned long long)(o), \
++ (unsigned long long)(n)))
+
+ #endif /* __LINUX_ARM_ARCH__ >= 6 */
+
--- /dev/null
+From 4ef69d0394cba8caa9f75d3f2e53429bfb8b3045 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@openwrt.org>
+Date: Sat, 27 Apr 2013 11:47:01 +0200
+Subject: ath9k: fix key allocation error handling for powersave keys
+
+From: Felix Fietkau <nbd@openwrt.org>
+
+commit 4ef69d0394cba8caa9f75d3f2e53429bfb8b3045 upstream.
+
+If no keycache slots are available, ath_key_config can return -ENOSPC.
+If the key index is not checked for errors, it can lead to logspam that
+looks like this: "ath: wiphy0: keyreset: keycache entry 228 out of range"
+This can cause follow-up errors if the invalid keycache index gets
+used for tx.
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath9k/main.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1308,6 +1308,7 @@ static int ath9k_sta_add(struct ieee8021
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ struct ieee80211_key_conf ps_key = { };
++ int key;
+
+ ath_node_attach(sc, sta, vif);
+
+@@ -1315,7 +1316,9 @@ static int ath9k_sta_add(struct ieee8021
+ vif->type != NL80211_IFTYPE_AP_VLAN)
+ return 0;
+
+- an->ps_key = ath_key_config(common, vif, sta, &ps_key);
++ key = ath_key_config(common, vif, sta, &ps_key);
++ if (key > 0)
++ an->ps_key = key;
+
+ return 0;
+ }
+@@ -1332,6 +1335,7 @@ static void ath9k_del_ps_key(struct ath_
+ return;
+
+ ath_key_delete(common, &ps_key);
++ an->ps_key = 0;
+ }
+
+ static int ath9k_sta_remove(struct ieee80211_hw *hw,
--- /dev/null
+From 73b82bf0bfbf58e6ff328d3726934370585f6e78 Mon Sep 17 00:00:00 2001
+From: Thommy Jakobsson <thommyj@gmail.com>
+Date: Tue, 23 Apr 2013 21:45:11 +0200
+Subject: B43: Handle DMA RX descriptor underrun
+
+From: Thommy Jakobsson <thommyj@gmail.com>
+
+commit 73b82bf0bfbf58e6ff328d3726934370585f6e78 upstream.
+
+Add handling of rx descriptor underflow. This fixes a fault that could
+happen on slow machines, where data is received faster than the CPU can
+handle. In such a case the device will use up all rx descriptors and
+refuse to deliver any more data until it is told that this is OK. This
+patch enables the necessary interrupt to detect such a situation and
+handles it by dropping everything in the ring buffer.
+
+Reviewed-by: Michael Buesch <m@bues.ch>
+Signed-off-by: Thommy Jakobsson <thommyj@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/b43/dma.c | 19 +++++++++++++++++
+ drivers/net/wireless/b43/dma.h | 4 ++-
+ drivers/net/wireless/b43/main.c | 43 ++++++++++++++++------------------------
+ 3 files changed, 40 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1733,6 +1733,25 @@ drop_recycle_buffer:
+ sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
+ }
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
++{
++ int current_slot, previous_slot;
++
++ B43_WARN_ON(ring->tx);
++
++ /* Device has filled all buffers, drop all packets and let TCP
++ * decrease speed.
++ * Decrement RX index by one will let the device to see all slots
++ * as free again
++ */
++ /*
++ *TODO: How to increase rx_drop in mac80211?
++ */
++ current_slot = ring->ops->get_current_rxslot(ring);
++ previous_slot = prev_slot(ring, current_slot);
++ ring->ops->set_current_rxslot(ring, previous_slot);
++}
++
+ void b43_dma_rx(struct b43_dmaring *ring)
+ {
+ const struct b43_dma_ops *ops = ring->ops;
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -9,7 +9,7 @@
+ /* DMA-Interrupt reasons. */
+ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
+ | (1 << 14) | (1 << 15))
+-#define B43_DMAIRQ_NONFATALMASK (1 << 13)
++#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
+ #define B43_DMAIRQ_RX_DONE (1 << 16)
+
+ /*** 32-bit DMA Engine. ***/
+@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
+ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ const struct b43_txstatus *status);
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
++
+ void b43_dma_rx(struct b43_dmaring *ring);
+
+ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -1895,30 +1895,18 @@ static void b43_do_interrupt_thread(stru
+ }
+ }
+
+- if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
+- B43_DMAIRQ_NONFATALMASK))) {
+- if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
+- b43err(dev->wl, "Fatal DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- b43err(dev->wl, "This device does not support DMA "
++ if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
++ b43err(dev->wl,
++ "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
++ dma_reason[0], dma_reason[1],
++ dma_reason[2], dma_reason[3],
++ dma_reason[4], dma_reason[5]);
++ b43err(dev->wl, "This device does not support DMA "
+ "on your system. It will now be switched to PIO.\n");
+- /* Fall back to PIO transfers if we get fatal DMA errors! */
+- dev->use_pio = true;
+- b43_controller_restart(dev, "DMA error");
+- return;
+- }
+- if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
+- b43err(dev->wl, "DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- }
++ /* Fall back to PIO transfers if we get fatal DMA errors! */
++ dev->use_pio = true;
++ b43_controller_restart(dev, "DMA error");
++ return;
+ }
+
+ if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
+@@ -1937,6 +1925,11 @@ static void b43_do_interrupt_thread(stru
+ handle_irq_noise(dev);
+
+ /* Check the DMA reason registers for received data. */
++ if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
++ if (B43_DEBUG)
++ b43warn(dev->wl, "RX descriptor underrun\n");
++ b43_dma_handle_rx_overflow(dev->dma.rx_ring);
++ }
+ if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
+ if (b43_using_pio_transfers(dev))
+ b43_pio_rx(dev->pio.rx_queue);
+@@ -1994,7 +1987,7 @@ static irqreturn_t b43_do_interrupt(stru
+ return IRQ_NONE;
+
+ dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
+- & 0x0001DC00;
++ & 0x0001FC00;
+ dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
+ & 0x0000DC00;
+ dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
+@@ -3126,7 +3119,7 @@ static int b43_chip_init(struct b43_wlde
+ b43_write32(dev, 0x018C, 0x02000000);
+ }
+ b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
+- b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
++ b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
+ b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
--- /dev/null
+From a9b054e8ab06504c2afa0e307ee78d3778993a1d Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Thu, 2 May 2013 09:43:05 +0200
+Subject: drm: don't check modeset locks in panic handler
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit a9b054e8ab06504c2afa0e307ee78d3778993a1d upstream.
+
+Since we know that locking is broken in that case and it's more
+important to not flood the dmesg with random gunk.
+
+References: http://lkml.kernel.org/r/20130502000206.GH15623@pd.tnic
+Cc: Dave Airlie <airlied@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Reported-and-tested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_crtc.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(
+ {
+ struct drm_crtc *crtc;
+
++ /* Locking is currently fubar in the panic handler. */
++ if (oops_in_progress)
++ return;
++
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ WARN_ON(!mutex_is_locked(&crtc->mutex));
+
--- /dev/null
+From 1ffc5289bfcf7f4c4e4213240bb4be68c48ce603 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Tue, 7 May 2013 18:54:05 +0300
+Subject: drm/i915: clear the stolen fb before resuming
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 1ffc5289bfcf7f4c4e4213240bb4be68c48ce603 upstream.
+
+Similar to
+commit 88afe715dd5469bc24ca7a19ac62dd3c241cab48
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sun Dec 16 12:15:41 2012 +0000
+
+ drm/i915: Clear the stolen fb before enabling
+
+but on the resume path.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=57191
+Reported-and-tested-by: Nikolay Amiantov <nikoamia@gmail.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_fb.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -261,10 +261,22 @@ void intel_fbdev_fini(struct drm_device
+ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- if (!dev_priv->fbdev)
++ struct intel_fbdev *ifbdev = dev_priv->fbdev;
++ struct fb_info *info;
++
++ if (!ifbdev)
+ return;
+
+- fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
++ info = ifbdev->helper.fbdev;
++
++ /* On resume from hibernation: If the object is shmemfs backed, it has
++ * been restored from swap. If the object is stolen however, it will be
++ * full of whatever garbage was left in there.
++ */
++ if (!state && ifbdev->ifb.obj->stolen)
++ memset_io(info->screen_base, 0, info->screen_size);
++
++ fb_set_suspend(info, state);
+ }
+
+ MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+From 9f1d036648c1c5ed81b0e98d7a06d55df972701e Mon Sep 17 00:00:00 2001
+From: Christopher Harvey <charvey@matrox.com>
+Date: Wed, 8 May 2013 19:10:38 +0000
+Subject: drm/mgag200: Fix framebuffer base address programming
+
+From: Christopher Harvey <charvey@matrox.com>
+
+commit 9f1d036648c1c5ed81b0e98d7a06d55df972701e upstream.
+
+The higher bits of the framebuffer base address weren't being
+programmed properly. As a result, framebuffers that didn't happen to be
+allocated at a low enough address were not displayed properly.
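+
+As a worked example of the register layout described in the new comment
+below (assuming a 32-bit framebuffer): an object at offset 0xA00000
+gives addr = offset / 8 = 0x140000, so bit 20 of addr sets CRTCEXT0<6>,
+bits 19-16 (0x4) go to CRTCEXT0<3-0>, and CRTCC/CRTCD both get 0x00.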
+
+Signed-off-by: Christopher Harvey <charvey@matrox.com>
+Signed-off-by: Mathieu Larouche <mathieu.larouche@matrox.com>
+Acked-by: Julia Lemire <jlemire@matrox.com>
+Tested-by: Julia Lemire <jlemire@matrox.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_mode.c | 27 ++++++++++++++++++++++++---
+ 1 file changed, 24 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -657,12 +657,26 @@ static void mga_g200wb_commit(struct drm
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+ }
+
+-
++/*
++ This is how the framebuffer base address is stored in g200 cards:
++ * Assume @offset is the gpu_addr variable of the framebuffer object
++ * Then addr is the number of _pixels_ (not bytes) from the start of
++ VRAM to the first pixel we want to display. (divided by 2 for 32bit
++ framebuffers)
++ * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
++ addr<20> -> CRTCEXT0<6>
++ addr<19-16> -> CRTCEXT0<3-0>
++ addr<15-8> -> CRTCC<7-0>
++ addr<7-0> -> CRTCD<7-0>
++ CRTCEXT0 has to be programmed last to trigger an update and make the
++ new addr variable take effect.
++ */
+ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+ {
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u32 addr;
+ int count;
++ u8 crtcext0;
+
+ while (RREG8(0x1fda) & 0x08);
+ while (!(RREG8(0x1fda) & 0x08));
+@@ -670,10 +684,17 @@ void mga_set_start_address(struct drm_cr
+ count = RREG8(MGAREG_VCOUNT) + 2;
+ while (RREG8(MGAREG_VCOUNT) < count);
+
+- addr = offset >> 2;
++ WREG8(MGAREG_CRTCEXT_INDEX, 0);
++ crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
++ crtcext0 &= 0xB0;
++ addr = offset / 8;
++ /* Can't store addresses any higher than that...
++ but we also don't have more than 16MB of memory, so it should be fine. */
++ WARN_ON(addr > 0x1fffff);
++ crtcext0 |= (!!(addr & (1<<20)))<<6;
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+ WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+- WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
++ WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
+ }
+
+
--- /dev/null
+From fb70a6690875315a3a1454e52fa339441ee7612b Mon Sep 17 00:00:00 2001
+From: Christopher Harvey <charvey@matrox.com>
+Date: Fri, 12 Apr 2013 22:24:05 +0000
+Subject: drm/mgag200: Fix writes into MGA1064_PIX_CLK_CTL register
+
+From: Christopher Harvey <charvey@matrox.com>
+
+commit fb70a6690875315a3a1454e52fa339441ee7612b upstream.
+
+The original line,
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+wrote tmp into MGA1064_PIX_CLK_CTL_CLK_DIS, where
+MGA1064_PIX_CLK_CTL_CLK_DIS is an offset into
+MGA1064_PIX_CLK_CTL. Change the line to write properly into
+MGA1064_PIX_CLK_CTL. There were other chunks of code nearby that use
+the same pattern (but work correctly), so this patch updates them all
+to use this new (slightly more efficient) write pattern. The WREG_DAC
+macro was causing the DAC_INDEX register to be set to the same value
+twice. WREG8(DAC_DATA, foo) takes advantage of the fact that DAC_INDEX
+is already at the value we want.
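+
+For illustration only (a hedged sketch of the indexed-write pattern, not
+quoted from the driver headers), WREG_DAC(reg, val) behaves roughly like:
+
+  WREG8(DAC_INDEX, reg);  /* select the DAC register */
+  WREG8(DAC_DATA, val);   /* write its value */
+
+so once DAC_INDEX has been set explicitly, a single WREG8(DAC_DATA, tmp)
+is sufficient, and passing a bit mask as the "reg" argument (as the old
+line did) selects the wrong register.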
+
+Signed-off-by: Christopher Harvey <charvey@matrox.com>
+Acked-by: Julia Lemire <jlemire@matrox.com>
+Tested-by: Julia Lemire <jlemire@matrox.com>
+Acked-by: Mathieu Larouche <mathieu.larouche@matrox.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_mode.c | 42 ++++++++++++++++-----------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -189,12 +189,12 @@ static int mga_g200wb_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+- WREG_DAC(MGA1064_REMHEADCTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+@@ -204,7 +204,7 @@ static int mga_g200wb_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+@@ -212,7 +212,7 @@ static int mga_g200wb_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+- WREG_DAC(MGA1064_VREF_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ udelay(50);
+
+@@ -236,13 +236,13 @@ static int mga_g200wb_set_plls(struct mg
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+- WREG_DAC(MGA1064_REMHEADCTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+@@ -253,7 +253,7 @@ static int mga_g200wb_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+@@ -318,7 +318,7 @@ static int mga_g200ev_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+@@ -326,12 +326,12 @@ static int mga_g200ev_set_plls(struct mg
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+- WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
++ WREG8(DAC_DATA, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+@@ -342,7 +342,7 @@ static int mga_g200ev_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+@@ -350,11 +350,11 @@ static int mga_g200ev_set_plls(struct mg
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+- WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
++ WREG8(DAC_DATA, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+@@ -363,7 +363,7 @@ static int mga_g200ev_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ return 0;
+ }
+@@ -416,7 +416,7 @@ static int mga_g200eh_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+@@ -425,7 +425,7 @@ static int mga_g200eh_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+@@ -439,13 +439,13 @@ static int mga_g200eh_set_plls(struct mg
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+@@ -515,12 +515,12 @@ static int mga_g200er_set_plls(struct mg
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+- WREG_DAC(MGA1064_REMHEADCTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3<<2) | 0xc0;
+@@ -530,7 +530,7 @@ static int mga_g200er_set_plls(struct mg
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
++ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
--- /dev/null
+From 3a359f0b21ab218c1bf7a6a1b638b6fd143d0b99 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Sat, 20 Apr 2013 12:08:11 +0200
+Subject: drm/mm: fix dump table BUG
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 3a359f0b21ab218c1bf7a6a1b638b6fd143d0b99 upstream.
+
+In
+
+commit 9e8944ab564f2e3dde90a518cd32048c58918608
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu Nov 15 11:32:17 2012 +0000
+
+ drm: Introduce an iterator over holes in the drm_mm range manager
+
+helpers and iterators for hole handling were introduced, with some
+debug BUG_ONs sprinkled on top. Unfortunately this broke the mm dumper,
+which unconditionally tried to compute the size of the very first
+hole.
+
+While at it, unify the code a bit with the hole dumping in the loop.
+
+v2: Extract a hole dump helper.
+
+Reported-by: Christopher Harvey <charvey@matrox.com>
+Cc: Christopher Harvey <charvey@matrox.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Acked-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_mm.c | 34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/drm_mm.c
++++ b/drivers/gpu/drm/drm_mm.c
+@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *m
+ EXPORT_SYMBOL(drm_mm_debug_table);
+
+ #if defined(CONFIG_DEBUG_FS)
+-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
++static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+ {
+- struct drm_mm_node *entry;
+- unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+- hole_start = drm_mm_hole_node_start(&mm->head_node);
+- hole_end = drm_mm_hole_node_end(&mm->head_node);
+- hole_size = hole_end - hole_start;
+- if (hole_size)
++ if (entry->hole_follows) {
++ hole_start = drm_mm_hole_node_start(entry);
++ hole_end = drm_mm_hole_node_end(entry);
++ hole_size = hole_end - hole_start;
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+- total_free += hole_size;
++ return hole_size;
++ }
++
++ return 0;
++}
++
++int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
++{
++ struct drm_mm_node *entry;
++ unsigned long total_used = 0, total_free = 0, total = 0;
++
++ total_free += drm_mm_dump_hole(m, &mm->head_node);
+
+ drm_mm_for_each_node(entry, mm) {
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+ entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+- if (entry->hole_follows) {
+- hole_start = drm_mm_hole_node_start(entry);
+- hole_end = drm_mm_hole_node_end(entry);
+- hole_size = hole_end - hole_start;
+- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+- hole_start, hole_end, hole_size);
+- total_free += hole_size;
+- }
++ total_free += drm_mm_dump_hole(m, entry);
+ }
+ total = total_free + total_used;
+
--- /dev/null
+From e6155736ad76b2070652745f9e54cdea3f0d8567 Mon Sep 17 00:00:00 2001
+From: Lachlan McIlroy <lmcilroy@redhat.com>
+Date: Sun, 5 May 2013 23:10:00 -0400
+Subject: ext4: limit group search loop for non-extent files
+
+From: Lachlan McIlroy <lmcilroy@redhat.com>
+
+commit e6155736ad76b2070652745f9e54cdea3f0d8567 upstream.
+
+In the case where we are allocating for a non-extent file,
+we must limit the groups we allocate from to those below
+2^32 blocks, and ext4_mb_regular_allocator() attempts to
+do this initially by putting a cap on ngroups for the
+subsequent search loop.
+
+However, the initial target group comes in from the
+allocation context (ac), and it may already be beyond
+the artificially limited ngroups. In this case,
+the limit
+
+ if (group == ngroups)
+ group = 0;
+
+at the top of the loop is never true, and the loop will
+run away.
+
+Catch this case inside the loop and reset the search to
+start at group 0.
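+
+A made-up numeric illustration: if ngroups is capped at 8192 for the
+non-extent allocation but ac->ac_g_ex.fe_group is 9000, the loop starts
+with group > ngroups, the "group == ngroups" wrap check never fires, and
+group increments past the end of the group table.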
+
+[sandeen@redhat.com: add commit msg & comments]
+
+Signed-off-by: Lachlan McIlroy <lmcilroy@redhat.com>
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/mballoc.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1994,7 +1994,11 @@ repeat:
+ group = ac->ac_g_ex.fe_group;
+
+ for (i = 0; i < ngroups; group++, i++) {
+- if (group == ngroups)
++ /*
++ * Artificially restricted ngroups for non-extent
++ * files makes group > ngroups possible on first loop.
++ */
++ if (group >= ngroups)
+ group = 0;
+
+ /* This now checks without needing the buddy page */
--- /dev/null
+From dd9c46408fdc07098333655ff27edf8cac8d9fcf Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 7 May 2013 18:07:06 +0200
+Subject: iwl4965: workaround connection regression on passive channel
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit dd9c46408fdc07098333655ff27edf8cac8d9fcf upstream.
+
+Jake reported that since commit 1672c0e31917f49d31d30d79067103432bc20cc7
+"mac80211: start auth/assoc timeout on frame status", he is unable to
+connect to his AP, which is configured to use a passive channel.
+
+After switching to a passive channel, the 4965 firmware drops any TX
+packet until it receives a beacon. Before commit 1672c0e3 we waited on
+the channel and retransmitted the packet after 200ms, which meant we
+received a beacon in the meantime and the association process succeeded.
+The new mac80211 behaviour causes any ASSOC frame to fail immediately on
+iwl4965, so we cannot associate.
+
+This patch restores the old mac80211 behaviour for iwl4965 by removing
+the IEEE80211_HW_REPORTS_TX_ACK_STATUS feature. The feature will be
+added back to the iwl4965 driver when a different, more complex
+workaround for this firmware issue is added.
+
+Bisected-by: Jake Edge <jake@lwn.net>
+Reported-and-tested-by: Jake Edge <jake@lwn.net>
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/iwlegacy/4965-mac.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/iwlegacy/4965-mac.c
+@@ -5740,8 +5740,7 @@ il4965_mac_setup_register(struct il_priv
+ hw->flags =
+ IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
+- IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
+- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
++ IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ if (il->cfg->sku & IL_SKU_N)
+ hw->flags |=
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
--- /dev/null
+From 48795424acff7215d5eac0b52793a2c1eb3a6283 Mon Sep 17 00:00:00 2001
+From: Bing Zhao <bzhao@marvell.com>
+Date: Mon, 6 May 2013 19:46:53 -0700
+Subject: mwifiex: clear is_suspended flag when interrupt is received early
+
+From: Bing Zhao <bzhao@marvell.com>
+
+commit 48795424acff7215d5eac0b52793a2c1eb3a6283 upstream.
+
+When the XO-4 with 8787 wireless is woken up due to wake-on-WLAN,
+mwifiex is often flooded with "not allowed while suspended" messages
+and the interface is unusable.
+
+[ 202.171609] int: sdio_ireg = 0x1
+[ 202.180700] info: mwifiex_process_hs_config: auto cancelling host
+ sleep since there is interrupt from the firmware
+[ 202.201880] event: wakeup device...
+[ 202.211452] event: hs_deactivated
+[ 202.514638] info: --- Rx: Data packet ---
+[ 202.514753] data: 4294957544 BSS(0-0): Data <= kernel
+[ 202.514825] PREP_CMD: device in suspended state
+[ 202.514839] data: dequeuing the packet ec7248c0 ec4869c0
+[ 202.514886] mwifiex_write_data_sync: not allowed while suspended
+[ 202.514886] host_to_card, write iomem (1) failed: -1
+[ 202.514917] mwifiex_write_data_sync: not allowed while suspended
+[ 202.514936] host_to_card, write iomem (2) failed: -1
+[ 202.514949] mwifiex_write_data_sync: not allowed while suspended
+[ 202.514965] host_to_card, write iomem (3) failed: -1
+[ 202.514976] mwifiex_write_data_async failed: 0xFFFFFFFF
+
+This can be readily reproduced when putting the XO-4 in a loop where
+it goes to sleep due to inactivity, but then wakes up due to an
+incoming ping. The error is hit within an hour or two.
+
+This issue happens when an interrupt comes in early while host sleep
+is still activated. The driver handles this case by auto-cancelling host
+sleep. However, the is_suspended flag is still set, which prevents any
+cmd or data from being sent to the firmware. Fix it by clearing the
+is_suspended flag in this path.
+
+Reported-by: Daniel Drake <dsd@laptop.org>
+Tested-by: Daniel Drake <dsd@laptop.org>
+Signed-off-by: Bing Zhao <bzhao@marvell.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/mwifiex/cmdevt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireless/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/mwifiex/cmdevt.c
+@@ -1176,6 +1176,7 @@ mwifiex_process_hs_config(struct mwifiex
+ adapter->if_ops.wakeup(adapter);
+ adapter->hs_activated = false;
+ adapter->is_hs_configured = false;
++ adapter->is_suspended = false;
+ mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ false);
--- /dev/null
+From f16fdc9d2dc1e5b270e9a08377587e831e0d36ac Mon Sep 17 00:00:00 2001
+From: Amitkumar Karwar <akarwar@marvell.com>
+Date: Mon, 6 May 2013 19:46:54 -0700
+Subject: mwifiex: fix memory leak issue when driver unload
+
+From: Amitkumar Karwar <akarwar@marvell.com>
+
+commit f16fdc9d2dc1e5b270e9a08377587e831e0d36ac upstream.
+
+After the unregister_netdevice() call, the request is queued and
+reg_state is changed to NETREG_UNREGISTERING.
+As we check for the NETREG_UNREGISTERED state, free_netdev() never
+gets executed, causing a memory leak.
+
+Initialize "dev->destructor" to free_netdev() to free the device
+data after unregistration.
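+
+For background (a rough sketch of the core-networking behaviour, not code
+from this driver): once dev->destructor is set, the unregister path frees
+the device itself, along the lines of
+
+  /* net/core/dev.c, netdev_run_todo(), once unregistration completes */
+  if (dev->destructor)
+          dev->destructor(dev);   /* -> free_netdev(dev) */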
+
+Reported-by: Daniel Drake <dsd@laptop.org>
+Tested-by: Daniel Drake <dsd@laptop.org>
+Signed-off-by: Amitkumar Karwar <akarwar@marvell.com>
+Signed-off-by: Bing Zhao <bzhao@marvell.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/mwifiex/cfg80211.c | 3 ---
+ drivers/net/wireless/mwifiex/main.c | 1 +
+ 2 files changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -2280,9 +2280,6 @@ int mwifiex_del_virtual_intf(struct wiph
+ if (wdev->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(wdev->netdev);
+
+- if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
+- free_netdev(wdev->netdev);
+-
+ /* Clear the priv in adapter */
+ priv->netdev = NULL;
+
+--- a/drivers/net/wireless/mwifiex/main.c
++++ b/drivers/net/wireless/mwifiex/main.c
+@@ -646,6 +646,7 @@ void mwifiex_init_priv_params(struct mwi
+ struct net_device *dev)
+ {
+ dev->netdev_ops = &mwifiex_netdev_ops;
++ dev->destructor = free_netdev;
+ /* Initialize private structure */
+ priv->current_key_index = 0;
+ priv->media_connected = false;
--- /dev/null
+From ccd384b10420ac81ba3fb9b0a7d18272c7173552 Mon Sep 17 00:00:00 2001
+From: Daniel Drake <dsd@laptop.org>
+Date: Wed, 8 May 2013 15:37:19 -0400
+Subject: mwifiex: fix setting of multicast filter
+
+From: Daniel Drake <dsd@laptop.org>
+
+commit ccd384b10420ac81ba3fb9b0a7d18272c7173552 upstream.
+
+A small bug in this code was causing the ALLMULTI filter to be set
+when in fact we were just wanting to program a selective multicast list
+to the hardware.
+
+Fix that bug and remove a redundant if condition in the code that
+follows.
+
+This fixes wakeup behaviour when multicast WOL is enabled. Previously,
+all multicast packets would wake up the system. Now, only those that the
+host intended to receive trigger wakeups.
+
+Signed-off-by: Daniel Drake <dsd@laptop.org>
+Acked-by: Bing Zhao <bzhao@marvell.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/mwifiex/sta_ioctl.c | 21 ++++++---------------
+ 1 file changed, 6 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -99,7 +99,7 @@ int mwifiex_request_set_multicast_list(s
+ } else {
+ /* Multicast */
+ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
+- if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
++ if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
+ dev_dbg(priv->adapter->dev,
+ "info: Enabling All Multicast!\n");
+ priv->curr_pkt_filter |=
+@@ -111,20 +111,11 @@ int mwifiex_request_set_multicast_list(s
+ dev_dbg(priv->adapter->dev,
+ "info: Set multicast list=%d\n",
+ mcast_list->num_multicast_addr);
+- /* Set multicast addresses to firmware */
+- if (old_pkt_filter == priv->curr_pkt_filter) {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- } else {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- }
++ /* Send multicast addresses to firmware */
++ ret = mwifiex_send_cmd_async(priv,
++ HostCmd_CMD_MAC_MULTICAST_ADR,
++ HostCmd_ACT_GEN_SET, 0,
++ mcast_list);
+ }
+ }
+ }
--- /dev/null
+From 120496ac2d2d60aee68d3123a68169502a85f4b5 Mon Sep 17 00:00:00 2001
+From: Robert Jennings <rcj@linux.vnet.ibm.com>
+Date: Tue, 7 May 2013 04:34:11 +0000
+Subject: powerpc: Bring all threads online prior to migration/hibernation
+
+From: Robert Jennings <rcj@linux.vnet.ibm.com>
+
+commit 120496ac2d2d60aee68d3123a68169502a85f4b5 upstream.
+
+This patch brings online all threads which are present but not online
+prior to migration/hibernation. After migration/hibernation those
+threads are taken back offline.
+
+During migration/hibernation all online CPUs must call H_JOIN; this is
+required by the hypervisor. Without this patch, threads that are offline
+(H_CEDE'd) will not be woken to make the H_JOIN call and the OS will be
+deadlocked (all threads either JOIN'd or CEDE'd).
+
+Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/rtas.h | 2
+ arch/powerpc/kernel/rtas.c | 113 +++++++++++++++++++++++++++++++
+ arch/powerpc/platforms/pseries/suspend.c | 22 ++++++
+ 3 files changed, 137 insertions(+)
+
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -262,6 +262,8 @@ extern void rtas_progress(char *s, unsig
+ extern void rtas_initialize(void);
+ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
+ extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
++extern int rtas_online_cpus_mask(cpumask_var_t cpus);
++extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
+ extern int rtas_ibm_suspend_me(struct rtas_args *);
+
+ struct rtc_time;
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/capability.h>
+ #include <linux/delay.h>
++#include <linux/cpu.h>
+ #include <linux/smp.h>
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void
+ __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
+ }
+
++enum rtas_cpu_state {
++ DOWN,
++ UP,
++};
++
++#ifndef CONFIG_SMP
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ if (!cpumask_empty(cpus)) {
++ cpumask_clear(cpus);
++ return -EINVAL;
++ } else
++ return 0;
++}
++#else
++/* On return cpumask will be altered to indicate CPUs changed.
++ * CPUs with states changed will be set in the mask,
++ * CPUs with status unchanged will be unset in the mask. */
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ int cpu;
++ int cpuret = 0;
++ int ret = 0;
++
++ if (cpumask_empty(cpus))
++ return 0;
++
++ for_each_cpu(cpu, cpus) {
++ switch (state) {
++ case DOWN:
++ cpuret = cpu_down(cpu);
++ break;
++ case UP:
++ cpuret = cpu_up(cpu);
++ break;
++ }
++ if (cpuret) {
++ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
++ __func__,
++ ((state == UP) ? "up" : "down"),
++ cpu, cpuret);
++ if (!ret)
++ ret = cpuret;
++ if (state == UP) {
++ /* clear bits for unchanged cpus, return */
++ cpumask_shift_right(cpus, cpus, cpu);
++ cpumask_shift_left(cpus, cpus, cpu);
++ break;
++ } else {
++ /* clear bit for unchanged cpu, continue */
++ cpumask_clear_cpu(cpu, cpus);
++ }
++ }
++ }
++
++ return ret;
++}
++#endif
++
++int rtas_online_cpus_mask(cpumask_var_t cpus)
++{
++ int ret;
++
++ ret = rtas_cpu_state_change_mask(UP, cpus);
++
++ if (ret) {
++ cpumask_var_t tmp_mask;
++
++ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
++ return ret;
++
++ /* Use tmp_mask to preserve cpus mask from first failure */
++ cpumask_copy(tmp_mask, cpus);
++ rtas_offline_cpus_mask(tmp_mask);
++ free_cpumask_var(tmp_mask);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(rtas_online_cpus_mask);
++
++int rtas_offline_cpus_mask(cpumask_var_t cpus)
++{
++ return rtas_cpu_state_change_mask(DOWN, cpus);
++}
++EXPORT_SYMBOL(rtas_offline_cpus_mask);
++
+ int rtas_ibm_suspend_me(struct rtas_args *args)
+ {
+ long state;
+@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ struct rtas_suspend_me_data data;
+ DECLARE_COMPLETION_ONSTACK(done);
++ cpumask_var_t offline_mask;
++ int cpuret;
+
+ if (!rtas_service_present("ibm,suspend-me"))
+ return -ENOSYS;
+@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args
+ return 0;
+ }
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ atomic_set(&data.working, 0);
+ atomic_set(&data.done, 0);
+ atomic_set(&data.error, 0);
+ data.token = rtas_token("ibm,suspend-me");
+ data.complete = &done;
++
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
++ cpuret = rtas_online_cpus_mask(offline_mask);
++ if (cpuret) {
++ pr_err("%s: Could not bring present CPUs online.\n", __func__);
++ atomic_set(&data.error, cpuret);
++ goto out;
++ }
++
+ stop_topology_update();
+
+ /* Call function on all CPUs. One of us will make the
+@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args
+
+ start_topology_update();
+
++ /* Take down CPUs not online prior to suspend */
++ cpuret = rtas_offline_cpus_mask(offline_mask);
++ if (cpuret)
++ pr_warn("%s: Could not restore CPUs to offline state.\n",
++ __func__);
++
++out:
++ free_cpumask_var(offline_mask);
+ return atomic_read(&data.error);
+ }
+ #else /* CONFIG_PPC_PSERIES */
+--- a/arch/powerpc/platforms/pseries/suspend.c
++++ b/arch/powerpc/platforms/pseries/suspend.c
+@@ -16,6 +16,7 @@
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
++#include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/suspend.h>
+ #include <linux/stat.h>
+@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct de
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ cpumask_var_t offline_mask;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ stream_id = simple_strtoul(buf, NULL, 16);
+
+ do {
+@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct de
+ } while (rc == -EAGAIN);
+
+ if (!rc) {
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask,
++ cpu_online_mask);
++ rc = rtas_online_cpus_mask(offline_mask);
++ if (rc) {
++ pr_err("%s: Could not bring present CPUs online.\n",
++ __func__);
++ goto out;
++ }
++
+ stop_topology_update();
+ rc = pm_suspend(PM_SUSPEND_MEM);
+ start_topology_update();
++
++ /* Take down CPUs not online prior to suspend */
++ if (!rtas_offline_cpus_mask(offline_mask))
++ pr_warn("%s: Could not restore CPUs to offline "
++ "state.\n", __func__);
+ }
+
+ stream_id = 0;
+
+ if (!rc)
+ rc = count;
++out:
++ free_cpumask_var(offline_mask);
+ return rc;
+ }
+
--- /dev/null
+From 79c66ce8f6448a3295a32efeac88c9debd7f7094 Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@au1.ibm.com>
+Date: Sun, 12 May 2013 15:04:53 +0000
+Subject: powerpc/kexec: Fix kexec when using VMX optimised memcpy
+
+From: Anton Blanchard <anton@au1.ibm.com>
+
+commit 79c66ce8f6448a3295a32efeac88c9debd7f7094 upstream.
+
+commit b3f271e86e5a (powerpc: POWER7 optimised memcpy using VMX and
+enhanced prefetch) uses VMX when it is safe to do so (i.e. not in
+interrupt). It also looks at the task struct to decide if we have to
+save the current task's VMX state.
+
+kexec calls memcpy() at a point where the task struct may have been
+overwritten by the new kexec segments. If it has been overwritten,
+then when memcpy -> enable_altivec looks up current->thread.regs->msr,
+we get a cryptic oops or lockup.
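+
+(The fix below also primes the kexec stack's thread_info.preempt_count
+with HARDIRQ_OFFSET; with that set, the optimised memcpy sees itself as
+running in interrupt context and stays on the plain, non-VMX copy path.)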
+
+I also notice we aren't initialising thread_info->cpu, which means
+smp_processor_id is broken. Fix that too.
+
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/machine_kexec_64.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -17,6 +17,7 @@
+ #include <linux/errno.h>
+ #include <linux/kernel.h>
+ #include <linux/cpu.h>
++#include <linux/hardirq.h>
+
+ #include <asm/page.h>
+ #include <asm/current.h>
+@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage
+ pr_debug("kexec: Starting switchover sequence.\n");
+
+ /* switch to a staticly allocated stack. Based on irq stack code.
++ * We setup preempt_count to avoid using VMX in memcpy.
+ * XXX: the task struct will likely be invalid once we do the copy!
+ */
+ kexec_stack.thread_info.task = current_thread_info()->task;
+ kexec_stack.thread_info.flags = 0;
++ kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
++ kexec_stack.thread_info.cpu = current_thread_info()->cpu;
+
+ /* We need a static PACA, too; copy this CPU's PACA over and switch to
+ * it. Also poison per_cpu_offset to catch anyone using non-static
scsi-sd-fix-array-cache-flushing-bug-causing-performance-problems.patch
audit-syscall-rules-are-not-applied-to-existing-processes-on-non-x86.patch
audit-vfs-fix-audit_inode-call-in-o_creat-case-of-do_last.patch
+time-revert-always_use_persistent_clock-compile-time-optimizaitons.patch
+timer-don-t-reinitialize-the-cpu-base-lock-during-cpu_up_prepare.patch
+tick-cleanup-nohz-per-cpu-data-on-cpu-down.patch
+tracing-fix-leaks-of-filter-preds.patch
+ext4-limit-group-search-loop-for-non-extent-files.patch
+x86-microcode-add-local-mutex-to-fix-physical-cpu-hot-add-deadlock.patch
+arm-7720-1-arm-v6-v7-cmpxchg64-shouldn-t-clear-upper-32-bits-of-the-old-new-value.patch
+powerpc-bring-all-threads-online-prior-to-migration-hibernation.patch
+powerpc-kexec-fix-kexec-when-using-vmx-optimised-memcpy.patch
+ath9k-fix-key-allocation-error-handling-for-powersave-keys.patch
+mwifiex-clear-is_suspended-flag-when-interrupt-is-received-early.patch
+mwifiex-fix-memory-leak-issue-when-driver-unload.patch
+mwifiex-fix-setting-of-multicast-filter.patch
+tile-support-new-tilera-hypervisor.patch
+b43-handle-dma-rx-descriptor-underrun.patch
+iwl4965-workaround-connection-regression-on-passive-channel.patch
+drm-mgag200-fix-writes-into-mga1064_pix_clk_ctl-register.patch
+drm-mgag200-fix-framebuffer-base-address-programming.patch
+drm-mm-fix-dump-table-bug.patch
+drm-don-t-check-modeset-locks-in-panic-handler.patch
+drm-i915-clear-the-stolen-fb-before-resuming.patch
--- /dev/null
+From 4b0c0f294f60abcdd20994a8341a95c8ac5eeb96 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 May 2013 15:02:50 +0200
+Subject: tick: Cleanup NOHZ per cpu data on cpu down
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 4b0c0f294f60abcdd20994a8341a95c8ac5eeb96 upstream.
+
+Prarit reported a crash on CPU offline/online. The reason is that on
+CPU down the NOHZ-related per-cpu data of the dead CPU is not cleaned
+up. If an interrupt happens at CPU online before the per-cpu tick
+device is registered, the irq_enter() check potentially sees stale data
+and dereferences a NULL pointer.
+
+Clean up the data after the CPU is dead.
+
+Reported-by: Prarit Bhargava <prarit@redhat.com>
+Cc: Mike Galbraith <bitbucket@online.de>
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1305031451561.2886@ionos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/tick-sched.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -904,7 +904,7 @@ void tick_cancel_sched_timer(int cpu)
+ hrtimer_cancel(&ts->sched_timer);
+ # endif
+
+- ts->nohz_mode = NOHZ_MODE_INACTIVE;
++ memset(ts, 0, sizeof(*ts));
+ }
+ #endif
+
--- /dev/null
+From c539914dcd9a68c63305e055b14115a6a19578a8 Mon Sep 17 00:00:00 2001
+From: Chris Metcalf <cmetcalf@tilera.com>
+Date: Thu, 2 May 2013 15:29:04 -0400
+Subject: tile: support new Tilera hypervisor
+
+From: Chris Metcalf <cmetcalf@tilera.com>
+
+commit c539914dcd9a68c63305e055b14115a6a19578a8 upstream.
+
+The Tilera hypervisor shipped in releases up through MDE 4.1 launches
+the client operating system (i.e. Linux) at privilege level 1 (PL1).
+Starting with MDE 4.2, as part of the work to enable KVM, the
+Tilera hypervisor launches Linux at PL2 instead.
+
+This commit makes the KERNEL_PL option default to 2 for tilegx, while
+still staying at 1 for tilepro, which doesn't have an updated hypervisor.
+It also explains how and when you might want to choose another value.
+In addition, we fix a small buglet in the on-chip Ethernet driver,
+where we were failing to use the KERNEL_PL constant in an API call.
+
+To make the transition cleaner, this change also provides the updated
+hv_init() API for the new hypervisor that supports announcing Linux's
+compiled-in PL, so the hypervisor can generate a suitable error in the
+case of a mismatched hypervisor and Linux binary.
+
+Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/tile/Kconfig | 14 ++++++++++----
+ arch/tile/include/hv/hypervisor.h | 27 ++++++++++++++++++++++++---
+ arch/tile/kernel/head_32.S | 2 +-
+ arch/tile/kernel/head_64.S | 12 +++++++++---
+ drivers/net/ethernet/tile/tilegx.c | 2 +-
+ 5 files changed, 45 insertions(+), 12 deletions(-)
+
+--- a/arch/tile/Kconfig
++++ b/arch/tile/Kconfig
+@@ -368,11 +368,17 @@ config HARDWALL
+ config KERNEL_PL
+ int "Processor protection level for kernel"
+ range 1 2
+- default "1"
++ default 2 if TILEGX
++ default 1 if !TILEGX
+ ---help---
+- This setting determines the processor protection level the
+- kernel will be built to run at. Generally you should use
+- the default value here.
++ Since MDE 4.2, the Tilera hypervisor runs the kernel
++ at PL2 by default. If running under an older hypervisor,
++ or as a KVM guest, you must run at PL1. (The current
++ hypervisor may also be recompiled with "make HV_PL=2" to
++ allow it to run a kernel at PL1, but clients running at PL1
++ are not expected to be supported indefinitely.)
++
++ If you're not sure, don't change the default.
+
+ source "arch/tile/gxio/Kconfig"
+
+--- a/arch/tile/include/hv/hypervisor.h
++++ b/arch/tile/include/hv/hypervisor.h
+@@ -107,7 +107,22 @@
+ #define HV_DISPATCH_ENTRY_SIZE 32
+
+ /** Version of the hypervisor interface defined by this file */
+-#define _HV_VERSION 11
++#define _HV_VERSION 13
++
++/** Last version of the hypervisor interface with old hv_init() ABI.
++ *
++ * The change from version 12 to version 13 corresponds to launching
++ * the client by default at PL2 instead of PL1 (corresponding to the
++ * hv itself running at PL3 instead of PL2). To make this explicit,
++ * the hv_init() API was also extended so the client can report its
++ * desired PL, resulting in a more helpful failure diagnostic. If you
++ * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl
++ * argument, the hypervisor will assume client_pl = 1.
++ *
++ * Note that this is a deprecated solution and we do not expect to
++ * support clients of the Tilera hypervisor running at PL1 indefinitely.
++ */
++#define _HV_VERSION_OLD_HV_INIT 12
+
+ /* Index into hypervisor interface dispatch code blocks.
+ *
+@@ -377,7 +392,11 @@ typedef int HV_Errno;
+ #ifndef __ASSEMBLER__
+
+ /** Pass HV_VERSION to hv_init to request this version of the interface. */
+-typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber;
++typedef enum {
++ HV_VERSION = _HV_VERSION,
++ HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT,
++
++} HV_VersionNumber;
+
+ /** Initializes the hypervisor.
+ *
+@@ -385,9 +404,11 @@ typedef enum { HV_VERSION = _HV_VERSION
+ * that this program expects, typically HV_VERSION.
+ * @param chip_num Architecture number of the chip the client was built for.
+ * @param chip_rev_num Revision number of the chip the client was built for.
++ * @param client_pl Privilege level the client is built for
++ * (not required if interface_version_number == HV_VERSION_OLD_HV_INIT).
+ */
+ void hv_init(HV_VersionNumber interface_version_number,
+- int chip_num, int chip_rev_num);
++ int chip_num, int chip_rev_num, int client_pl);
+
+
+ /** Queries we can make for hv_sysconf().
+--- a/arch/tile/kernel/head_32.S
++++ b/arch/tile/kernel/head_32.S
+@@ -38,7 +38,7 @@ ENTRY(_start)
+ movei r2, TILE_CHIP_REV
+ }
+ {
+- moveli r0, _HV_VERSION
++ moveli r0, _HV_VERSION_OLD_HV_INIT
+ jal hv_init
+ }
+ /* Get a reasonable default ASID in r0 */
+--- a/arch/tile/kernel/head_64.S
++++ b/arch/tile/kernel/head_64.S
+@@ -34,13 +34,19 @@
+ ENTRY(_start)
+ /* Notify the hypervisor of what version of the API we want */
+ {
++#if KERNEL_PL == 1 && _HV_VERSION == 13
++ /* Support older hypervisors by asking for API version 12. */
++ movei r0, _HV_VERSION_OLD_HV_INIT
++#else
++ movei r0, _HV_VERSION
++#endif
+ movei r1, TILE_CHIP
+- movei r2, TILE_CHIP_REV
+ }
+ {
+- moveli r0, _HV_VERSION
+- jal hv_init
++ movei r2, TILE_CHIP_REV
++ movei r3, KERNEL_PL
+ }
++ jal hv_init
+ /* Get a reasonable default ASID in r0 */
+ {
+ move r0, zero
+--- a/drivers/net/ethernet/tile/tilegx.c
++++ b/drivers/net/ethernet/tile/tilegx.c
+@@ -930,7 +930,7 @@ static int tile_net_setup_interrupts(str
+ if (info->has_iqueue) {
+ gxio_mpipe_request_notif_ring_interrupt(
+ &context, cpu_x(cpu), cpu_y(cpu),
+- 1, ingress_irq, info->iqueue.ring);
++ KERNEL_PL, ingress_irq, info->iqueue.ring);
+ }
+ }
+
--- /dev/null
+From b4f711ee03d28f776fd2324fd0bd999cc428e4d2 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Wed, 24 Apr 2013 11:32:56 -0700
+Subject: time: Revert ALWAYS_USE_PERSISTENT_CLOCK compile time optimizaitons
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit b4f711ee03d28f776fd2324fd0bd999cc428e4d2 upstream.
+
+Kay Sievers noted that the ALWAYS_USE_PERSISTENT_CLOCK config,
+which enables some minor compile-time optimization to avoid
+unnecessary code in mostly the suspend/resume path, could cause
+problems for userland.
+
+In particular, the dependency for RTC_HCTOSYS on
+!ALWAYS_USE_PERSISTENT_CLOCK, which avoids setting the time
+twice and simplifies suspend/resume, has the side effect
+of causing the /sys/class/rtc/rtcN/hctosys flag to always be
+zero, and this flag is commonly used by udev to setup the
+/dev/rtc symlink to /dev/rtcN, which can cause pain for
+older applications.
+
+While the udev rules could use some work to be less fragile,
+breaking userland should be strongly avoided. Additionally,
+the compile-time optimizations are fairly minor, and the code
+being optimized is likely to be reworked in the future, so
+let's revert this change.
+
+Reported-by: Kay Sievers <kay@vrfy.org>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Cc: Feng Tang <feng.tang@intel.com>
+Cc: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Link: http://lkml.kernel.org/r/1366828376-18124-1-git-send-email-john.stultz@linaro.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Kconfig | 1 -
+ drivers/rtc/Kconfig | 2 --
+ include/linux/time.h | 4 ----
+ kernel/time/Kconfig | 5 -----
+ 4 files changed, 12 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -107,7 +107,6 @@ config X86
+ select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
+ select GENERIC_TIME_VSYSCALL if X86_64
+ select KTIME_SCALAR if X86_32
+- select ALWAYS_USE_PERSISTENT_CLOCK
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select HAVE_CONTEXT_TRACKING if X86_64
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -20,7 +20,6 @@ if RTC_CLASS
+ config RTC_HCTOSYS
+ bool "Set system time from RTC on startup and resume"
+ default y
+- depends on !ALWAYS_USE_PERSISTENT_CLOCK
+ help
+ If you say yes here, the system time (wall clock) will be set using
+ the value read from a specified RTC device. This is useful to avoid
+@@ -29,7 +28,6 @@ config RTC_HCTOSYS
+ config RTC_SYSTOHC
+ bool "Set the RTC time based on NTP synchronization"
+ default y
+- depends on !ALWAYS_USE_PERSISTENT_CLOCK
+ help
+ If you say yes here, the system time (wall clock) will be stored
+ in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
+--- a/include/linux/time.h
++++ b/include/linux/time.h
+@@ -117,14 +117,10 @@ static inline bool timespec_valid_strict
+
+ extern bool persistent_clock_exist;
+
+-#ifdef ALWAYS_USE_PERSISTENT_CLOCK
+-#define has_persistent_clock() true
+-#else
+ static inline bool has_persistent_clock(void)
+ {
+ return persistent_clock_exist;
+ }
+-#endif
+
+ extern void read_persistent_clock(struct timespec *ts);
+ extern void read_boot_clock(struct timespec *ts);
+--- a/kernel/time/Kconfig
++++ b/kernel/time/Kconfig
+@@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG
+ config ARCH_CLOCKSOURCE_DATA
+ bool
+
+-# Platforms has a persistent clock
+-config ALWAYS_USE_PERSISTENT_CLOCK
+- bool
+- default n
+-
+ # Timekeeping vsyscall support
+ config GENERIC_TIME_VSYSCALL
+ bool
--- /dev/null
+From 42a5cf46cd56f46267d2a9fcf2655f4078cd3042 Mon Sep 17 00:00:00 2001
+From: Tirupathi Reddy <tirupath@codeaurora.org>
+Date: Tue, 14 May 2013 13:59:02 +0530
+Subject: timer: Don't reinitialize the cpu base lock during CPU_UP_PREPARE
+
+From: Tirupathi Reddy <tirupath@codeaurora.org>
+
+commit 42a5cf46cd56f46267d2a9fcf2655f4078cd3042 upstream.
+
+An inactive timer's base can refer to an offline CPU's base.
+
+In the current code, cpu_base's lock is blindly reinitialized each
+time a CPU is brought up. If a CPU is brought online during the period
+that another thread is trying to modify an inactive timer on that CPU
+while holding its timer base lock, then the lock will be reinitialized
+under its feet. This leads to the following SPIN_BUG().
+
+<0> BUG: spinlock already unlocked on CPU#3, kworker/u:3/1466
+<0> lock: 0xe3ebe000, .magic: dead4ead, .owner: kworker/u:3/1466, .owner_cpu: 1
+<4> [<c0013dc4>] (unwind_backtrace+0x0/0x11c) from [<c026e794>] (do_raw_spin_unlock+0x40/0xcc)
+<4> [<c026e794>] (do_raw_spin_unlock+0x40/0xcc) from [<c076c160>] (_raw_spin_unlock+0x8/0x30)
+<4> [<c076c160>] (_raw_spin_unlock+0x8/0x30) from [<c009b858>] (mod_timer+0x294/0x310)
+<4> [<c009b858>] (mod_timer+0x294/0x310) from [<c00a5e04>] (queue_delayed_work_on+0x104/0x120)
+<4> [<c00a5e04>] (queue_delayed_work_on+0x104/0x120) from [<c04eae00>] (sdhci_msm_bus_voting+0x88/0x9c)
+<4> [<c04eae00>] (sdhci_msm_bus_voting+0x88/0x9c) from [<c04d8780>] (sdhci_disable+0x40/0x48)
+<4> [<c04d8780>] (sdhci_disable+0x40/0x48) from [<c04bf300>] (mmc_release_host+0x4c/0xb0)
+<4> [<c04bf300>] (mmc_release_host+0x4c/0xb0) from [<c04c7aac>] (mmc_sd_detect+0x90/0xfc)
+<4> [<c04c7aac>] (mmc_sd_detect+0x90/0xfc) from [<c04c2504>] (mmc_rescan+0x7c/0x2c4)
+<4> [<c04c2504>] (mmc_rescan+0x7c/0x2c4) from [<c00a6a7c>] (process_one_work+0x27c/0x484)
+<4> [<c00a6a7c>] (process_one_work+0x27c/0x484) from [<c00a6e94>] (worker_thread+0x210/0x3b0)
+<4> [<c00a6e94>] (worker_thread+0x210/0x3b0) from [<c00aad9c>] (kthread+0x80/0x8c)
+<4> [<c00aad9c>] (kthread+0x80/0x8c) from [<c000ea80>] (kernel_thread_exit+0x0/0x8)
+
+As an example, this particular crash occurred when CPU #3 was executing
+mod_timer() on an inactive timer whose base referred to the offlined CPU
+#2. The code locked the timer_base corresponding to CPU #2. Before it
+could proceed, CPU #2 came online and reinitialized the spinlock
+corresponding to its base. CPU #3 was thus holding a lock which had been
+reinitialized under it. When CPU #3 finally unlocked the old cpu_base
+corresponding to CPU #2, we hit the above SPIN_BUG().
+
+CPU #0            CPU #3                                    CPU #2
+------            ------                                    ------
+.....             ......                                    <Offline>
+                  mod_timer()
+                   lock_timer_base
+                    spin_lock_irqsave(&base->lock)
+
+cpu_up(2)          .....                                     ......
+                                                             init_timers_cpu()
+....               .....                                      spin_lock_init(&base->lock)
+.....              spin_unlock_irqrestore(&base->lock)       ......
+                    <spin_bug>
+
+Allocation of the per-CPU timer vector bases is done only once, under
+the "tvec_base_done[]" check. In the current code, the spinlock
+initialization of base->lock isn't under this check, so the base lock
+is reinitialized every time a CPU comes up. Move the base spinlock
+initialization under the check.
+
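+A minimal userspace model of the fixed flow (pthread mutexes standing
+in for the kernel spinlock, all names invented for illustration):
+
+	#include <pthread.h>
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	#define NR_CPUS 4
+
+	struct base_model {
+		pthread_mutex_t lock;
+		/* timer wheel omitted */
+	};
+
+	static struct base_model bases[NR_CPUS];
+	static bool base_done[NR_CPUS];
+
+	/* Initialize the per-cpu base lock exactly once, under the "done"
+	 * check, so a later CPU-up can never clobber a lock that another
+	 * context may currently hold. */
+	static void init_timers_cpu_model(int cpu)
+	{
+		if (!base_done[cpu]) {
+			pthread_mutex_init(&bases[cpu].lock, NULL);
+			base_done[cpu] = true;
+		}
+		/* the bug: with the init done here instead, every CPU-up reran it */
+	}
+
+	int main(void)
+	{
+		init_timers_cpu_model(2);
+		init_timers_cpu_model(2);	/* second "CPU up": lock left alone */
+		printf("base 2 lock initialized once\n");
+		return 0;
+	}
+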
+Signed-off-by: Tirupathi Reddy <tirupath@codeaurora.org>
+Link: http://lkml.kernel.org/r/1368520142-4136-1-git-send-email-tirupath@codeaurora.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/timer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1678,12 +1678,12 @@ static int __cpuinit init_timers_cpu(int
+ boot_done = 1;
+ base = &boot_tvec_bases;
+ }
++ spin_lock_init(&base->lock);
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
+ }
+
+- spin_lock_init(&base->lock);
+
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
--- /dev/null
+From 60705c89460fdc7227f2d153b68b3f34814738a4 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Tue, 14 May 2013 15:40:48 -0400
+Subject: tracing: Fix leaks of filter preds
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 60705c89460fdc7227f2d153b68b3f34814738a4 upstream.
+
+Special preds are created when folding a series of preds that
+can be processed serially. These are allocated in the ops field of
+the pred structure, but they were never freed, causing memory
+leaks.
+
+This was discovered using the kmemleak checker:
+
+unreferenced object 0xffff8800797fd5e0 (size 32):
+ comm "swapper/0", pid 1, jiffies 4294690605 (age 104.608s)
+ hex dump (first 32 bytes):
+ 00 00 01 00 03 00 05 00 07 00 09 00 0b 00 0d 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<ffffffff814b52af>] kmemleak_alloc+0x73/0x98
+ [<ffffffff8111ff84>] kmemleak_alloc_recursive.constprop.42+0x16/0x18
+ [<ffffffff81120e68>] __kmalloc+0xd7/0x125
+ [<ffffffff810d47eb>] kcalloc.constprop.24+0x2d/0x2f
+ [<ffffffff810d4896>] fold_pred_tree_cb+0xa9/0xf4
+ [<ffffffff810d3781>] walk_pred_tree+0x47/0xcc
+ [<ffffffff810d5030>] replace_preds.isra.20+0x6f8/0x72f
+ [<ffffffff810d50b5>] create_filter+0x4e/0x8b
+ [<ffffffff81b1c30d>] ftrace_test_event_filter+0x5a/0x155
+ [<ffffffff8100028d>] do_one_initcall+0xa0/0x137
+ [<ffffffff81afbedf>] kernel_init_freeable+0x14d/0x1dc
+ [<ffffffff814b24b7>] kernel_init+0xe/0xdb
+ [<ffffffff814d539c>] ret_from_fork+0x7c/0xb0
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
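+A small userspace model of the fix (names invented; the real code frees
+filter->preds[i].ops for each pred before freeing the preds array):
+
+	#include <stdio.h>
+	#include <stdlib.h>
+
+	struct pred_model {
+		int *ops;	/* per-element allocation made while folding preds */
+	};
+
+	/* Free each element's ops buffer before the array itself; dropping
+	 * only the array, as the old __free_preds() did, leaks every ops
+	 * allocation. */
+	static void free_preds_model(struct pred_model *preds, int n_preds)
+	{
+		int i;
+
+		for (i = 0; i < n_preds; i++)
+			free(preds[i].ops);
+		free(preds);
+	}
+
+	int main(void)
+	{
+		int i, n = 4;
+		struct pred_model *preds = calloc(n, sizeof(*preds));
+
+		for (i = 0; i < n; i++)
+			preds[i].ops = calloc(8, sizeof(int));	/* "special" folded preds */
+
+		free_preds_model(preds, n);
+		printf("freed %d preds and their ops\n", n);
+		return 0;
+	}
+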
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Tom Zanussi <tzanussi@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_filter.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -777,7 +777,11 @@ static int filter_set_pred(struct event_
+
+ static void __free_preds(struct event_filter *filter)
+ {
++ int i;
++
+ if (filter->preds) {
++ for (i = 0; i < filter->n_preds; i++)
++ kfree(filter->preds[i].ops);
+ kfree(filter->preds);
+ filter->preds = NULL;
+ }
--- /dev/null
+From 074d72ff57f65de779e2f70d5906964c0ba1c123 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 8 May 2013 12:13:03 -0400
+Subject: x86/microcode: Add local mutex to fix physical CPU hot-add deadlock
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 074d72ff57f65de779e2f70d5906964c0ba1c123 upstream.
+
+This can easily be triggered if a new CPU is added (via the
+ACPI hotplug mechanism) and from user space you do:
+
+ echo 1 > /sys/devices/system/cpu/cpu3/online
+
+(or wait for udev to do it) on a newly added physical CPU.
+
+The deadlock is that the "store_online" in drivers/base/cpu.c
+takes the cpu_hotplug_driver_lock() lock, then calls "cpu_up".
+"cpu_up" eventually ends up calling "save_mc_for_early"
+which also takes the cpu_hotplug_driver_lock() lock.
+
+And here is what lockdep thinks of it:
+
+ smpboot: Stack at about ffff880075c39f44
+ smpboot: CPU3: has booted.
+ microcode: CPU3 sig=0x206a7, pf=0x2, revision=0x25
+
+ =============================================
+ [ INFO: possible recursive locking detected ]
+ 3.9.0upstream-10129-g167af0e #1 Not tainted
+ ---------------------------------------------
+ sh/2487 is trying to acquire lock:
+ (x86_cpu_hotplug_driver_mutex){+.+.+.}, at: [<ffffffff81075512>] cpu_hotplug_driver_lock+0x12/0x20
+
+ but task is already holding lock:
+ (x86_cpu_hotplug_driver_mutex){+.+.+.}, at: [<ffffffff81075512>] cpu_hotplug_driver_lock+0x12/0x20
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(x86_cpu_hotplug_driver_mutex);
+ lock(x86_cpu_hotplug_driver_mutex);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+ 6 locks held by sh/2487:
+ #0: (sb_writers#5){.+.+.+}, at: [<ffffffff811ca48d>] vfs_write+0x17d/0x190
+ #1: (&buffer->mutex){+.+.+.}, at: [<ffffffff812464ef>] sysfs_write_file+0x3f/0x160
+ #2: (s_active#20){.+.+.+}, at: [<ffffffff81246578>] sysfs_write_file+0xc8/0x160
+ #3: (x86_cpu_hotplug_driver_mutex){+.+.+.}, at: [<ffffffff81075512>] cpu_hotplug_driver_lock+0x12/0x20
+ #4: (cpu_add_remove_lock){+.+.+.}, at: [<ffffffff810961c2>] cpu_maps_update_begin+0x12/0x20
+ #5: (cpu_hotplug.lock){+.+.+.}, at: [<ffffffff810962a7>] cpu_hotplug_begin+0x27/0x60
+
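+The shape of the fix, modelled in userspace with pthread mutexes (all
+names invented): save_mc_for_early() stops re-taking the hotplug driver
+lock its caller already holds and serializes the microcode cache with a
+dedicated local mutex instead:
+
+	#include <pthread.h>
+	#include <stdio.h>
+
+	static pthread_mutex_t hotplug_driver_lock = PTHREAD_MUTEX_INITIALIZER;
+	static pthread_mutex_t microcode_mutex = PTHREAD_MUTEX_INITIALIZER; /* new local lock */
+	static int mc_saved_count;
+
+	static void save_mc_for_early_model(void)
+	{
+		pthread_mutex_lock(&microcode_mutex);	/* was: hotplug_driver_lock -> self-deadlock */
+		mc_saved_count++;			/* update the shared microcode cache */
+		pthread_mutex_unlock(&microcode_mutex);
+	}
+
+	static void cpu_up_model(void)
+	{
+		pthread_mutex_lock(&hotplug_driver_lock);	/* taken by store_online/cpu_up */
+		save_mc_for_early_model();			/* no longer re-acquires it */
+		pthread_mutex_unlock(&hotplug_driver_lock);
+	}
+
+	int main(void)
+	{
+		cpu_up_model();
+		printf("saved %d microcode blobs without deadlocking\n", mc_saved_count);
+		return 0;
+	}
+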
+Suggested-and-Acked-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: fenghua.yu@intel.com
+Cc: xen-devel@lists.xensource.com
+Link: http://lkml.kernel.org/r/1368029583-23337-1-git-send-email-konrad.wilk@oracle.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/microcode_intel_early.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/microcode_intel_early.c
++++ b/arch/x86/kernel/microcode_intel_early.c
+@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
+ #endif
+
+ #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
++static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+ /*
+ * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
+ * hot added or resumes.
+@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
+ * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
+ * hotplug.
+ */
+- cpu_hotplug_driver_lock();
++ mutex_lock(&x86_cpu_microcode_mutex);
+
+ mc_saved_count_init = mc_saved_data.mc_saved_count;
+ mc_saved_count = mc_saved_data.mc_saved_count;
+@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
+ }
+
+ out:
+- cpu_hotplug_driver_unlock();
++ mutex_unlock(&x86_cpu_microcode_mutex);
+
+ return ret;
+ }