--- /dev/null
+From 1dc831bf53fddcc6443f74a39e72db5bcea4f15d Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Date: Wed, 21 Nov 2012 00:19:06 -0700
+Subject: ARM: Kirkwood: Update PCI-E fixup
+
+From: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+
+commit 1dc831bf53fddcc6443f74a39e72db5bcea4f15d upstream.
+
+- The code relies on rc_pci_fixup being called, which only happens
+ when CONFIG_PCI_QUIRKS is enabled, so add that to Kconfig. Omitting
+ this causes a booting failure with a non-obvious cause.
+- Update rc_pci_fixup to set the class properly, copying the
+ more modern style from other places
+- Correct the rc_pci_fixup comment
+
+Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Signed-off-by: Jason Cooper <jason@lakedaemon.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/mach-kirkwood/pcie.c | 11 ++++++++---
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -585,6 +585,7 @@ config ARCH_KIRKWOOD
+ bool "Marvell Kirkwood"
+ select CPU_FEROCEON
+ select PCI
++ select PCI_QUIRKS
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_CLOCKEVENTS
+ select NEED_MACH_IO_H
+--- a/arch/arm/mach-kirkwood/pcie.c
++++ b/arch/arm/mach-kirkwood/pcie.c
+@@ -225,14 +225,19 @@ static int __init kirkwood_pcie_setup(in
+ return 1;
+ }
+
++/*
++ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
++ * is operating as a root complex this needs to be switched to
++ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
++ * the device. Decoding setup is handled by the orion code.
++ */
+ static void __devinit rc_pci_fixup(struct pci_dev *dev)
+ {
+- /*
+- * Prevent enumeration of root complex.
+- */
+ if (dev->bus->parent == NULL && dev->devfn == 0) {
+ int i;
+
++ dev->class &= 0xff;
++ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
--- /dev/null
+From 5d3df935426271016b895aecaa247101b4bfa35e Mon Sep 17 00:00:00 2001
+From: Russell King - ARM Linux <linux@arm.linux.org.uk>
+Date: Sun, 18 Nov 2012 16:29:44 +0000
+Subject: Dove: Attempt to fix PMU/RTC interrupts
+
+From: Russell King - ARM Linux <linux@arm.linux.org.uk>
+
+commit 5d3df935426271016b895aecaa247101b4bfa35e upstream.
+
+Fix the acknowledgement of PMU interrupts on Dove: some Dove hardware
+has not been sensibly designed so that interrupts can be handled in a
+race free manner. The PMU is one such instance.
+
+The pending (aka 'cause') register is a bunch of RW bits, meaning that
+these bits can be both cleared and set by software (confirmed on the
+Armada-510 on the cubox.)
+
+Hardware sets the appropriate bit when an interrupt is asserted, and
+software is required to clear the bits which are to be processed. If
+we write ~(1 << bit), then we end up asserting every other interrupt
+except the one we're processing. So, we need to do a read-modify-write
+cycle to clear the asserted bit.
+
+However, any interrupts which occur in the middle of this cycle will
+also be written back as zero, which will also clear the new interrupts.
+
+The upshot of this is: there is _no_ way to safely clear down interrupts
+in this register (and other similarly behaving interrupt pending
+registers on this device.) The patch below at least stops us creating
+new interrupts.
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Jason Cooper <jason@lakedaemon.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-dove/irq.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/mach-dove/irq.c
++++ b/arch/arm/mach-dove/irq.c
+@@ -45,8 +45,20 @@ static void pmu_irq_ack(struct irq_data
+ int pin = irq_to_pmu(d->irq);
+ u32 u;
+
++ /*
++ * The PMU mask register is not RW0C: it is RW. This means that
++ * the bits take whatever value is written to them; if you write
++ * a '1', you will set the interrupt.
++ *
++ * Unfortunately this means there is NO race free way to clear
++ * these interrupts.
++ *
++ * So, let's structure the code so that the window is as small as
++ * possible.
++ */
+ u = ~(1 << (pin & 31));
+- writel(u, PMU_INTERRUPT_CAUSE);
++ u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
++ writel_relaxed(u, PMU_INTERRUPT_CAUSE);
+ }
+
+ static struct irq_chip pmu_irq_chip = {
--- /dev/null
+From d356cf5a74afa32b40decca3c9dd88bc3cd63eb5 Mon Sep 17 00:00:00 2001
+From: Russell King - ARM Linux <linux@arm.linux.org.uk>
+Date: Sun, 18 Nov 2012 16:39:32 +0000
+Subject: Dove: Fix irq_to_pmu()
+
+From: Russell King - ARM Linux <linux@arm.linux.org.uk>
+
+commit d356cf5a74afa32b40decca3c9dd88bc3cd63eb5 upstream.
+
+PMU interrupts start at IRQ_DOVE_PMU_START, not IRQ_DOVE_PMU_START + 1.
+Fix the condition. (It may have been less likely to occur had the code
+been written "if (irq >= IRQ_DOVE_PMU_START" which imho is the easier
+to understand notation, and matches the normal way of thinking about
+these things.)
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Jason Cooper <jason@lakedaemon.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-dove/include/mach/pm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mach-dove/include/mach/pm.h
++++ b/arch/arm/mach-dove/include/mach/pm.h
+@@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin)
+
+ static inline int irq_to_pmu(int irq)
+ {
+- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
++ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
+ return irq - IRQ_DOVE_PMU_START;
+
+ return -EINVAL;
--- /dev/null
+From 4a15903db02026728d0cf2755c6fabae16b8db6a Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 15 Aug 2012 17:13:53 -0400
+Subject: drm/radeon/dce4+: don't use radeon_crtc for vblank callback
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 4a15903db02026728d0cf2755c6fabae16b8db6a upstream.
+
+This might be called before we've allocated the radeon_crtcs
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/evergreen.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -37,6 +37,16 @@
+ #define EVERGREEN_PFP_UCODE_SIZE 1120
+ #define EVERGREEN_PM4_UCODE_SIZE 1376
+
++static const u32 crtc_offsets[6] =
++{
++ EVERGREEN_CRTC0_REGISTER_OFFSET,
++ EVERGREEN_CRTC1_REGISTER_OFFSET,
++ EVERGREEN_CRTC2_REGISTER_OFFSET,
++ EVERGREEN_CRTC3_REGISTER_OFFSET,
++ EVERGREEN_CRTC4_REGISTER_OFFSET,
++ EVERGREEN_CRTC5_REGISTER_OFFSET
++};
++
+ static void evergreen_gpu_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+@@ -109,17 +119,19 @@ void evergreen_fix_pci_max_read_req_size
+ */
+ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
+ {
+- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ int i;
+
+- if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
++ if (crtc >= rdev->num_crtc)
++ return;
++
++ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
+ for (i = 0; i < rdev->usec_timeout; i++) {
+- if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
++ if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+- if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
++ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ break;
+ udelay(1);
+ }
--- /dev/null
+From 62444b7462a2b98bc78d68736c03a7c4e66ba7e2 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 15 Aug 2012 17:18:42 -0400
+Subject: drm/radeon: properly handle mc_stop/mc_resume on evergreen+ (v2)
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 62444b7462a2b98bc78d68736c03a7c4e66ba7e2 upstream.
+
+- Stop the displays from accessing the FB
+- Block CPU access
+- Turn off MC client access
+
+This should fix issues some users have seen, especially
+with UEFI, when changing the MC FB location that result
+in hangs or display corruption.
+
+v2: fix crtc enabled check noticed by Luca Tettamanti
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/evergreen.c | 173 +++++++++++++++------------------
+ drivers/gpu/drm/radeon/evergreen_reg.h | 2
+ drivers/gpu/drm/radeon/evergreend.h | 7 +
+ drivers/gpu/drm/radeon/radeon_asic.h | 1
+ 4 files changed, 90 insertions(+), 93 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1241,116 +1241,103 @@ void evergreen_agp_enable(struct radeon_
+
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
++ u32 crtc_enabled, tmp, frame_count, blackout;
++ int i, j;
++
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+- /* Stop all video */
++ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+- }
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+- }
+-
+- WREG32(D1VGA_CONTROL, 0);
+- WREG32(D2VGA_CONTROL, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
++ /* blank the display controllers */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
++ if (crtc_enabled) {
++ save->crtc_enabled[i] = true;
++ if (ASIC_IS_DCE6(rdev)) {
++ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
++ if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
++ radeon_wait_for_vblank(rdev, i);
++ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
++ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++ }
++ } else {
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
++ radeon_wait_for_vblank(rdev, i);
++ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ }
++ }
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
++ }
++
++ radeon_mc_wait_for_idle(rdev);
++
++ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ if ((blackout & BLACKOUT_MODE_MASK) != 1) {
++ /* Block CPU access */
++ WREG32(BIF_FB_EN, 0);
++ /* blackout the MC */
++ blackout &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ }
+
+ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
++ u32 tmp, frame_count;
++ int i, j;
+
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ /* update crtc base addresses */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- }
+-
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+- /* Unlock host access */
++
++ /* unblackout the MC */
++ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ tmp &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
++ /* allow CPU access */
++ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
++
++ for (i = 0; i < rdev->num_crtc; i++) {
++ if (save->crtc_enabled) {
++ if (ASIC_IS_DCE6(rdev)) {
++ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
++ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
++ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++ } else {
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ }
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
++ }
++ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -218,6 +218,8 @@
+ #define EVERGREEN_CRTC_CONTROL 0x6e70
+ # define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+ # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
++#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
++# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
+ #define EVERGREEN_CRTC_STATUS 0x6e8c
+ # define EVERGREEN_CRTC_V_BLANK (1 << 0)
+ #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -87,6 +87,10 @@
+
+ #define CONFIG_MEMSIZE 0x5428
+
++#define BIF_FB_EN 0x5490
++#define FB_READ_EN (1 << 0)
++#define FB_WRITE_EN (1 << 1)
++
+ #define CP_STRMOUT_CNTL 0x84FC
+
+ #define CP_COHER_CNTL 0x85F0
+@@ -434,6 +438,9 @@
+ #define NOOFCHAN_MASK 0x00003000
+ #define MC_SHARED_CHREMAP 0x2008
+
++#define MC_SHARED_BLACKOUT_CNTL 0x20ac
++#define BLACKOUT_MODE_MASK 0x00000007
++
+ #define MC_ARB_RAMCFG 0x2760
+ #define NOOFBANK_SHIFT 0
+ #define NOOFBANK_MASK 0x00000003
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -389,6 +389,7 @@ void r700_cp_fini(struct radeon_device *
+ struct evergreen_mc_save {
+ u32 vga_render_control;
+ u32 vga_hdp_control;
++ bool crtc_enabled[RADEON_MAX_CRTCS];
+ };
+
+ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
--- /dev/null
+From 804cc4a0ad3a896ca295f771a28c6eb36ced7903 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 19 Nov 2012 09:11:27 -0500
+Subject: drm/radeon: properly track the crtc not_enabled case evergreen_mc_stop()
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 804cc4a0ad3a896ca295f771a28c6eb36ced7903 upstream.
+
+The save struct is not initialized previously so explicitly
+mark the crtcs as not used when they are not in use.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/evergreen.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1276,6 +1276,8 @@ void evergreen_mc_stop(struct radeon_dev
+ break;
+ udelay(1);
+ }
++ } else {
++ save->crtc_enabled[i] = false;
+ }
+ }
+
--- /dev/null
+From 5edd0b946a0afeb1d0364a3654328b046fb818a2 Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Tue, 20 Nov 2012 16:31:25 +0200
+Subject: iwlwifi: fix the basic CCK rates calculation
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit 5edd0b946a0afeb1d0364a3654328b046fb818a2 upstream.
+
+Fix a copy paste error in iwl_calc_basic_rates which leads
+to a wrong calculation of CCK basic rates.
+
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/iwlwifi/dvm/rxon.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
++++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
+@@ -1012,12 +1012,12 @@ static void iwl_calc_basic_rates(struct
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+- if (IWL_RATE_11M_INDEX < lowest_present_ofdm)
+- ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
+- if (IWL_RATE_5M_INDEX < lowest_present_ofdm)
+- ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
+- if (IWL_RATE_2M_INDEX < lowest_present_ofdm)
+- ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
++ if (IWL_RATE_11M_INDEX < lowest_present_cck)
++ cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
++ if (IWL_RATE_5M_INDEX < lowest_present_cck)
++ cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
++ if (IWL_RATE_2M_INDEX < lowest_present_cck)
++ cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
+
--- /dev/null
+From 783657a7dc20e5c0efbc9a09a9dd38e238a723da Mon Sep 17 00:00:00 2001
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Date: Thu, 29 Nov 2012 13:54:34 -0800
+Subject: mm: soft offline: split thp at the beginning of soft_offline_page()
+
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+
+commit 783657a7dc20e5c0efbc9a09a9dd38e238a723da upstream.
+
+When we try to soft-offline a thp tail page, put_page() is called on the
+tail page unthinkingly and VM_BUG_ON is triggered in put_compound_page().
+
+This patch splits thp before going into the main body of soft-offlining.
+
+Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Andi Kleen <andi.kleen@intel.com>
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory-failure.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1474,9 +1474,17 @@ int soft_offline_page(struct page *page,
+ {
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
++ struct page *hpage = compound_trans_head(page);
+
+ if (PageHuge(page))
+ return soft_offline_huge_page(page, flags);
++ if (PageTransHuge(hpage)) {
++ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
++ pr_info("soft offline: %#lx: failed to split THP\n",
++ pfn);
++ return -EBUSY;
++ }
++ }
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
--- /dev/null
+From ae64ffcac35de0db628ba9631edf8ff34c5cd7ac Mon Sep 17 00:00:00 2001
+From: Jianguo Wu <wujianguo@huawei.com>
+Date: Thu, 29 Nov 2012 13:54:21 -0800
+Subject: mm/vmemmap: fix wrong use of virt_to_page
+
+From: Jianguo Wu <wujianguo@huawei.com>
+
+commit ae64ffcac35de0db628ba9631edf8ff34c5cd7ac upstream.
+
+I enable CONFIG_DEBUG_VIRTUAL and CONFIG_SPARSEMEM_VMEMMAP, when doing
+memory hotremove, there is a kernel BUG at arch/x86/mm/physaddr.c:20.
+
+It is caused by free_section_usemap()->virt_to_page(), virt_to_page() is
+only used for kernel direct mapping address, but sparse-vmemmap uses
+vmemmap address, so it is going wrong here.
+
+ ------------[ cut here ]------------
+ kernel BUG at arch/x86/mm/physaddr.c:20!
+ invalid opcode: 0000 [#1] SMP
+ Modules linked in: acpihp_drv acpihp_slot edd cpufreq_conservative cpufreq_userspace cpufreq_powersave acpi_cpufreq mperf fuse vfat fat loop dm_mod coretemp kvm crc32c_intel ipv6 ixgbe igb iTCO_wdt i7core_edac edac_core pcspkr iTCO_vendor_support ioatdma microcode joydev sr_mod i2c_i801 dca lpc_ich mfd_core mdio tpm_tis i2c_core hid_generic tpm cdrom sg tpm_bios rtc_cmos button ext3 jbd mbcache usbhid hid uhci_hcd ehci_hcd usbcore usb_common sd_mod crc_t10dif processor thermal_sys hwmon scsi_dh_alua scsi_dh_hp_sw scsi_dh_rdac scsi_dh_emc scsi_dh ata_generic ata_piix libata megaraid_sas scsi_mod
+ CPU 39
+ Pid: 6454, comm: sh Not tainted 3.7.0-rc1-acpihp-final+ #45 QCI QSSC-S4R/QSSC-S4R
+ RIP: 0010:[<ffffffff8103c908>] [<ffffffff8103c908>] __phys_addr+0x88/0x90
+ RSP: 0018:ffff8804440d7c08 EFLAGS: 00010006
+ RAX: 0000000000000006 RBX: ffffea0012000000 RCX: 000000000000002c
+ ...
+
+Signed-off-by: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
+Reviewd-by: Wen Congyang <wency@cn.fujitsu.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+Reviewed-by: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/sparse.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -617,7 +617,7 @@ static void __kfree_section_memmap(struc
+ {
+ return; /* XXX: Not implemented yet */
+ }
+-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+ {
+ }
+ #else
+@@ -658,10 +658,11 @@ static void __kfree_section_memmap(struc
+ get_order(sizeof(struct page) * nr_pages));
+ }
+
+-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+ {
+ unsigned long maps_section_nr, removing_section_nr, i;
+ unsigned long magic;
++ struct page *page = virt_to_page(memmap);
+
+ for (i = 0; i < nr_pages; i++, page++) {
+ magic = (unsigned long) page->lru.next;
+@@ -710,13 +711,10 @@ static void free_section_usemap(struct p
+ */
+
+ if (memmap) {
+- struct page *memmap_page;
+- memmap_page = virt_to_page(memmap);
+-
+ nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+ >> PAGE_SHIFT;
+
+- free_map_bootmem(memmap_page, nr_pages);
++ free_map_bootmem(memmap, nr_pages);
+ }
+ }
+
--- /dev/null
+From 60cefed485a02bd99b6299dad70666fe49245da7 Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Thu, 29 Nov 2012 13:54:23 -0800
+Subject: mm: vmscan: fix endless loop in kswapd balancing
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit 60cefed485a02bd99b6299dad70666fe49245da7 upstream.
+
+Kswapd does not in all places have the same criteria for a balanced
+zone. Zones are only being reclaimed when their high watermark is
+breached, but compaction checks loop over the zonelist again when the
+zone does not meet the low watermark plus two times the size of the
+allocation. This gets kswapd stuck in an endless loop over a small
+zone, like the DMA zone, where the high watermark is smaller than the
+compaction requirement.
+
+Add a function, zone_balanced(), that checks the watermark, and, for
+higher order allocations, if compaction has enough free memory. Then
+use it uniformly to check for balanced zones.
+
+This makes sure that when the compaction watermark is not met, at least
+reclaim happens and progress is made - or the zone is declared
+unreclaimable at some point and skipped entirely.
+
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Reported-by: George Spelvin <linux@horizon.com>
+Reported-by: Johannes Hirte <johannes.hirte@fem.tu-ilmenau.de>
+Reported-by: Tomas Racek <tracek@redhat.com>
+Tested-by: Johannes Hirte <johannes.hirte@fem.tu-ilmenau.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mel@csn.ul.ie>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c | 27 ++++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2383,6 +2383,19 @@ static void age_active_anon(struct zone
+ } while (memcg);
+ }
+
++static bool zone_balanced(struct zone *zone, int order,
++ unsigned long balance_gap, int classzone_idx)
++{
++ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
++ balance_gap, classzone_idx, 0))
++ return false;
++
++ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
++ return false;
++
++ return true;
++}
++
+ /*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are in a zone allowed
+@@ -2461,8 +2474,7 @@ static bool prepare_kswapd_sleep(pg_data
+ continue;
+ }
+
+- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+- i, 0))
++ if (!zone_balanced(zone, order, 0, i))
+ all_zones_ok = false;
+ else
+ balanced += zone->present_pages;
+@@ -2571,8 +2583,7 @@ loop_again:
+ break;
+ }
+
+- if (!zone_watermark_ok_safe(zone, order,
+- high_wmark_pages(zone), 0, 0)) {
++ if (!zone_balanced(zone, order, 0, 0)) {
+ end_zone = i;
+ break;
+ } else {
+@@ -2648,9 +2659,8 @@ loop_again:
+ testorder = 0;
+
+ if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
+- !zone_watermark_ok_safe(zone, testorder,
+- high_wmark_pages(zone) + balance_gap,
+- end_zone, 0)) {
++ !zone_balanced(zone, testorder,
++ balance_gap, end_zone)) {
+ shrink_zone(zone, &sc);
+
+ reclaim_state->reclaimed_slab = 0;
+@@ -2677,8 +2687,7 @@ loop_again:
+ continue;
+ }
+
+- if (!zone_watermark_ok_safe(zone, testorder,
+- high_wmark_pages(zone), end_zone, 0)) {
++ if (!zone_balanced(zone, testorder, 0, end_zone)) {
+ all_zones_ok = 0;
+ /*
+ * We are still under min water mark. This
--- /dev/null
+dove-attempt-to-fix-pmu-rtc-interrupts.patch
+dove-fix-irq_to_pmu.patch
+drm-radeon-dce4-don-t-use-radeon_crtc-for-vblank-callback.patch
+drm-radeon-properly-handle-mc_stop-mc_resume-on-evergreen-v2.patch
+drm-radeon-properly-track-the-crtc-not_enabled-case-evergreen_mc_stop.patch
+mm-vmemmap-fix-wrong-use-of-virt_to_page.patch
+mm-vmscan-fix-endless-loop-in-kswapd-balancing.patch
+mm-soft-offline-split-thp-at-the-beginning-of-soft_offline_page.patch
+target-fix-handling-of-aborted-commands.patch
+iwlwifi-fix-the-basic-cck-rates-calculation.patch
+arm-kirkwood-update-pci-e-fixup.patch
+x86-fpu-avoid-fpu-lazy-restore-after-suspend.patch
--- /dev/null
+From 3ea160b3e8f0de8161861995d9901f61192fc0b0 Mon Sep 17 00:00:00 2001
+From: Roland Dreier <roland@purestorage.com>
+Date: Fri, 16 Nov 2012 08:06:16 -0800
+Subject: target: Fix handling of aborted commands
+
+From: Roland Dreier <roland@purestorage.com>
+
+commit 3ea160b3e8f0de8161861995d9901f61192fc0b0 upstream.
+
+- If we stop processing an already-aborted command in
+ target_execute_cmd(), then we need to complete t_transport_stop_comp
+ to wake up the the TMR handling thread, or else it will end up
+ waiting forever.
+
+- If we've already sent an "aborted" status for a command in
+ transport_check_aborted_status() then we should bail out of
+ transport_send_task_abort() to avoid freeing the command twice.
+
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+Signed-off-by: Nicholas Bellinger <nab@risingtidesystems.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_transport.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1756,8 +1756,10 @@ void target_execute_cmd(struct se_cmd *c
+ /*
+ * If the received CDB has aleady been aborted stop processing it here.
+ */
+- if (transport_check_aborted_status(cmd, 1))
++ if (transport_check_aborted_status(cmd, 1)) {
++ complete(&cmd->t_transport_stop_comp);
+ return;
++ }
+
+ /*
+ * Determine if IOCTL context caller in requesting the stopping of this
+@@ -3029,7 +3031,7 @@ void transport_send_task_abort(struct se
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+- if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
++ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
--- /dev/null
+From 644c154186386bb1fa6446bc5e037b9ed098db46 Mon Sep 17 00:00:00 2001
+From: Vincent Palatin <vpalatin@chromium.org>
+Date: Fri, 30 Nov 2012 12:15:32 -0800
+Subject: x86, fpu: Avoid FPU lazy restore after suspend
+
+From: Vincent Palatin <vpalatin@chromium.org>
+
+commit 644c154186386bb1fa6446bc5e037b9ed098db46 upstream.
+
+When a cpu enters S3 state, the FPU state is lost.
+After resuming for S3, if we try to lazy restore the FPU for a process running
+on the same CPU, this will result in a corrupted FPU context.
+
+Ensure that "fpu_owner_task" is properly invalided when (re-)initializing a CPU,
+so nobody will try to lazy restore a state which doesn't exist in the hardware.
+
+Tested with a 64-bit kernel on a 4-core Ivybridge CPU with eagerfpu=off,
+by doing thousands of suspend/resume cycles with 4 processes doing FPU
+operations running. Without the patch, a process is killed after a
+few hundreds cycles by a SIGFPE.
+
+Signed-off-by: Vincent Palatin <vpalatin@chromium.org>
+Cc: Duncan Laurie <dlaurie@chromium.org>
+Cc: Olof Johansson <olofj@chromium.org>
+Link: http://lkml.kernel.org/r/1354306532-1014-1-git-send-email-vpalatin@chromium.org
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/fpu-internal.h | 15 +++++++++------
+ arch/x86/kernel/smpboot.c | 5 +++++
+ 2 files changed, 14 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/fpu-internal.h
++++ b/arch/x86/include/asm/fpu-internal.h
+@@ -334,14 +334,17 @@ static inline void __thread_fpu_begin(st
+ typedef struct { int preload; } fpu_switch_t;
+
+ /*
+- * FIXME! We could do a totally lazy restore, but we need to
+- * add a per-cpu "this was the task that last touched the FPU
+- * on this CPU" variable, and the task needs to have a "I last
+- * touched the FPU on this CPU" and check them.
++ * Must be run with preemption disabled: this clears the fpu_owner_task,
++ * on this CPU.
+ *
+- * We don't do that yet, so "fpu_lazy_restore()" always returns
+- * false, but some day..
++ * This will disable any lazy FPU state restore of the current FPU state,
++ * but if the current thread owns the FPU, it will still be saved by.
+ */
++static inline void __cpu_disable_lazy_restore(unsigned int cpu)
++{
++ per_cpu(fpu_owner_task, cpu) = NULL;
++}
++
+ static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+ {
+ return new == this_cpu_read_stable(fpu_owner_task) &&
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -68,6 +68,8 @@
+ #include <asm/mwait.h>
+ #include <asm/apic.h>
+ #include <asm/io_apic.h>
++#include <asm/i387.h>
++#include <asm/fpu-internal.h>
+ #include <asm/setup.h>
+ #include <asm/uv/uv.h>
+ #include <linux/mc146818rtc.h>
+@@ -817,6 +819,9 @@ int __cpuinit native_cpu_up(unsigned int
+
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
++ /* the FPU context is blank, nobody can own it */
++ __cpu_disable_lazy_restore(cpu);
++
+ err = do_boot_cpu(apicid, cpu, tidle);
+ if (err) {
+ pr_debug("do_boot_cpu failed %d\n", err);