3.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2014 23:07:15 +0000 (16:07 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2014 23:07:15 +0000 (16:07 -0700)
added patches:
revert-x86-64-modify_ldt-make-support-for-16-bit-segments-a-runtime-option.patch
x86-espfix-fix-broken-header-guard.patch
x86-espfix-make-espfix64-a-kconfig-option-fix-uml.patch
x86-espfix-make-it-possible-to-disable-16-bit-support.patch
x86-espfix-move-espfix-definitions-into-a-separate-header-file.patch
x86-ioremap-speed-up-check-for-ram-pages.patch

queue-3.4/revert-x86-64-modify_ldt-make-support-for-16-bit-segments-a-runtime-option.patch [new file with mode: 0644]
queue-3.4/series
queue-3.4/x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
queue-3.4/x86-espfix-fix-broken-header-guard.patch [new file with mode: 0644]
queue-3.4/x86-espfix-make-espfix64-a-kconfig-option-fix-uml.patch [new file with mode: 0644]
queue-3.4/x86-espfix-make-it-possible-to-disable-16-bit-support.patch [new file with mode: 0644]
queue-3.4/x86-espfix-move-espfix-definitions-into-a-separate-header-file.patch [new file with mode: 0644]
queue-3.4/x86-ioremap-speed-up-check-for-ram-pages.patch [new file with mode: 0644]

diff --git a/queue-3.4/revert-x86-64-modify_ldt-make-support-for-16-bit-segments-a-runtime-option.patch b/queue-3.4/revert-x86-64-modify_ldt-make-support-for-16-bit-segments-a-runtime-option.patch
new file mode 100644
index 0000000..24dec3d
--- /dev/null
+++ b/queue-3.4/revert-x86-64-modify_ldt-make-support-for-16-bit-segments-a-runtime-option.patch
@@ -0,0 +1,64 @@
+From 7ed6fb9b5a5510e4ef78ab27419184741169978a Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Wed, 21 May 2014 10:22:59 -0700
+Subject: Revert "x86-64, modify_ldt: Make support for 16-bit segments a runtime option"
+
+From: "H. Peter Anvin" <hpa@zytor.com>
+
+commit 7ed6fb9b5a5510e4ef78ab27419184741169978a upstream.
+
+This reverts commit fa81511bb0bbb2b1aace3695ce869da9762624ff in
+preparation for merging in the proper fix (espfix64).
+
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/ldt.c        |    4 +---
+ arch/x86/vdso/vdso32-setup.c |    8 --------
+ 2 files changed, 1 insertion(+), 11 deletions(-)
+
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -20,8 +20,6 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+-int sysctl_ldt16 = 0;
+-
+ #ifdef CONFIG_SMP
+ static void flush_ldt(void *current_mm)
+ {
+@@ -236,7 +234,7 @@ static int write_ldt(void __user *ptr, u
+        * IRET leaking the high bits of the kernel stack address.
+        */
+ #ifdef CONFIG_X86_64
+-      if (!ldt_info.seg_32bit && !sysctl_ldt16) {
++      if (!ldt_info.seg_32bit) {
+               error = -EINVAL;
+               goto out_unlock;
+       }
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -41,7 +41,6 @@ enum {
+ #ifdef CONFIG_X86_64
+ #define vdso_enabled                  sysctl_vsyscall32
+ #define arch_setup_additional_pages   syscall32_setup_pages
+-extern int sysctl_ldt16;
+ #endif
+ /*
+@@ -380,13 +379,6 @@ static ctl_table abi_table2[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+-      },
+-      {
+-              .procname       = "ldt16",
+-              .data           = &sysctl_ldt16,
+-              .maxlen         = sizeof(int),
+-              .mode           = 0644,
+-              .proc_handler   = proc_dointvec
+       },
+       {}
+ };
diff --git a/queue-3.4/series b/queue-3.4/series
index c0161731ba683fea0417d1822b9628addfe444f0..a4460c05ea55e4368bdb6fcf1412770b4035c7f7 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -8,8 +8,14 @@ powerpc-perf-never-program-book3s-pmcs-with-values-0x80000000.patch
 ext4-clarify-error-count-warning-messages.patch
 drm-radeon-stop-poisoning-the-gart-tlb.patch
 tracing-remove-ftrace_stop-start-from-reading-the-trace-file.patch
-x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
 rtmutex-fix-deadlock-detector-for-real.patch
 rtmutex-detect-changes-in-the-pi-lock-chain.patch
 rtmutex-handle-deadlock-detection-smarter.patch
 rtmutex-plug-slow-unlock-race.patch
+revert-x86-64-modify_ldt-make-support-for-16-bit-segments-a-runtime-option.patch
+x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
+x86-espfix-move-espfix-definitions-into-a-separate-header-file.patch
+x86-espfix-fix-broken-header-guard.patch
+x86-espfix-make-espfix64-a-kconfig-option-fix-uml.patch
+x86-espfix-make-it-possible-to-disable-16-bit-support.patch
+x86-ioremap-speed-up-check-for-ram-pages.patch
diff --git a/queue-3.4/x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch b/queue-3.4/x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
index 35acb3923ce484abffef81750fbabfd6007f10bf..dcdfee546c7acc05eb3d047e2d3ba542c6798c8d 100644
--- a/queue-3.4/x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
+++ b/queue-3.4/x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
@@ -448,7 +448,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 +}
 --- a/arch/x86/kernel/ldt.c
 +++ b/arch/x86/kernel/ldt.c
-@@ -231,17 +231,6 @@ static int write_ldt(void __user *ptr, u
+@@ -229,17 +229,6 @@ static int write_ldt(void __user *ptr, u
                }
        }
  
@@ -457,7 +457,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 -       * IRET leaking the high bits of the kernel stack address.
 -       */
 -#ifdef CONFIG_X86_64
--      if (!ldt_info.seg_32bit && !sysctl_ldt16) {
+-      if (!ldt_info.seg_32bit) {
 -              error = -EINVAL;
 -              goto out_unlock;
 -      }
diff --git a/queue-3.4/x86-espfix-fix-broken-header-guard.patch b/queue-3.4/x86-espfix-fix-broken-header-guard.patch
new file mode 100644
index 0000000..39b5fb2
--- /dev/null
+++ b/queue-3.4/x86-espfix-fix-broken-header-guard.patch
@@ -0,0 +1,27 @@
+From 20b68535cd27183ebd3651ff313afb2b97dac941 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+Date: Fri, 2 May 2014 11:33:51 -0700
+Subject: x86, espfix: Fix broken header guard
+
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+
+commit 20b68535cd27183ebd3651ff313afb2b97dac941 upstream.
+
+Header guard is #ifndef, not #ifdef...
+
+Reported-by: Fengguang Wu <fengguang.wu@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/espfix.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/espfix.h
++++ b/arch/x86/include/asm/espfix.h
+@@ -1,4 +1,4 @@
+-#ifdef _ASM_X86_ESPFIX_H
++#ifndef _ASM_X86_ESPFIX_H
+ #define _ASM_X86_ESPFIX_H
+ #ifdef CONFIG_X86_64
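
The fix above is a single character, but the effect of the bug is total: with #ifdef, the guard macro is still undefined the first time the header is read, so the preprocessor skips the entire body (the #define included) and the header contributes nothing to any compilation unit. A standalone sketch of the correct idiom (the macro name here is illustrative):

    /* example.h: the standard include-guard idiom */
    #ifndef EXAMPLE_H       /* first inclusion: macro not yet defined, body compiles */
    #define EXAMPLE_H       /* repeat inclusions: macro now defined, body is skipped */

    extern int example_symbol;

    #endif /* EXAMPLE_H */
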
diff --git a/queue-3.4/x86-espfix-make-espfix64-a-kconfig-option-fix-uml.patch b/queue-3.4/x86-espfix-make-espfix64-a-kconfig-option-fix-uml.patch
new file mode 100644
index 0000000..428c867
--- /dev/null
+++ b/queue-3.4/x86-espfix-make-espfix64-a-kconfig-option-fix-uml.patch
@@ -0,0 +1,77 @@
+From 197725de65477bc8509b41388157c1a2283542bb Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Sun, 4 May 2014 10:00:49 -0700
+Subject: x86, espfix: Make espfix64 a Kconfig option, fix UML
+
+From: "H. Peter Anvin" <hpa@zytor.com>
+
+commit 197725de65477bc8509b41388157c1a2283542bb upstream.
+
+Make espfix64 a hidden Kconfig option.  This fixes the x86-64 UML
+build which had broken due to the non-existence of init_espfix_bsp()
+in UML: since UML uses its own Kconfig, this option does not appear in
+the UML build.
+
+This also makes it possible to make support for 16-bit segments a
+configuration option, for the people who want to minimize the size of
+the kernel.
+
+Reported-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Richard Weinberger <richard@nod.at>
+Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Kconfig          |    4 ++++
+ arch/x86/kernel/Makefile  |    2 +-
+ arch/x86/kernel/smpboot.c |    2 +-
+ init/main.c               |    2 +-
+ 4 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -920,6 +920,10 @@ config VM86
+         XFree86 to initialize some video cards via BIOS. Disabling this
+         option saves about 6k.
++config X86_ESPFIX64
++      def_bool y
++      depends on X86_64
++
+ config TOSHIBA
+       tristate "Toshiba Laptop support"
+       depends on X86_32
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -28,7 +28,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x86
+ obj-y                 += syscall_$(BITS).o
+ obj-$(CONFIG_X86_64)  += vsyscall_64.o
+ obj-$(CONFIG_X86_64)  += vsyscall_emu_64.o
+-obj-$(CONFIG_X86_64)  += espfix_64.o
++obj-$(CONFIG_X86_ESPFIX64)    += espfix_64.o
+ obj-y                 += bootflag.o e820.o
+ obj-y                 += pci-dma.o quirks.o topology.o kdebugfs.o
+ obj-y                 += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -273,7 +273,7 @@ notrace static void __cpuinit start_seco
+       /*
+        * Enable the espfix hack for this CPU
+        */
+-#ifdef CONFIG_X86_64
++#ifdef CONFIG_X86_ESPFIX64
+       init_espfix_ap();
+ #endif
+--- a/init/main.c
++++ b/init/main.c
+@@ -606,7 +606,7 @@ asmlinkage void __init start_kernel(void
+       if (efi_enabled(EFI_RUNTIME_SERVICES))
+               efi_enter_virtual_mode();
+ #endif
+-#ifdef CONFIG_X86_64
++#ifdef CONFIG_X86_ESPFIX64
+       /* Should be run before the first non-init thread is created */
+       init_espfix_bsp();
+ #endif
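
Why the hidden symbol fixes UML, spelled out: UML on x86-64 defines CONFIG_X86_64 through its own Kconfig tree, so the old #ifdef CONFIG_X86_64 guard still selected a call to init_espfix_bsp(), which UML never builds. CONFIG_X86_ESPFIX64 lives only in arch/x86/Kconfig, which the UML build does not read, so the guarded calls now drop out there. For contrast, a common kernel idiom (not what this patch does; the fragment below is a hypothetical sketch) keeps call sites unconditional and supplies no-op stubs instead:

    /* hypothetical header fragment, shown for contrast with the patch's #ifdef guards */
    #ifdef CONFIG_X86_ESPFIX64
    extern void init_espfix_bsp(void);
    extern void init_espfix_ap(void);
    #else
    static inline void init_espfix_bsp(void) { }    /* optimized out entirely */
    static inline void init_espfix_ap(void) { }
    #endif

With stubs the callers would stay unconditional; the patch chooses explicit guards at the call sites instead.
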
diff --git a/queue-3.4/x86-espfix-make-it-possible-to-disable-16-bit-support.patch b/queue-3.4/x86-espfix-make-it-possible-to-disable-16-bit-support.patch
new file mode 100644
index 0000000..209e184
--- /dev/null
+++ b/queue-3.4/x86-espfix-make-it-possible-to-disable-16-bit-support.patch
@@ -0,0 +1,212 @@
+From 34273f41d57ee8d854dcd2a1d754cbb546cb548f Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Sun, 4 May 2014 10:36:22 -0700
+Subject: x86, espfix: Make it possible to disable 16-bit support
+
+From: "H. Peter Anvin" <hpa@zytor.com>
+
+commit 34273f41d57ee8d854dcd2a1d754cbb546cb548f upstream.
+
+Embedded systems, which may be very memory-size-sensitive, are
+extremely unlikely to ever encounter any 16-bit software, so make it
+a CONFIG_EXPERT option to turn off support for any 16-bit software
+whatsoever.
+
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Kconfig           |   23 ++++++++++++++++++-----
+ arch/x86/kernel/entry_32.S |   12 ++++++++++++
+ arch/x86/kernel/entry_64.S |    8 ++++++++
+ arch/x86/kernel/ldt.c      |    5 +++++
+ 4 files changed, 43 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -915,14 +915,27 @@ config VM86
+       default y
+       depends on X86_32
+       ---help---
+-        This option is required by programs like DOSEMU to run 16-bit legacy
+-        code on X86 processors. It also may be needed by software like
+-        XFree86 to initialize some video cards via BIOS. Disabling this
+-        option saves about 6k.
++        This option is required by programs like DOSEMU to run
++        16-bit real mode legacy code on x86 processors. It also may
++        be needed by software like XFree86 to initialize some video
++        cards via BIOS. Disabling this option saves about 6K.
++
++config X86_16BIT
++      bool "Enable support for 16-bit segments" if EXPERT
++      default y
++      ---help---
++        This option is required by programs like Wine to run 16-bit
++        protected mode legacy code on x86 processors.  Disabling
++        this option saves about 300 bytes on i386, or around 6K text
+        plus 16K runtime memory on x86-64.
++
++config X86_ESPFIX32
++      def_bool y
++      depends on X86_16BIT && X86_32
+ config X86_ESPFIX64
+       def_bool y
+-      depends on X86_64
++      depends on X86_16BIT && X86_64
+ config TOSHIBA
+       tristate "Toshiba Laptop support"
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -524,6 +524,7 @@ syscall_exit:
+ restore_all:
+       TRACE_IRQS_IRET
+ restore_all_notrace:
++#ifdef CONFIG_X86_ESPFIX32
+       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
+       # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+       # are returning to the kernel.
+@@ -534,6 +535,7 @@ restore_all_notrace:
+       cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
+       CFI_REMEMBER_STATE
+       je ldt_ss                       # returning to user-space with LDT SS
++#endif
+ restore_nocheck:
+       RESTORE_REGS 4                  # skip orig_eax/error_code
+ irq_return:
+@@ -549,6 +551,7 @@ ENTRY(iret_exc)
+       .long irq_return,iret_exc
+ .previous
++#ifdef CONFIG_X86_ESPFIX32
+       CFI_RESTORE_STATE
+ ldt_ss:
+ #ifdef CONFIG_PARAVIRT
+@@ -592,6 +595,7 @@ ldt_ss:
+       lss (%esp), %esp                /* switch to espfix segment */
+       CFI_ADJUST_CFA_OFFSET -8
+       jmp restore_nocheck
++#endif
+       CFI_ENDPROC
+ ENDPROC(system_call)
+@@ -765,6 +769,7 @@ ENDPROC(ptregs_clone)
+  * the high word of the segment base from the GDT and switches to the
+  * normal stack and adjusts ESP with the matching offset.
+  */
++#ifdef CONFIG_X86_ESPFIX32
+       /* fixup the stack */
+       mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+       mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
+@@ -774,8 +779,10 @@ ENDPROC(ptregs_clone)
+       pushl_cfi %eax
+       lss (%esp), %esp                /* switch to the normal stack segment */
+       CFI_ADJUST_CFA_OFFSET -8
++#endif
+ .endm
+ .macro UNWIND_ESPFIX_STACK
++#ifdef CONFIG_X86_ESPFIX32
+       movl %ss, %eax
+       /* see if on espfix stack */
+       cmpw $__ESPFIX_SS, %ax
+@@ -786,6 +793,7 @@ ENDPROC(ptregs_clone)
+       /* switch to normal stack */
+       FIXUP_ESPFIX_STACK
+ 27:
++#endif
+ .endm
+ /*
+@@ -1317,11 +1325,13 @@ END(debug)
+  */
+ ENTRY(nmi)
+       RING0_INT_FRAME
++#ifdef CONFIG_X86_ESPFIX32
+       pushl_cfi %eax
+       movl %ss, %eax
+       cmpw $__ESPFIX_SS, %ax
+       popl_cfi %eax
+       je nmi_espfix_stack
++#endif
+       cmpl $ia32_sysenter_target,(%esp)
+       je nmi_stack_fixup
+       pushl_cfi %eax
+@@ -1361,6 +1371,7 @@ nmi_debug_stack_check:
+       FIX_STACK 24, nmi_stack_correct, 1
+       jmp nmi_stack_correct
++#ifdef CONFIG_X86_ESPFIX32
+ nmi_espfix_stack:
+       /* We have a RING0_INT_FRAME here.
+        *
+@@ -1382,6 +1393,7 @@ nmi_espfix_stack:
+       lss 12+4(%esp), %esp            # back to espfix stack
+       CFI_ADJUST_CFA_OFFSET -24
+       jmp irq_return
++#endif
+       CFI_ENDPROC
+ END(nmi)
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -904,8 +904,10 @@ irq_return:
+        * Are we returning to a stack segment from the LDT?  Note: in
+        * 64-bit mode SS:RSP on the exception stack is always valid.
+        */
++#ifdef CONFIG_X86_ESPFIX64
+       testb $4,(SS-RIP)(%rsp)
+       jnz irq_return_ldt
++#endif
+ irq_return_iret:
+       INTERRUPT_RETURN
+@@ -923,6 +925,7 @@ ENTRY(native_iret)
+       .previous
+ #endif
++#ifdef CONFIG_X86_ESPFIX64
+ irq_return_ldt:
+       pushq_cfi %rax
+       pushq_cfi %rdi
+@@ -946,6 +949,7 @@ irq_return_ldt:
+       movq %rax,%rsp
+       popq_cfi %rax
+       jmp irq_return_iret
++#endif
+       .section .fixup,"ax"
+ bad_iret:
+@@ -1019,6 +1023,7 @@ END(common_interrupt)
+        * modify the stack to make it look like we just entered
+        * the #GP handler from user space, similar to bad_iret.
+        */
++#ifdef CONFIG_X86_ESPFIX64
+       ALIGN
+ __do_double_fault:
+       XCPT_FRAME 1 RDI+8
+@@ -1044,6 +1049,9 @@ __do_double_fault:
+       retq
+       CFI_ENDPROC
+ END(__do_double_fault)
++#else
++# define __do_double_fault do_double_fault
++#endif
+ /*
+  * End of kprobes section
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -229,6 +229,11 @@ static int write_ldt(void __user *ptr, u
+               }
+       }
++      if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
++              error = -EINVAL;
++              goto out_unlock;
++      }
++
+       fill_ldt(&ldt, &ldt_info);
+       if (oldmode)
+               ldt.avl = 0;
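
One detail of the ldt.c hunk is worth noting: it uses IS_ENABLED(CONFIG_X86_16BIT) in plain C rather than wrapping the check in #ifdef, so the rejection branch is type-checked in every configuration and constant-folded away when 16-bit support is enabled. A small userspace sketch of the shape (the IS_ENABLED stand-in below is simplified; the real macro lives in include/linux/kconfig.h):

    #include <stdio.h>

    #define CONFIG_X86_16BIT 0            /* pretend 16-bit support is configured out */
    #define IS_ENABLED(option) (option)   /* simplified stand-in for the kernel macro */

    /* mirrors the patch: refuse 16-bit LDT entries when support is compiled out */
    static int check_ldt_entry(int seg_32bit)
    {
            if (!IS_ENABLED(CONFIG_X86_16BIT) && !seg_32bit)
                    return -1;            /* the kernel returns -EINVAL here */
            return 0;
    }

    int main(void)
    {
            printf("16-bit entry: %d\n", check_ldt_entry(0)); /* -1: rejected */
            printf("32-bit entry: %d\n", check_ldt_entry(1)); /*  0: accepted */
            return 0;
    }
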
diff --git a/queue-3.4/x86-espfix-move-espfix-definitions-into-a-separate-header-file.patch b/queue-3.4/x86-espfix-move-espfix-definitions-into-a-separate-header-file.patch
new file mode 100644
index 0000000..6247714
--- /dev/null
+++ b/queue-3.4/x86-espfix-move-espfix-definitions-into-a-separate-header-file.patch
@@ -0,0 +1,68 @@
+From e1fe9ed8d2a4937510d0d60e20705035c2609aea Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+Date: Thu, 1 May 2014 14:12:23 -0700
+Subject: x86, espfix: Move espfix definitions into a separate header file
+
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+
+commit e1fe9ed8d2a4937510d0d60e20705035c2609aea upstream.
+
+Sparse warns that the percpu variables aren't declared before they are
+defined.  Rather than hacking around it, move espfix definitions into
+a proper header file.
+
+Reported-by: Fengguang Wu <fengguang.wu@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/espfix.h |   16 ++++++++++++++++
+ arch/x86/include/asm/setup.h  |    5 ++---
+ arch/x86/kernel/espfix_64.c   |    1 +
+ 3 files changed, 19 insertions(+), 3 deletions(-)
+
+--- /dev/null
++++ b/arch/x86/include/asm/espfix.h
+@@ -0,0 +1,16 @@
++#ifdef _ASM_X86_ESPFIX_H
++#define _ASM_X86_ESPFIX_H
++
++#ifdef CONFIG_X86_64
++
++#include <asm/percpu.h>
++
++DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
++DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
++
++extern void init_espfix_bsp(void);
++extern void init_espfix_ap(void);
++
++#endif /* CONFIG_X86_64 */
++
++#endif /* _ASM_X86_ESPFIX_H */
+--- a/arch/x86/include/asm/setup.h
++++ b/arch/x86/include/asm/setup.h
+@@ -59,11 +59,10 @@ extern void x86_ce4100_early_setup(void)
+ static inline void x86_ce4100_early_setup(void) { }
+ #endif
+-extern void init_espfix_bsp(void);
+-extern void init_espfix_ap(void);
+-
+ #ifndef _SETUP
++#include <asm/espfix.h>
++
+ /*
+  * This is set up by the setup-routine at boot-time
+  */
+--- a/arch/x86/kernel/espfix_64.c
++++ b/arch/x86/kernel/espfix_64.c
+@@ -40,6 +40,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+ #include <asm/setup.h>
++#include <asm/espfix.h>
+ /*
+  * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
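
The sparse complaint behind this patch is the general rule that a non-static definition should have a prior declaration in scope; otherwise nothing verifies that the definition and its users agree on the type. Moving the declarations into a header that espfix_64.c itself includes gives the compiler that cross-check. (Note that the new header's first line still reads #ifdef: that is the broken guard fixed by x86-espfix-fix-broken-header-guard.patch earlier in this commitdiff.) The plain-C equivalent of the split, with illustrative names:

    /* espfix.h: declaration only; every includer sees one agreed-upon type */
    extern unsigned long espfix_stack;

    /* espfix_64.c: includes its own header first, so this definition is checked
     * against the declaration above. The patch makes the same split with
     * DECLARE_PER_CPU_READ_MOSTLY in the header and the DEFINE_ variant in the
     * .c file. */
    #include "espfix.h"
    unsigned long espfix_stack;
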
diff --git a/queue-3.4/x86-ioremap-speed-up-check-for-ram-pages.patch b/queue-3.4/x86-ioremap-speed-up-check-for-ram-pages.patch
new file mode 100644
index 0000000..a4cab98
--- /dev/null
+++ b/queue-3.4/x86-ioremap-speed-up-check-for-ram-pages.patch
@@ -0,0 +1,76 @@
+From c81c8a1eeede61e92a15103748c23d100880cc8a Mon Sep 17 00:00:00 2001
+From: Roland Dreier <roland@purestorage.com>
+Date: Fri, 2 May 2014 11:18:41 -0700
+Subject: x86, ioremap: Speed up check for RAM pages
+
+From: Roland Dreier <roland@purestorage.com>
+
+commit c81c8a1eeede61e92a15103748c23d100880cc8a upstream.
+
+In __ioremap_caller() (the guts of ioremap), we loop over the range of
+pfns being remapped and check each one individually with page_is_ram().
+For large ioremaps, this can be very slow.  For example, we have a
+device with a 256 GiB PCI BAR, and ioremapping this BAR can take 20+
+seconds -- sometimes long enough to trigger the soft lockup detector!
+
+Internally, page_is_ram() calls walk_system_ram_range() on a single
+page.  Instead, we can make a single call to walk_system_ram_range()
+from __ioremap_caller(), and do our further checks only for any RAM
+pages that we find.  For the common case of MMIO, this saves an enormous
+amount of work, since the range being ioremapped doesn't intersect
+system RAM at all.
+
+With this change, ioremap on our 256 GiB BAR takes less than 1 second.
+
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+Link: http://lkml.kernel.org/r/1399054721-1331-1-git-send-email-roland@kernel.org
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/ioremap.c |   26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long va
+       return err;
+ }
++static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
++                             void *arg)
++{
++      unsigned long i;
++
++      for (i = 0; i < nr_pages; ++i)
++              if (pfn_valid(start_pfn + i) &&
++                  !PageReserved(pfn_to_page(start_pfn + i)))
++                      return 1;
++
++      WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
++
++      return 0;
++}
++
+ /*
+  * Remap an arbitrary physical address space into the kernel virtual
+  * address space. Needed when the kernel wants to access high addresses
+@@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(re
+       /*
+        * Don't allow anybody to remap normal RAM that we're using..
+        */
++      pfn      = phys_addr >> PAGE_SHIFT;
+       last_pfn = last_addr >> PAGE_SHIFT;
+-      for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
+-              int is_ram = page_is_ram(pfn);
+-
+-              if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+-                      return NULL;
+-              WARN_ON_ONCE(is_ram);
+-      }
++      if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
++                                __ioremap_check_ram) == 1)
++              return NULL;
+       /*
+        * Mappings have to be page-aligned
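
The shape of the optimization: the old loop re-derived "is this pfn RAM?" from scratch for every page, one resource-table walk per pfn, while the new code walks the table once and runs __ioremap_check_ram() only on the sub-ranges that actually intersect RAM, which for a pure MMIO BAR is zero times. A self-contained userspace sketch of that callback pattern (all names below are illustrative stand-ins; only walk_system_ram_range() and __ioremap_check_ram() in the patch itself are real kernel symbols):

    #include <stdio.h>

    struct ram_range { unsigned long start_pfn, end_pfn; };

    /* stand-in for the kernel's table of System RAM resources */
    static const struct ram_range ram[] = { { 0x100, 0x200 }, { 0x800, 0x900 } };

    /* stand-in for walk_system_ram_range(): one pass, callback per intersection */
    static int walk_ram(unsigned long start, unsigned long nr, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
    {
            unsigned long end = start + nr;
            for (unsigned i = 0; i < sizeof(ram) / sizeof(ram[0]); i++) {
                    unsigned long lo = ram[i].start_pfn > start ? ram[i].start_pfn : start;
                    unsigned long hi = ram[i].end_pfn < end ? ram[i].end_pfn : end;
                    if (lo < hi) {
                            int ret = func(lo, hi - lo, arg);
                            if (ret)
                                    return ret;
                    }
            }
            return 0;
    }

    /* plays the role of __ioremap_check_ram(): flag any RAM page in the request */
    static int check_ram_cb(unsigned long start_pfn, unsigned long nr_pages, void *arg)
    {
            (void)arg;
            printf("request overlaps RAM at pfn 0x%lx (%lu pages)\n", start_pfn, nr_pages);
            return 1;
    }

    int main(void)
    {
            /* a typical MMIO BAR: no intersection, the callback never runs */
            printf("MMIO request -> %d\n", walk_ram(0x1000, 0x4000, NULL, check_ram_cb));
            /* a request straying into RAM: rejected after a single walk */
            printf("RAM request  -> %d\n", walk_ram(0x180, 0x10, NULL, check_ram_cb));
            return 0;
    }
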