From b253149b843f89cd300cbdbea27ce1f847506f99 Mon Sep 17 00:00:00 2001
From: Len Brown <len.brown@intel.com>
Date: Wed, 15 Jan 2014 00:37:34 -0500
Subject: sched/idle/x86: Restore mwait_idle() to fix boot hangs, to improve power savings and to improve performance

From: Len Brown <len.brown@intel.com>

commit b253149b843f89cd300cbdbea27ce1f847506f99 upstream.

In Linux-3.9 we removed the mwait_idle() loop:

  69fb3676df33 ("x86 idle: remove mwait_idle() and "idle=mwait" cmdline param")

The reasoning was that modern machines should be sufficiently
happy during the boot process using the default_idle() HALT
loop, until cpuidle loads and either acpi_idle or intel_idle
invoke the newer MWAIT-with-hints idle loop.
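
For background (not part of this patch): the MWAIT-with-hints entry used by
cpuidle drivers boils down to the __monitor()/__mwait() helpers from
<asm/mwait.h>, with the EAX hint selecting the target C-state, whereas
default_idle() simply executes HLT. A minimal, illustrative sketch follows;
the function name is made up for this example, and it assumes the usual
kernel headers are in scope:

    /*
     * Sketch only (hypothetical helper): arm the monitor on this task's
     * flags word, then MWAIT on it.  cpuidle drivers typically pass ECX=1
     * so that interrupts can break out of MWAIT even when disabled.
     */
    static void example_enter_mwait_cstate(unsigned long eax_hint, unsigned long ecx)
    {
            __monitor((void *)&current_thread_info()->flags, 0, 0);
            smp_mb();
            if (!need_resched())
                    __mwait(eax_hint, ecx);  /* e.g. hint 0x00 requests C1 */
    }

The mwait_idle() loop restored by the diff below is the hint-0 (C1) flavour
of the same idea, used only until cpuidle takes over.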

But two machines reported problems:

1. Certain Core2-era machines support MWAIT-C1 and HALT only.
   MWAIT-C1 is preferred for optimal power and performance.
   But if they support just C1, cpuidle never loads and
   so they use the boot-time default idle loop forever.

2. Some laptops will boot-hang if HALT is used,
   but will boot successfully if MWAIT is used.
   This appears to be a hidden assumption in BIOS SMI,
   that is presumably valid on the proprietary OS
   where the BIOS was validated.

   https://bugzilla.kernel.org/show_bug.cgi?id=60770

So here we effectively revert the patch above, restoring
the mwait_idle() loop. However, we don't bother restoring
the idle=mwait cmdline parameter, since it appears to add
no value.

Maintainer notes:

For 3.9, simply revert 69fb3676df.
For 3.10, patch -F3 applies; fuzz is needed due to __cpuinit use in the context.
For 3.11, 3.12, 3.13, this patch applies cleanly.

Tested-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Len Brown <len.brown@intel.com>
Acked-by: Mike Galbraith <bitbucket@online.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ian Malone <ibmalone@gmail.com>
Cc: Josh Boyer <jwboyer@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/345254a551eb5a6a866e048d7ab570fd2193aca4.1389763084.git.len.brown@intel.com
[ Ported to recent kernels. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index a1410db38a1a..653dfa7662e1 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
                      :: "a" (eax), "c" (ecx));
 }
 
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+        trace_hardirqs_on();
+        /* "mwait %eax, %ecx;" */
+        asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+                     :: "a" (eax), "c" (ecx));
+}
+
 /*
  * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
  * which can obviate IPI to trigger checking of need_resched.
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e127ddaa2d5a..da06f741d2a6 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -24,6 +24,7 @@
 #include <asm/syscalls.h>
 #include <asm/idle.h>
 #include <asm/uaccess.h>
+#include <asm/mwait.h>
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
@@ -398,6 +399,49 @@ static void amd_e400_idle(void)
                 default_idle();
 }
 
+/*
+ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
+ * We can't rely on cpuidle installing MWAIT, because it will not load
+ * on systems that support only C1 -- so the boot default must be MWAIT.
+ *
+ * Some AMD machines are the opposite, they depend on using HALT.
+ *
+ * So for default C1, which is used during boot until cpuidle loads,
+ * use MWAIT-C1 on Intel HW that has it, else use HALT.
+ */
+static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+{
+        if (c->x86_vendor != X86_VENDOR_INTEL)
+                return 0;
+
+        if (!cpu_has(c, X86_FEATURE_MWAIT))
+                return 0;
+
+        return 1;
+}
+
+/*
+ * MONITOR/MWAIT with no hints, used for default C1 state.
+ * This invokes MWAIT with interrupts enabled and no flags,
+ * which is backwards compatible with the original MWAIT implementation.
+ */
+
+static void mwait_idle(void)
+{
+        if (!need_resched()) {
+                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR))
+                        clflush((void *)&current_thread_info()->flags);
+
+                __monitor((void *)&current_thread_info()->flags, 0, 0);
+                smp_mb();
+                if (!need_resched())
+                        __sti_mwait(0, 0);
+                else
+                        local_irq_enable();
+        } else
+                local_irq_enable();
+}
+
 void select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
@@ -411,6 +455,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
                 /* E400: APIC timer interrupt does not wake up CPU from C1e */
                 pr_info("using AMD E400 aware idle routine\n");
                 x86_idle = amd_e400_idle;
+        } else if (prefer_mwait_c1_over_halt(c)) {
+                pr_info("using mwait in idle threads\n");
+                x86_idle = mwait_idle;
         } else
                 x86_idle = default_idle;
 }