]>
Commit | Line | Data |
---|---|---|
2cb7cef9 BS |
1 | From: Andi Kleen <andi@firstfloor.org> |
2 | Date: Thu, 9 Apr 2009 10:28:22 +0000 (+0200) | |
3 | Subject: x86, mce: make polling timer interval per CPU | |
4 | Patch-mainline: v2.6.30-rc5~52^2~1 | |
5 | Git-commit: 6298c512bc1007c3ff5c9ce20e6996781651cc45 | |
6 | References: bnc#507557 | |
7 | ||
8 | x86, mce: make polling timer interval per CPU | |
9 | ||
10 | The polling timer while running per CPU still uses a global next_interval | |
11 | variable, which leads to some CPUs either polling too fast or too slow. | |
12 | This was not a serious problem because all errors get picked up eventually, | |
13 | but it's still better to avoid it. Turn next_interval into a per cpu variable. | |
14 | ||
15 | v2: Fix check_interval == 0 case (Hidetoshi Seto) | |
16 | ||
17 | [ Impact: minor bug fix ] | |
18 | ||
19 | Signed-off-by: Andi Kleen <ak@linux.intel.com> | |
20 | Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> | |
21 | Signed-off-by: H. Peter Anvin <hpa@linux.intel.com> | |
22 | Acked-by: Jeff Mahoney <jeffm@suse.com> | |
23 | --- | |
24 | arch/x86/kernel/cpu/mcheck/mce_64.c | 24 ++++++++++++------------ | |
25 | 1 file changed, 12 insertions(+), 12 deletions(-) | |
26 | ||
27 | --- a/arch/x86/kernel/cpu/mcheck/mce_64.c | |
28 | +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |
29 | @@ -352,13 +352,14 @@ void mce_log_therm_throt_event(unsigned | |
30 | */ | |
31 | ||
32 | static int check_interval = 5 * 60; /* 5 minutes */ | |
33 | -static int next_interval; /* in jiffies */ | |
34 | +static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ | |
35 | static void mcheck_timer(unsigned long); | |
36 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | |
37 | ||
38 | static void mcheck_timer(unsigned long data) | |
39 | { | |
40 | struct timer_list *t = &per_cpu(mce_timer, data); | |
41 | + int *n; | |
42 | ||
43 | WARN_ON(smp_processor_id() != data); | |
44 | ||
45 | @@ -369,14 +370,14 @@ static void mcheck_timer(unsigned long d | |
46 | * Alert userspace if needed. If we logged an MCE, reduce the | |
47 | * polling interval, otherwise increase the polling interval. | |
48 | */ | |
49 | + n = &__get_cpu_var(next_interval); | |
50 | if (mce_notify_user()) { | |
51 | - next_interval = max(next_interval/2, HZ/100); | |
52 | + *n = max(*n/2, HZ/100); | |
53 | } else { | |
54 | - next_interval = min(next_interval * 2, | |
55 | - (int)round_jiffies_relative(check_interval*HZ)); | |
56 | + *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); | |
57 | } | |
58 | ||
59 | - t->expires = jiffies + next_interval; | |
60 | + t->expires = jiffies + *n; | |
61 | add_timer(t); | |
62 | } | |
63 | ||
64 | @@ -502,14 +503,13 @@ static void __cpuinit mce_cpu_features(s | |
65 | static void mce_init_timer(void) | |
66 | { | |
67 | struct timer_list *t = &__get_cpu_var(mce_timer); | |
68 | + int *n = &__get_cpu_var(next_interval); | |
69 | ||
70 | - /* data race harmless because everyone sets to the same value */ | |
71 | - if (!next_interval) | |
72 | - next_interval = check_interval * HZ; | |
73 | - if (!next_interval) | |
74 | + *n = check_interval * HZ; | |
75 | + if (!*n) | |
76 | return; | |
77 | setup_timer(t, mcheck_timer, smp_processor_id()); | |
78 | - t->expires = round_jiffies(jiffies + next_interval); | |
79 | + t->expires = round_jiffies(jiffies + *n); | |
80 | add_timer(t); | |
81 | } | |
82 | ||
83 | @@ -761,7 +761,6 @@ static void mce_cpu_restart(void *data) | |
84 | /* Reinit MCEs after user configuration changes */ | |
85 | static void mce_restart(void) | |
86 | { | |
87 | - next_interval = check_interval * HZ; | |
88 | on_each_cpu(mce_cpu_restart, NULL, 1); | |
89 | } | |
90 | ||
91 | @@ -912,7 +911,8 @@ static int __cpuinit mce_cpu_callback(st | |
92 | break; | |
93 | case CPU_DOWN_FAILED: | |
94 | case CPU_DOWN_FAILED_FROZEN: | |
95 | - t->expires = round_jiffies(jiffies + next_interval); | |
96 | + t->expires = round_jiffies(jiffies + | |
97 | + __get_cpu_var(next_interval)); | |
98 | add_timer_on(t, cpu); | |
99 | break; | |
100 | } |