/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/idle.h>

#define MISC_MCELOG_MINOR 227
#define NR_SYSFS_BANKS 6

atomic_t mce_entry;

static int mce_dont_init;

/*
 * Tolerant levels:
 * 0: always panic on uncorrected errors, log corrected errors
 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = -1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

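/*
 * How the lockless log works (a summary of the code below, for
 * reference): a writer first scans forward from mcelog.next past any
 * entries already marked finished, then claims a free slot with
 * cmpxchg() on mcelog.next. Only after the record has been copied in
 * full is the entry's 'finished' flag set, with wmb() barriers
 * ordering the stores so readers never see a half-written record.
 * When the buffer is full, new entries are dropped on the assumption
 * that the earliest errors are the most interesting ones.
 */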
void mce_log(struct mce *mce)
{
	unsigned next, entry;
	atomic_inc(&mce_events);
	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/* When the buffer fills up discard new entries. Assume
			   that the earlier errors are the more interesting. */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip. */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &notify_user);
}

static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->ip) {
		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->ip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;

	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_dont_init)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	} else {
		m->ip = 0;
		m->cs = 0;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->ip);
		m->cs = 0;
	}
}

/*
 * The actual machine check handler.
 */
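/*
 * Calling convention, as used throughout this file: a NULL regs means
 * we were invoked from the polling timer rather than from the #MC
 * exception. The error_code values -1 and -2 are only passed by
 * mce_init() when replaying events left over from before boot;
 * -2 additionally suppresses logging (see the mce_bootlog handling).
 */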
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, panicm;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	atomic_inc(&mce_entry);

	if ((regs
	     && notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
	    || !banks)
		goto out2;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	rdtscll(mcestart);
	barrier();

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS && !bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble. If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code >= 0)
			rdtscll(m.tsc);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		add_taint(TAINT_MACHINE_CHECK);
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.ip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS. Otherwise, panic if tolerance is low.
		 *
		 * force_sig() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			force_sig(SIGBUS, current);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				  &panicm, mcestart);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

 out:
	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
	atomic_dec(&mce_entry);
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
	struct mce m;

	memset(&m, 0, sizeof(m));
	m.cpu = cpu;
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	rdtscll(m.tsc);
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
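/*
 * For illustration (assuming HZ=1000 and the default check_interval of
 * 300 seconds): each event-free pass doubles next_interval back up to
 * round_jiffies_relative(300*HZ), while each pass that logged an event
 * halves it, bounded below by HZ/100, i.e. 10ms between polls at worst.
 */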

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(unsigned long);
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mcheck_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);

	WARN_ON(smp_processor_id() != data);

	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	if (mce_notify_user()) {
		next_interval = max(next_interval/2, HZ/100);
	} else {
		next_interval = min(next_interval * 2,
				(int)round_jiffies_relative(check_interval*HZ));
	}

	t->expires = jiffies + next_interval;
	add_timer(t);
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_user(void)
{
	clear_thread_flag(TIF_MCE_NOTIFY);
	if (test_and_clear_bit(0, &notify_user)) {
		static unsigned long last_print;
		unsigned long now = jiffies;

		wake_up_interruptible(&mce_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (trigger[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (time_after_eq(now, last_print + (check_interval*HZ))) {
			last_print = now;
			printk(KERN_INFO "Machine check events logged\n");
		}

		return 1;
	}
	return 0;
}

/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

	return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
	idle_notifier_register(&mce_idle_notifier);
	return 0;
}
__initcall(periodic_mcheck_init);

/*
 * Initialize Machine Checks for a CPU.
 */
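/*
 * A note on the MCG_CAP decoding below (per the x86 MCA architecture;
 * stated here for reference): bits 7:0 give the number of reporting
 * banks, bit 8 (MCG_CTL_P) indicates the MCG_CTL MSR is present, bit 9
 * flags extended-register support, and bits 23:16 count the extended
 * registers; nine or more of them implies an MCG_EIP register carrying
 * an accurate RIP.
 */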
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > MCE_EXTENDED_BANK) {
		banks = MCE_EXTENDED_BANK;
		printk(KERN_INFO "MCE: warning: using only %d banks\n",
		       MCE_EXTENDED_BANK);
	}
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS)
			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		else
			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);

		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15)
			/* disable GART TBL walk error reporting, which trips off
			   incorrectly with the IOMMU & 3ware & Cerberus. */
			clear_bit(10, &bank[4]);
		if (c->x86 <= 17 && mce_bootlog < 0)
			/* Lots of broken BIOSes around that don't clear them
			   by default and leave crap in there. Don't log. */
			mce_bootlog = 0;
	}
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void mce_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);

	/* data race harmless because everyone sets to the same value */
	if (!next_interval)
		next_interval = check_interval * HZ;
	if (!next_interval)
		return;
	setup_timer(t, mcheck_timer, smp_processor_id());
	t->expires = round_jiffies_relative(jiffies + next_interval);
	add_timer(t);
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	mce_cpu_quirks(c);

	if (!mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
	mce_init_timer();
}

/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	lock_kernel();
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);
		unlock_kernel();
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);
	unlock_kernel();

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

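/*
 * mce_read() works in two passes (sketched here for the reader): first
 * it copies out the entries below mcelog.next, spinning briefly on any
 * record whose 'finished' flag a logger has not set yet; then, after a
 * synchronize_sched() and a cross-CPU TSC sample via collect_tscs(),
 * it sweeps the rest of the buffer for stragglers that were still
 * being written during the first pass.
 */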
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	unsigned long *cpu_tsc;
	static DEFINE_MUTEX(mce_read_mutex);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i, 0, sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
 timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

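/*
 * The ioctl interface below is what the user-space mcelog(8) tool
 * uses: it queries the record and buffer sizes so it can issue a
 * correctly sized read(), and it atomically fetches and clears the
 * status flags (currently just MCE_OVERFLOW).
 */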
static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations mce_chrdev_ops = {
	.open = mce_open,
	.release = mce_release,
	.read = mce_read,
	.poll = mce_poll,
	.unlocked_ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 1;
}

/* mce=off disables machine check.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
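/*
 * Example kernel command lines (for illustration): "mce=off" turns the
 * handler off entirely, "mce=3" sets the most permissive tolerance
 * level, and "mce=bootlog" forces boot-time log replay even on AMD,
 * where it is otherwise suppressed by mce_cpu_quirks().
 */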
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = str[0] == 'b';
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk(KERN_INFO "mce= argument %s ignored. Please use /sys\n",
		       str);
	return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce=", mcheck_enable);

/*
 * Sysfs support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable(void)
{
	int i;

	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
	return 0;
}

static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
	return mce_disable();
}

static int mce_shutdown(struct sys_device *dev)
{
	return mce_disable();
}

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get re-added later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	mce_cpu_features(&current_cpu_data);
	return 0;
}

static void mce_cpu_restart(void *data)
{
	del_timer_sync(&__get_cpu_var(mce_timer));
	if (mce_available(&current_cpu_data))
		mce_init(NULL);
	mce_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	next_interval = check_interval * HZ;
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

static struct sysdev_class mce_sysclass = {
	.suspend = mce_suspend,
	.shutdown = mce_shutdown,
	.resume = mce_resume,
	.name = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, device_mce);
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s, \
				     struct sysdev_attribute *attr, \
				     char *buf) { \
		return sprintf(buf, "%lx\n", (unsigned long)var); \
	} \
	static ssize_t set_ ## name(struct sys_device *s, \
				    struct sysdev_attribute *attr, \
				    const char *buf, size_t siz) { \
		char *end; \
		unsigned long new = simple_strtoul(buf, &end, 0); \
		if (end == buf) return -EINVAL; \
		var = new; \
		start; \
		return end-buf; \
	} \
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
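/*
 * For illustration, ACCESSOR(bank0ctl, bank[0], mce_restart()) expands
 * to a show_bank0ctl/set_bank0ctl pair backed by bank[0], with
 * mce_restart() run after every store, and an attr_bank0ctl attribute
 * that appears as a "bank0ctl" file under the per-CPU machinecheck
 * sysdev directory (/sys/devices/system/machinecheck/machinecheckN/).
 */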

/*
 * TBD should generate these dynamically based on number of available banks.
 * Have only 6 control banks in /sysfs until then.
 */
ACCESSOR(bank0ctl, bank[0], mce_restart())
ACCESSOR(bank1ctl, bank[1], mce_restart())
ACCESSOR(bank2ctl, bank[2], mce_restart())
ACCESSOR(bank3ctl, bank[3], mce_restart())
ACCESSOR(bank4ctl, bank[4], mce_restart())
ACCESSOR(bank5ctl, bank[5], mce_restart())

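/*
 * "trigger" holds the path of a user-space program (if any) that
 * mce_notify_user() launches via the mce_trigger_work workqueue
 * whenever new events are logged; it is configured through the sysfs
 * "trigger" attribute defined below.
 */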
static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			    char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;
	int len;
	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	p = strchr(trigger, '\n');
	/* strchr() returns NULL when there is no newline; check the
	   pointer itself before writing through it. */
	if (p)
		*p = 0;
	return len;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
ACCESSOR(check_interval, check_interval, mce_restart())
static struct sysdev_attribute *mce_attributes[] = {
	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
	NULL
};

static cpumask_t mce_device_initialized = CPU_MASK_NONE;

/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(device_mce, cpu).id = cpu;
	per_cpu(device_mce, cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce, cpu));
	if (err)
		return err;

	for (i = 0; mce_attributes[i]; i++) {
		err = sysdev_create_file(&per_cpu(device_mce, cpu),
					 mce_attributes[i]);
		if (err)
			goto error;
	}
	cpu_set(cpu, mce_device_initialized);

	return 0;
error:
	while (i--) {
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	}
	sysdev_unregister(&per_cpu(device_mce, cpu));

	return err;
}

static __cpuinit void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpu_isset(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce, cpu));
	cpu_clear(cpu, mce_device_initialized);
}

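/*
 * Hotplug policy, summarized: CPU_ONLINE creates the sysdev and runs
 * the (optional) AMD threshold callback; CPU_DEAD tears both down;
 * CPU_DOWN_PREPARE stops the polling timer before the CPU goes away,
 * and CPU_DOWN_FAILED restarts it if the offline attempt was aborted.
 */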
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		del_timer_sync(t);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		t->expires = round_jiffies_relative(jiffies + next_interval);
		add_timer_on(t, cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;
	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);