/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/idle.h>

#define MISC_MCELOG_MINOR	227
#define NR_SYSFS_BANKS		6

atomic_t mce_entry;

static int mce_dont_init;

/*
 * Tolerant levels:
 * 0: always panic on uncorrected errors, log corrected errors
 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
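/*
 * For example (a sketch; the path follows from the "machinecheck" sysdev
 * class registered below, one directory per CPU):
 *
 *	echo 2 > /sys/devices/system/machinecheck/machinecheck0/tolerant
 */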
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = -1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break them.  It
 * also keeps MCEs separate from other kernel messages to avoid bogus bug
 * reports.
 */

static struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

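/*
 * Writer-side protocol of mce_log() below: reserve a slot by advancing
 * mcelog.next with cmpxchg(), copy the record in, then set ->finished
 * behind a wmb().  A reader that observes ->finished is therefore
 * guaranteed to observe the complete record.
 */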
void mce_log(struct mce *mce)
{
	unsigned next, entry;
	atomic_inc(&mce_events);
	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/* When the buffer fills up discard new entries. Assume
			   that the earlier errors are the more interesting. */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip. */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &notify_user);
}

static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->ip) {
		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->ip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;

	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	} else {
		m->ip = 0;
		m->cs = 0;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->ip);
		m->cs = 0;
	}
}

/*
 * The actual machine check handler.
 */
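/*
 * Calling conventions used within this file: regs == NULL means we were
 * called from the polling timer or from boot-time log replay, so nothing
 * final (SIGBUS/panic) is done.  error_code -1 and -2 are internal values
 * passed by mce_init(): -1 logs records left over from before the last
 * reset (without a TSC), -2 clears them without logging (see mce_bootlog).
 */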
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, panicm;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	atomic_inc(&mce_entry);

	if ((regs
	     && notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
	    || !banks)
		goto out2;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	rdtscll(mcestart);
	barrier();

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS && !bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble.  If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code >= 0)
			rdtscll(m.tsc);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		add_taint(TAINT_MACHINE_CHECK);
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done.  Try to kill as little as possible.  If we can kill just
	 * one task, do that.  If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.ip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS.  Otherwise, panic if tolerance is low.
		 *
		 * force_sig() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			force_sig(SIGBUS, current);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				  &panicm, mcestart);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

out:
	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
out2:
	atomic_dec(&mce_entry);
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
	struct mce m;

	memset(&m, 0, sizeof(m));
	m.cpu = cpu;
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	rdtscll(m.tsc);
	mce_log(&m);
}
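
/*
 * A minimal caller sketch (hypothetical; the real call site lives in the
 * Intel thermal interrupt path, and the exact MSR name is an assumption):
 *
 *	__u64 status;
 *	rdmsrl(MSR_IA32_THERM_STATUS, status);
 *	mce_log_therm_throt_event(smp_processor_id(), status);
 */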
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
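/*
 * E.g. with HZ=1000 and the default check_interval of 300s: polls that
 * keep finding errors halve the delay 300s -> 150s -> 75s -> ... down to
 * the HZ/100 (10ms) floor; quiet polls double it back up to 300s.
 */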

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);

static void mcheck_check_cpu(void *info)
{
	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);
}

static void mcheck_timer(struct work_struct *work)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1);

	/*
	 * Alert userspace if needed.  If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	if (mce_notify_user()) {
		next_interval = max(next_interval/2, HZ/100);
	} else {
		next_interval = min(next_interval * 2,
				(int)round_jiffies_relative(check_interval*HZ));
	}

	schedule_delayed_work(&mcheck_work, next_interval);
}

/*
 * This is only called from process context.  This is where we do
 * anything we need to alert userspace about new MCEs.  This is called
 * directly from the poller and also from entry.S and idle, thanks to
 * TIF_MCE_NOTIFY.
 */
int mce_notify_user(void)
{
	clear_thread_flag(TIF_MCE_NOTIFY);
	if (test_and_clear_bit(0, &notify_user)) {
		static unsigned long last_print;
		unsigned long now = jiffies;

		wake_up_interruptible(&mce_wait);
		if (trigger[0])
			call_usermodehelper(trigger, trigger_argv, NULL,
					    UMH_NO_WAIT);

		if (time_after_eq(now, last_print + (check_interval*HZ))) {
			last_print = now;
			printk(KERN_INFO "Machine check events logged\n");
		}

		return 1;
	}
	return 0;
}

/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

	return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
	idle_notifier_register(&mce_idle_notifier);
	return 0;
}
__initcall(periodic_mcheck_init);


/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > MCE_EXTENDED_BANK) {
		banks = MCE_EXTENDED_BANK;
		printk(KERN_INFO "MCE: warning: using only %d banks\n",
		       MCE_EXTENDED_BANK);
	}
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers. */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS)
			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		else
			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);

		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15)
			/* disable GART TBL walk error reporting, which trips off
			   incorrectly with the IOMMU & 3ware & Cerberus. */
			clear_bit(10, &bank[4]);
		if (c->x86 <= 17 && mce_bootlog < 0)
			/* Lots of broken BIOSes around that don't clear them
			   by default and leave crap in there.  Don't log. */
			mce_bootlog = 0;
	}
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}

/*
 * Character device to read and clear the MCE log.
 */

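/*
 * Userspace usage sketch (hypothetical consumer; mcelog(8) is the real
 * one).  mce_read() below rejects partial reads, so the buffer must
 * cover the whole log; the MCE_GET_LOG_LEN/MCE_GET_RECORD_LEN ioctls
 * report the sizes:
 *
 *	struct mce records[MCE_LOG_LEN];
 *	int i, fd = open("/dev/mcelog", O_RDONLY);
 *	ssize_t n = read(fd, records, sizeof(records));
 *	for (i = 0; i < n / (ssize_t)sizeof(struct mce); i++)
 *		decode(&records[i]);	(decode() is a hypothetical helper)
 */
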
static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	lock_kernel();
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);
		unlock_kernel();
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);
	unlock_kernel();

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	unsigned long *cpu_tsc;
	static DEFINE_MUTEX(mce_read_mutex);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i, 0, sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations mce_chrdev_ops = {
	.open = mce_open,
	.release = mce_release,
	.read = mce_read,
	.poll = mce_poll,
	.unlocked_ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

static unsigned long old_cr4 __initdata;

void __init stop_mce(void)
{
	old_cr4 = read_cr4();
	clear_in_cr4(X86_CR4_MCE);
}

void __init restart_mce(void)
{
	if (old_cr4 & X86_CR4_MCE)
		set_in_cr4(X86_CR4_MCE);
}

/*
 * Old style boot options parsing.  Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 1;
}

/* mce=off disables machine check.  Note you can re-enable it later
   using sysfs.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting.  Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
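/* Examples:
   mce=off		(disable machine checks entirely)
   mce=2		(set tolerant level 2)
   mce=bootlog	(force boot-time leftover logging, e.g. on AMD) */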
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = str[0] == 'b';
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk("mce= argument %s ignored. Please use /sys\n", str);
	return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce=", mcheck_enable);


/*
 * Sysfs support
 */

/* On resume clear all MCE state.  Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get re-added later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	mce_cpu_features(&current_cpu_data);
	return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	if (next_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1);
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
}

static struct sysdev_class mce_sysclass = {
	.resume = mce_resume,
	.name = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, device_mce);
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s,		\
				     struct sysdev_attribute *attr,	\
				     char *buf) {			\
		return sprintf(buf, "%lx\n", (unsigned long)var);	\
	}								\
	static ssize_t set_ ## name(struct sys_device *s,		\
				    struct sysdev_attribute *attr,	\
				    const char *buf, size_t siz) {	\
		char *end;						\
		unsigned long new = simple_strtoul(buf, &end, 0);	\
		if (end == buf) return -EINVAL;				\
		var = new;						\
		start;							\
		return end-buf;						\
	}								\
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);

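/*
 * For instance, ACCESSOR(bank0ctl, bank[0], mce_restart()) below expands
 * to show_bank0ctl()/set_bank0ctl() plus a 0644 sysfs attribute named
 * "bank0ctl"; writing a hex mask to that file updates bank[0] and runs
 * mce_restart() to propagate the new value into the MC0_CTL MSR.
 */
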
/*
 * TBD should generate these dynamically based on number of available banks.
 * Have only 6 control banks in /sysfs until then.
 */
ACCESSOR(bank0ctl, bank[0], mce_restart())
ACCESSOR(bank1ctl, bank[1], mce_restart())
ACCESSOR(bank2ctl, bank[2], mce_restart())
ACCESSOR(bank3ctl, bank[3], mce_restart())
ACCESSOR(bank4ctl, bank[4], mce_restart())
ACCESSOR(bank5ctl, bank[5], mce_restart())

static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			    char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;
	int len;
	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	p = strchr(trigger, '\n');
	if (p)
		*p = 0;
	return len;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
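
/*
 * E.g. (hypothetical helper path):
 *	echo /usr/local/bin/mce-helper > .../machinecheck0/trigger
 * The named program is then run via call_usermodehelper() from
 * mce_notify_user() whenever new MCEs have been logged.
 */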
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
ACCESSOR(check_interval, check_interval, mce_restart())
static struct sysdev_attribute *mce_attributes[] = {
	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
	NULL
};

static cpumask_t mce_device_initialized = CPU_MASK_NONE;

/* Per CPU sysdev init.  All of the CPUs still share the same ctl bank. */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(device_mce, cpu).id = cpu;
	per_cpu(device_mce, cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce, cpu));
	if (err)
		return err;

	for (i = 0; mce_attributes[i]; i++) {
		err = sysdev_create_file(&per_cpu(device_mce, cpu),
					 mce_attributes[i]);
		if (err)
			goto error;
	}
	cpu_set(cpu, mce_device_initialized);

	return 0;
error:
	while (i--) {
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	}
	sysdev_unregister(&per_cpu(device_mce, cpu));

	return err;
}

static __cpuinit void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpu_isset(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce, cpu));
	cpu_clear(cpu, mce_device_initialized);
}

/* Get notified when a CPU comes on/off.  Be hotplug friendly. */
static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;
	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);