// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);

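/*
 * Render the low 'nbits' bits of 'x' into 'buf' as an ASCII string of
 * '0'/'1' characters, most significant bit first; 'buf' must have room
 * for nbits + 1 bytes.  E.g. printbinary(buf, 0x5, 4) yields "0101".
 */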
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

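/* Print four consecutive registers per line, prefixed with the set name (r/sr/fr). */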
#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1) \n\t"
		      "fldd 0(%1),%%fr0 \n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

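/*
 * Print a message plus the register state, rate-limited; non-critical
 * messages are additionally gated by the show_unhandled_signals sysctl.
 */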
#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	\
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
		printk(fmt, ##__VA_ARGS__);				\
		show_regs(regs);					\
	}								\
}


static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
				info->ip, (void *) info->ip);
			i++;
		}
	}
	printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

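	/*
	 * No pt_regs were passed in.  For the running task, synthesize a
	 * minimal pt_regs from the live stack pointer (r30), our caller's
	 * return address and the address of the HERE label below, so the
	 * unwinder can start from this very frame.
	 */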
	if (t == current) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
	return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			" \\ ^__^\n"
			" (__)\\ )\\/\\\n"
			" U ||----w |\n"
			" || ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/* gdb uses break 4,8 */
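/* 0x10004: im5 = 4 in bits 0..4 and im13 = 8 in bits 13..25 of the BREAK instruction */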
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	force_sig_fault(SIGTRAP, wot,
			(void __user *) (regs->iaoq[0] & ~3), current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

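	/*
	 * PA 2.0 CPUs (pcxu and later) deliver PIM data in the wide
	 * (pdc_hpmc_pim_20) layout; older PA 1.1 CPUs use the narrow
	 * (pdc_hpmc_pim_11) layout.
	 */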
	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
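		/* code 1 is an HPMC; the registers of record live in the PIM area */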
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
		msg, code, trap_name(code), regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	int si_code;

	if (code == 1)
		pdc_console_restart(); /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
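		/* the low two bits of the IAOQ hold the privilege level; 3 = user */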
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

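			/* skip the emulated instruction: advance the PC queue */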
			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si_code = ILL_PRVREG;
	give_sigill:
		force_sig_fault(SIGILL, si_code,
				(void __user *) regs->iaoq[0], current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		force_sig_fault(SIGFPE, FPE_INTOVF,
				(void __user *) regs->iaoq[0], current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if(user_mode(regs)){
			/* Let userspace app figure it out from the insn pointed
			 * to by si_addr.
			 */
			force_sig_fault(SIGFPE, FPE_CONDTRAP,
					(void __user *) regs->iaoq[0], current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(code == 7)?
				((void __user *) regs->iaoq[0]) :
				((void __user *) regs->ior), current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			force_sig_fault(SIGBUS, BUS_OBJERR,
					(void __user *)regs->ior, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
			force_sig_fault(SIGSEGV, SEGV_MAPERR,
					(void __user *)regs->ior, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space,
		 * unless pagefault_disable() was called before.
		 */

		if (fault_space == 0 && !faulthandler_disabled())
		{
			/* Clean up and return if in exception table. */
			if (fixup_exception(regs))
				return;
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
		}
	}

	do_page_fault(regs, code, fault_address);
}


void __init initialize_ivt(const void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length, instr;

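	/* the hand-written fault vector is expected to start with this marker string */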
	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/*
	 * Use PDC_INSTR firmware function to get instruction that invokes
	 * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
	 * the PA 1.1 Firmware Architecture document.
	 */
	if (pdc_instr(&instr) == PDC_OK)
		ivap[0] = instr;

	/*
	 * Rules for the checksum of the HPMC handler:
	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
	 *    its own IVA).
	 * 2. The word at IVA + 32 is nonzero.
	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
	 *    Address (IVA + 56) are word-aligned.
	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
	 *    the Length/4 words starting at Address is zero.
	 */

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
		check += *hpmcp++;

	for (i=0; i<8; i++)
		check += ivap[i];

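	/* store the negated sum so that the total checksum (rule 4 above) is zero */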
	ivap[5] = -check;
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}

void __init trap_init(void)
{
}