2 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
4 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
10 #include <linux/string.h>
11 #include <linux/types.h>
12 #include <linux/sched.h>
13 #include <linux/ptrace.h>
14 #include <linux/mman.h>
15 #include <linux/signal.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/perf_event.h>
20 #include <linux/interrupt.h>
21 #include <linux/kprobes.h>
22 #include <linux/kdebug.h>
23 #include <linux/percpu.h>
24 #include <linux/context_tracking.h>
25 #include <linux/slab.h>
26 #include <linux/pagemap.h>
27 #include <linux/compiler.h>
30 #include <asm/pgtable.h>
31 #include <asm/openprom.h>
32 #include <asm/oplib.h>
33 #include <asm/uaccess.h>
36 #include <asm/sections.h>
37 #include <asm/mmu_context.h>
38 #include <asm/setup.h>
/* When non-zero, print a rate-limited diagnostic for unhandled fatal
 * user signals: do_fault_siginfo() checks this flag before calling
 * show_signal_msg() to log the faulting task's PC/SP/address.
 */
int show_unhandled_signals
= 1;
42 static inline __kprobes
int notify_page_fault(struct pt_regs
*regs
)
46 /* kprobe_running() needs smp_processor_id() */
47 if (kprobes_built_in() && !user_mode(regs
)) {
49 if (kprobe_running() && kprobe_fault_handler(regs
, 0))
56 static void __kprobes
unhandled_fault(unsigned long address
,
57 struct task_struct
*tsk
,
60 if ((unsigned long) address
< PAGE_SIZE
) {
61 printk(KERN_ALERT
"Unable to handle kernel NULL "
62 "pointer dereference\n");
64 printk(KERN_ALERT
"Unable to handle kernel paging request "
65 "at virtual address %016lx\n", (unsigned long)address
);
67 printk(KERN_ALERT
"tsk->{mm,active_mm}->context = %016lx\n",
69 CTX_HWBITS(tsk
->mm
->context
) :
70 CTX_HWBITS(tsk
->active_mm
->context
)));
71 printk(KERN_ALERT
"tsk->{mm,active_mm}->pgd = %016lx\n",
72 (tsk
->mm
? (unsigned long) tsk
->mm
->pgd
:
73 (unsigned long) tsk
->active_mm
->pgd
));
74 die_if_kernel("Oops", regs
);
77 static void __kprobes
bad_kernel_pc(struct pt_regs
*regs
, unsigned long vaddr
)
79 printk(KERN_CRIT
"OOPS: Bogus kernel PC [%016lx] in fault handler\n",
81 printk(KERN_CRIT
"OOPS: RPC [%016lx]\n", regs
->u_regs
[15]);
82 printk("OOPS: RPC <%pA>\n", (void *) regs
->u_regs
[15]);
83 printk(KERN_CRIT
"OOPS: Fault was to vaddr[%lx]\n", vaddr
);
85 unhandled_fault(regs
->tpc
, current
, regs
);
89 * We now make sure that mmap_sem is held in all paths that call
90 * this. Additionally, to prevent kswapd from ripping ptes from
91 * under us, raise interrupts around the time that we look at the
92 * pte, kswapd will have to wait to get his smp ipi response from
93 * us. vmtruncate likewise. This saves us having to get pte lock.
95 static unsigned int get_user_insn(unsigned long tpc
)
97 pgd_t
*pgdp
= pgd_offset(current
->mm
, tpc
);
104 if (pgd_none(*pgdp
) || unlikely(pgd_bad(*pgdp
)))
106 pudp
= pud_offset(pgdp
, tpc
);
107 if (pud_none(*pudp
) || unlikely(pud_bad(*pudp
)))
110 /* This disables preemption for us as well. */
113 pmdp
= pmd_offset(pudp
, tpc
);
114 if (pmd_none(*pmdp
) || unlikely(pmd_bad(*pmdp
)))
117 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
118 if (pmd_trans_huge(*pmdp
)) {
119 if (pmd_trans_splitting(*pmdp
))
122 pa
= pmd_pfn(*pmdp
) << PAGE_SHIFT
;
123 pa
+= tpc
& ~HPAGE_MASK
;
125 /* Use phys bypass so we don't pollute dtlb/dcache. */
126 __asm__
__volatile__("lduwa [%1] %2, %0"
128 : "r" (pa
), "i" (ASI_PHYS_USE_EC
));
132 ptep
= pte_offset_map(pmdp
, tpc
);
134 if (pte_present(pte
)) {
135 pa
= (pte_pfn(pte
) << PAGE_SHIFT
);
136 pa
+= (tpc
& ~PAGE_MASK
);
138 /* Use phys bypass so we don't pollute dtlb/dcache. */
139 __asm__
__volatile__("lduwa [%1] %2, %0"
141 : "r" (pa
), "i" (ASI_PHYS_USE_EC
));
152 show_signal_msg(struct pt_regs
*regs
, int sig
, int code
,
153 unsigned long address
, struct task_struct
*tsk
)
155 if (!unhandled_signal(tsk
, sig
))
158 if (!printk_ratelimit())
161 printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
162 task_pid_nr(tsk
) > 1 ? KERN_INFO
: KERN_EMERG
,
163 tsk
->comm
, task_pid_nr(tsk
), address
,
164 (void *)regs
->tpc
, (void *)regs
->u_regs
[UREG_I7
],
165 (void *)regs
->u_regs
[UREG_FP
], code
);
167 print_vma_addr(KERN_CONT
" in ", regs
->tpc
);
169 printk(KERN_CONT
"\n");
172 static void do_fault_siginfo(int code
, int sig
, struct pt_regs
*regs
,
173 unsigned long fault_addr
, unsigned int insn
,
182 if (fault_code
& FAULT_CODE_ITLB
) {
185 /* If we were able to probe the faulting instruction, use it
186 * to compute a precise fault address. Otherwise use the fault
187 * time provided address which may only have page granularity.
190 addr
= compute_effective_address(regs
, insn
, 0);
194 info
.si_addr
= (void __user
*) addr
;
197 if (unlikely(show_unhandled_signals
))
198 show_signal_msg(regs
, sig
, code
, addr
, current
);
200 force_sig_info(sig
, &info
, current
);
203 static unsigned int get_fault_insn(struct pt_regs
*regs
, unsigned int insn
)
206 if (!regs
->tpc
|| (regs
->tpc
& 0x3))
208 if (regs
->tstate
& TSTATE_PRIV
) {
209 insn
= *(unsigned int *) regs
->tpc
;
211 insn
= get_user_insn(regs
->tpc
);
217 static void __kprobes
do_kernel_fault(struct pt_regs
*regs
, int si_code
,
218 int fault_code
, unsigned int insn
,
219 unsigned long address
)
221 unsigned char asi
= ASI_P
;
223 if ((!insn
) && (regs
->tstate
& TSTATE_PRIV
))
226 /* If user insn could be read (thus insn is zero), that
227 * is fine. We will just gun down the process with a signal
231 if (!(fault_code
& (FAULT_CODE_WRITE
|FAULT_CODE_ITLB
)) &&
232 (insn
& 0xc0800000) == 0xc0800000) {
234 asi
= (regs
->tstate
>> 24);
237 if ((asi
& 0xf2) == 0x82) {
238 if (insn
& 0x1000000) {
239 handle_ldf_stq(insn
, regs
);
241 /* This was a non-faulting load. Just clear the
242 * destination register(s) and continue with the next
245 handle_ld_nf(insn
, regs
);
251 /* Is this in ex_table? */
252 if (regs
->tstate
& TSTATE_PRIV
) {
253 const struct exception_table_entry
*entry
;
255 entry
= search_exception_tables(regs
->tpc
);
257 regs
->tpc
= entry
->fixup
;
258 regs
->tnpc
= regs
->tpc
+ 4;
262 /* The si_code was set to make clear whether
263 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
265 do_fault_siginfo(si_code
, SIGSEGV
, regs
, address
, insn
, fault_code
);
270 unhandled_fault (address
, current
, regs
);
273 static void noinline __kprobes
bogus_32bit_fault_tpc(struct pt_regs
*regs
)
278 printk(KERN_ERR
"FAULT[%s:%d]: 32-bit process reports "
279 "64-bit TPC [%lx]\n",
280 current
->comm
, current
->pid
,
285 #ifdef CONFIG_PAX_PAGEEXEC
286 #ifdef CONFIG_PAX_DLRESOLVE
287 static void pax_emuplt_close(struct vm_area_struct
*vma
)
289 vma
->vm_mm
->call_dl_resolve
= 0UL;
292 static int pax_emuplt_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
296 vmf
->page
= alloc_page(GFP_HIGHUSER
);
300 kaddr
= kmap(vmf
->page
);
301 memset(kaddr
, 0, PAGE_SIZE
);
302 kaddr
[0] = 0x9DE3BFA8U
; /* save */
303 flush_dcache_page(vmf
->page
);
305 return VM_FAULT_MAJOR
;
308 static const struct vm_operations_struct pax_vm_ops
= {
309 .close
= pax_emuplt_close
,
310 .fault
= pax_emuplt_fault
313 static int pax_insert_vma(struct vm_area_struct
*vma
, unsigned long addr
)
317 INIT_LIST_HEAD(&vma
->anon_vma_chain
);
318 vma
->vm_mm
= current
->mm
;
319 vma
->vm_start
= addr
;
320 vma
->vm_end
= addr
+ PAGE_SIZE
;
321 vma
->vm_flags
= VM_READ
| VM_EXEC
| VM_MAYREAD
| VM_MAYEXEC
;
322 vma
->vm_page_prot
= vm_get_page_prot(vma
->vm_flags
);
323 vma
->vm_ops
= &pax_vm_ops
;
325 ret
= insert_vm_struct(current
->mm
, vma
);
329 ++current
->mm
->total_vm
;
335 * PaX: decide what to do with offenders (regs->tpc = fault address)
337 * returns 1 when task should be killed
338 * 2 when patched PLT trampoline was detected
339 * 3 when unpatched PLT trampoline was detected
341 static int pax_handle_fetch_fault(struct pt_regs
*regs
)
344 #ifdef CONFIG_PAX_EMUPLT
347 do { /* PaX: patched PLT emulation #1 */
348 unsigned int sethi1
, sethi2
, jmpl
;
350 err
= get_user(sethi1
, (unsigned int *)regs
->tpc
);
351 err
|= get_user(sethi2
, (unsigned int *)(regs
->tpc
+4));
352 err
|= get_user(jmpl
, (unsigned int *)(regs
->tpc
+8));
357 if ((sethi1
& 0xFFC00000U
) == 0x03000000U
&&
358 (sethi2
& 0xFFC00000U
) == 0x03000000U
&&
359 (jmpl
& 0xFFFFE000U
) == 0x81C06000U
)
363 regs
->u_regs
[UREG_G1
] = (sethi2
& 0x003FFFFFU
) << 10;
364 addr
= regs
->u_regs
[UREG_G1
];
365 addr
+= (((jmpl
| 0xFFFFFFFFFFFFE000UL
) ^ 0x00001000UL
) + 0x00001000UL
);
367 if (test_thread_flag(TIF_32BIT
))
368 addr
&= 0xFFFFFFFFUL
;
376 do { /* PaX: patched PLT emulation #2 */
379 err
= get_user(ba
, (unsigned int *)regs
->tpc
);
384 if ((ba
& 0xFFC00000U
) == 0x30800000U
|| (ba
& 0xFFF80000U
) == 0x30480000U
) {
387 if ((ba
& 0xFFC00000U
) == 0x30800000U
)
388 addr
= regs
->tpc
+ ((((ba
| 0xFFFFFFFFFFC00000UL
) ^ 0x00200000UL
) + 0x00200000UL
) << 2);
390 addr
= regs
->tpc
+ ((((ba
| 0xFFFFFFFFFFF80000UL
) ^ 0x00040000UL
) + 0x00040000UL
) << 2);
392 if (test_thread_flag(TIF_32BIT
))
393 addr
&= 0xFFFFFFFFUL
;
401 do { /* PaX: patched PLT emulation #3 */
402 unsigned int sethi
, bajmpl
, nop
;
404 err
= get_user(sethi
, (unsigned int *)regs
->tpc
);
405 err
|= get_user(bajmpl
, (unsigned int *)(regs
->tpc
+4));
406 err
|= get_user(nop
, (unsigned int *)(regs
->tpc
+8));
411 if ((sethi
& 0xFFC00000U
) == 0x03000000U
&&
412 ((bajmpl
& 0xFFFFE000U
) == 0x81C06000U
|| (bajmpl
& 0xFFF80000U
) == 0x30480000U
) &&
417 addr
= (sethi
& 0x003FFFFFU
) << 10;
418 regs
->u_regs
[UREG_G1
] = addr
;
419 if ((bajmpl
& 0xFFFFE000U
) == 0x81C06000U
)
420 addr
+= (((bajmpl
| 0xFFFFFFFFFFFFE000UL
) ^ 0x00001000UL
) + 0x00001000UL
);
422 addr
= regs
->tpc
+ ((((bajmpl
| 0xFFFFFFFFFFF80000UL
) ^ 0x00040000UL
) + 0x00040000UL
) << 2);
424 if (test_thread_flag(TIF_32BIT
))
425 addr
&= 0xFFFFFFFFUL
;
433 do { /* PaX: patched PLT emulation #4 */
434 unsigned int sethi
, mov1
, call
, mov2
;
436 err
= get_user(sethi
, (unsigned int *)regs
->tpc
);
437 err
|= get_user(mov1
, (unsigned int *)(regs
->tpc
+4));
438 err
|= get_user(call
, (unsigned int *)(regs
->tpc
+8));
439 err
|= get_user(mov2
, (unsigned int *)(regs
->tpc
+12));
444 if ((sethi
& 0xFFC00000U
) == 0x03000000U
&&
445 mov1
== 0x8210000FU
&&
446 (call
& 0xC0000000U
) == 0x40000000U
&&
451 regs
->u_regs
[UREG_G1
] = regs
->u_regs
[UREG_RETPC
];
452 addr
= regs
->tpc
+ 4 + ((((call
| 0xFFFFFFFFC0000000UL
) ^ 0x20000000UL
) + 0x20000000UL
) << 2);
454 if (test_thread_flag(TIF_32BIT
))
455 addr
&= 0xFFFFFFFFUL
;
463 do { /* PaX: patched PLT emulation #5 */
464 unsigned int sethi
, sethi1
, sethi2
, or1
, or2
, sllx
, jmpl
, nop
;
466 err
= get_user(sethi
, (unsigned int *)regs
->tpc
);
467 err
|= get_user(sethi1
, (unsigned int *)(regs
->tpc
+4));
468 err
|= get_user(sethi2
, (unsigned int *)(regs
->tpc
+8));
469 err
|= get_user(or1
, (unsigned int *)(regs
->tpc
+12));
470 err
|= get_user(or2
, (unsigned int *)(regs
->tpc
+16));
471 err
|= get_user(sllx
, (unsigned int *)(regs
->tpc
+20));
472 err
|= get_user(jmpl
, (unsigned int *)(regs
->tpc
+24));
473 err
|= get_user(nop
, (unsigned int *)(regs
->tpc
+28));
478 if ((sethi
& 0xFFC00000U
) == 0x03000000U
&&
479 (sethi1
& 0xFFC00000U
) == 0x03000000U
&&
480 (sethi2
& 0xFFC00000U
) == 0x0B000000U
&&
481 (or1
& 0xFFFFE000U
) == 0x82106000U
&&
482 (or2
& 0xFFFFE000U
) == 0x8A116000U
&&
483 sllx
== 0x83287020U
&&
484 jmpl
== 0x81C04005U
&&
489 regs
->u_regs
[UREG_G1
] = ((sethi1
& 0x003FFFFFU
) << 10) | (or1
& 0x000003FFU
);
490 regs
->u_regs
[UREG_G1
] <<= 32;
491 regs
->u_regs
[UREG_G5
] = ((sethi2
& 0x003FFFFFU
) << 10) | (or2
& 0x000003FFU
);
492 addr
= regs
->u_regs
[UREG_G1
] + regs
->u_regs
[UREG_G5
];
499 do { /* PaX: patched PLT emulation #6 */
500 unsigned int sethi
, sethi1
, sethi2
, sllx
, or, jmpl
, nop
;
502 err
= get_user(sethi
, (unsigned int *)regs
->tpc
);
503 err
|= get_user(sethi1
, (unsigned int *)(regs
->tpc
+4));
504 err
|= get_user(sethi2
, (unsigned int *)(regs
->tpc
+8));
505 err
|= get_user(sllx
, (unsigned int *)(regs
->tpc
+12));
506 err
|= get_user(or, (unsigned int *)(regs
->tpc
+16));
507 err
|= get_user(jmpl
, (unsigned int *)(regs
->tpc
+20));
508 err
|= get_user(nop
, (unsigned int *)(regs
->tpc
+24));
513 if ((sethi
& 0xFFC00000U
) == 0x03000000U
&&
514 (sethi1
& 0xFFC00000U
) == 0x03000000U
&&
515 (sethi2
& 0xFFC00000U
) == 0x0B000000U
&&
516 sllx
== 0x83287020U
&&
517 (or & 0xFFFFE000U
) == 0x8A116000U
&&
518 jmpl
== 0x81C04005U
&&
523 regs
->u_regs
[UREG_G1
] = (sethi1
& 0x003FFFFFU
) << 10;
524 regs
->u_regs
[UREG_G1
] <<= 32;
525 regs
->u_regs
[UREG_G5
] = ((sethi2
& 0x003FFFFFU
) << 10) | (or & 0x3FFU
);
526 addr
= regs
->u_regs
[UREG_G1
] + regs
->u_regs
[UREG_G5
];
533 do { /* PaX: unpatched PLT emulation step 1 */
534 unsigned int sethi
, ba
, nop
;
536 err
= get_user(sethi
, (unsigned int *)regs
->tpc
);
537 err
|= get_user(ba
, (unsigned int *)(regs
->tpc
+4));
538 err
|= get_user(nop
, (unsigned int *)(regs
->tpc
+8));
543 if ((sethi
& 0xFFC00000U
) == 0x03000000U
&&
544 ((ba
& 0xFFC00000U
) == 0x30800000U
|| (ba
& 0xFFF80000U
) == 0x30680000U
) &&
548 unsigned int save
, call
;
549 unsigned int sethi1
, sethi2
, or1
, or2
, sllx
, add
, jmpl
;
551 if ((ba
& 0xFFC00000U
) == 0x30800000U
)
552 addr
= regs
->tpc
+ 4 + ((((ba
| 0xFFFFFFFFFFC00000UL
) ^ 0x00200000UL
) + 0x00200000UL
) << 2);
554 addr
= regs
->tpc
+ 4 + ((((ba
| 0xFFFFFFFFFFF80000UL
) ^ 0x00040000UL
) + 0x00040000UL
) << 2);
556 if (test_thread_flag(TIF_32BIT
))
557 addr
&= 0xFFFFFFFFUL
;
559 err
= get_user(save
, (unsigned int *)addr
);
560 err
|= get_user(call
, (unsigned int *)(addr
+4));
561 err
|= get_user(nop
, (unsigned int *)(addr
+8));
565 #ifdef CONFIG_PAX_DLRESOLVE
566 if (save
== 0x9DE3BFA8U
&&
567 (call
& 0xC0000000U
) == 0x40000000U
&&
570 struct vm_area_struct
*vma
;
571 unsigned long call_dl_resolve
;
573 down_read(¤t
->mm
->mmap_sem
);
574 call_dl_resolve
= current
->mm
->call_dl_resolve
;
575 up_read(¤t
->mm
->mmap_sem
);
576 if (likely(call_dl_resolve
))
579 vma
= kmem_cache_zalloc(vm_area_cachep
, GFP_KERNEL
);
581 down_write(¤t
->mm
->mmap_sem
);
582 if (current
->mm
->call_dl_resolve
) {
583 call_dl_resolve
= current
->mm
->call_dl_resolve
;
584 up_write(¤t
->mm
->mmap_sem
);
586 kmem_cache_free(vm_area_cachep
, vma
);
590 call_dl_resolve
= get_unmapped_area(NULL
, 0UL, PAGE_SIZE
, 0UL, MAP_PRIVATE
);
591 if (!vma
|| (call_dl_resolve
& ~PAGE_MASK
)) {
592 up_write(¤t
->mm
->mmap_sem
);
594 kmem_cache_free(vm_area_cachep
, vma
);
598 if (pax_insert_vma(vma
, call_dl_resolve
)) {
599 up_write(¤t
->mm
->mmap_sem
);
600 kmem_cache_free(vm_area_cachep
, vma
);
604 current
->mm
->call_dl_resolve
= call_dl_resolve
;
605 up_write(¤t
->mm
->mmap_sem
);
608 regs
->u_regs
[UREG_G1
] = (sethi
& 0x003FFFFFU
) << 10;
609 regs
->tpc
= call_dl_resolve
;
615 /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
616 if ((save
& 0xFFC00000U
) == 0x05000000U
&&
617 (call
& 0xFFFFE000U
) == 0x85C0A000U
&&
620 regs
->u_regs
[UREG_G1
] = (sethi
& 0x003FFFFFU
) << 10;
621 regs
->u_regs
[UREG_G2
] = addr
+ 4;
622 addr
= (save
& 0x003FFFFFU
) << 10;
623 addr
+= (((call
| 0xFFFFFFFFFFFFE000UL
) ^ 0x00001000UL
) + 0x00001000UL
);
625 if (test_thread_flag(TIF_32BIT
))
626 addr
&= 0xFFFFFFFFUL
;
633 /* PaX: 64-bit PLT stub */
634 err
= get_user(sethi1
, (unsigned int *)addr
);
635 err
|= get_user(sethi2
, (unsigned int *)(addr
+4));
636 err
|= get_user(or1
, (unsigned int *)(addr
+8));
637 err
|= get_user(or2
, (unsigned int *)(addr
+12));
638 err
|= get_user(sllx
, (unsigned int *)(addr
+16));
639 err
|= get_user(add
, (unsigned int *)(addr
+20));
640 err
|= get_user(jmpl
, (unsigned int *)(addr
+24));
641 err
|= get_user(nop
, (unsigned int *)(addr
+28));
645 if ((sethi1
& 0xFFC00000U
) == 0x09000000U
&&
646 (sethi2
& 0xFFC00000U
) == 0x0B000000U
&&
647 (or1
& 0xFFFFE000U
) == 0x88112000U
&&
648 (or2
& 0xFFFFE000U
) == 0x8A116000U
&&
649 sllx
== 0x89293020U
&&
650 add
== 0x8A010005U
&&
651 jmpl
== 0x89C14000U
&&
654 regs
->u_regs
[UREG_G1
] = (sethi
& 0x003FFFFFU
) << 10;
655 regs
->u_regs
[UREG_G4
] = ((sethi1
& 0x003FFFFFU
) << 10) | (or1
& 0x000003FFU
);
656 regs
->u_regs
[UREG_G4
] <<= 32;
657 regs
->u_regs
[UREG_G5
] = ((sethi2
& 0x003FFFFFU
) << 10) | (or2
& 0x000003FFU
);
658 regs
->u_regs
[UREG_G5
] += regs
->u_regs
[UREG_G4
];
659 regs
->u_regs
[UREG_G4
] = addr
+ 24;
660 addr
= regs
->u_regs
[UREG_G5
];
668 #ifdef CONFIG_PAX_DLRESOLVE
669 do { /* PaX: unpatched PLT emulation step 2 */
670 unsigned int save
, call
, nop
;
672 err
= get_user(save
, (unsigned int *)(regs
->tpc
-4));
673 err
|= get_user(call
, (unsigned int *)regs
->tpc
);
674 err
|= get_user(nop
, (unsigned int *)(regs
->tpc
+4));
678 if (save
== 0x9DE3BFA8U
&&
679 (call
& 0xC0000000U
) == 0x40000000U
&&
682 unsigned long dl_resolve
= regs
->tpc
+ ((((call
| 0xFFFFFFFFC0000000UL
) ^ 0x20000000UL
) + 0x20000000UL
) << 2);
684 if (test_thread_flag(TIF_32BIT
))
685 dl_resolve
&= 0xFFFFFFFFUL
;
687 regs
->u_regs
[UREG_RETPC
] = regs
->tpc
;
688 regs
->tpc
= dl_resolve
;
689 regs
->tnpc
= dl_resolve
+4;
695 do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
696 unsigned int sethi
, ba
, nop
;
698 err
= get_user(sethi
, (unsigned int *)regs
->tpc
);
699 err
|= get_user(ba
, (unsigned int *)(regs
->tpc
+4));
700 err
|= get_user(nop
, (unsigned int *)(regs
->tpc
+8));
705 if ((sethi
& 0xFFC00000U
) == 0x03000000U
&&
706 (ba
& 0xFFF00000U
) == 0x30600000U
&&
711 addr
= (sethi
& 0x003FFFFFU
) << 10;
712 regs
->u_regs
[UREG_G1
] = addr
;
713 addr
= regs
->tpc
+ ((((ba
| 0xFFFFFFFFFFF80000UL
) ^ 0x00040000UL
) + 0x00040000UL
) << 2);
715 if (test_thread_flag(TIF_32BIT
))
716 addr
&= 0xFFFFFFFFUL
;
729 void pax_report_insns(struct pt_regs
*regs
, void *pc
, void *sp
)
733 printk(KERN_ERR
"PAX: bytes at PC: ");
734 for (i
= 0; i
< 8; i
++) {
736 if (get_user(c
, (unsigned int *)pc
+i
))
737 printk(KERN_CONT
"???????? ");
739 printk(KERN_CONT
"%08x ", c
);
745 asmlinkage
void __kprobes
do_sparc64_fault(struct pt_regs
*regs
)
747 enum ctx_state prev_state
= exception_enter();
748 struct mm_struct
*mm
= current
->mm
;
749 struct vm_area_struct
*vma
;
750 unsigned int insn
= 0;
751 int si_code
, fault_code
, fault
;
752 unsigned long address
, mm_rss
;
753 unsigned int flags
= FAULT_FLAG_ALLOW_RETRY
| FAULT_FLAG_KILLABLE
;
755 fault_code
= get_thread_fault_code();
757 if (notify_page_fault(regs
))
760 si_code
= SEGV_MAPERR
;
761 address
= current_thread_info()->fault_address
;
763 if ((fault_code
& FAULT_CODE_ITLB
) &&
764 (fault_code
& FAULT_CODE_DTLB
))
767 if (test_thread_flag(TIF_32BIT
)) {
768 if (!(regs
->tstate
& TSTATE_PRIV
)) {
769 if (unlikely((regs
->tpc
>> 32) != 0)) {
770 bogus_32bit_fault_tpc(regs
);
774 if (unlikely((address
>> 32) != 0))
778 if (regs
->tstate
& TSTATE_PRIV
) {
779 unsigned long tpc
= regs
->tpc
;
781 /* Sanity check the PC. */
782 if ((tpc
>= KERNBASE
&& tpc
< (unsigned long) __init_end
) ||
783 (tpc
>= MODULES_VADDR
&& tpc
< MODULES_END
)) {
784 /* Valid, no problems... */
786 bad_kernel_pc(regs
, address
);
790 flags
|= FAULT_FLAG_USER
;
793 * If we're in an interrupt or have no user
794 * context, we must not take the fault..
796 if (in_atomic() || !mm
)
799 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS
, 1, regs
, address
);
801 if (!down_read_trylock(&mm
->mmap_sem
)) {
802 if ((regs
->tstate
& TSTATE_PRIV
) &&
803 !search_exception_tables(regs
->tpc
)) {
804 insn
= get_fault_insn(regs
, insn
);
805 goto handle_kernel_fault
;
809 down_read(&mm
->mmap_sem
);
812 if (fault_code
& FAULT_CODE_BAD_RA
)
815 vma
= find_vma(mm
, address
);
819 #ifdef CONFIG_PAX_PAGEEXEC
820 /* PaX: detect ITLB misses on non-exec pages */
821 if ((mm
->pax_flags
& MF_PAX_PAGEEXEC
) && vma
->vm_start
<= address
&&
822 !(vma
->vm_flags
& VM_EXEC
) && (fault_code
& FAULT_CODE_ITLB
))
824 if (address
!= regs
->tpc
)
827 up_read(&mm
->mmap_sem
);
828 switch (pax_handle_fetch_fault(regs
)) {
830 #ifdef CONFIG_PAX_EMUPLT
837 pax_report_fault(regs
, (void *)regs
->tpc
, (void *)(regs
->u_regs
[UREG_FP
] + STACK_BIAS
));
838 do_group_exit(SIGKILL
);
842 /* Pure DTLB misses do not tell us whether the fault causing
843 * load/store/atomic was a write or not, it only says that there
844 * was no match. So in such a case we (carefully) read the
845 * instruction to try and figure this out. It's an optimization
846 * so it's ok if we can't do this.
848 * Special hack, window spill/fill knows the exact fault type.
851 (FAULT_CODE_DTLB
| FAULT_CODE_WRITE
| FAULT_CODE_WINFIXUP
)) == FAULT_CODE_DTLB
) &&
852 (vma
->vm_flags
& VM_WRITE
) != 0) {
853 insn
= get_fault_insn(regs
, 0);
856 /* All loads, stores and atomics have bits 30 and 31 both set
857 * in the instruction. Bit 21 is set in all stores, but we
858 * have to avoid prefetches which also have bit 21 set.
860 if ((insn
& 0xc0200000) == 0xc0200000 &&
861 (insn
& 0x01780000) != 0x01680000) {
862 /* Don't bother updating thread struct value,
863 * because update_mmu_cache only cares which tlb
864 * the access came from.
866 fault_code
|= FAULT_CODE_WRITE
;
871 if (vma
->vm_start
<= address
)
873 if (!(vma
->vm_flags
& VM_GROWSDOWN
))
875 if (!(fault_code
& FAULT_CODE_WRITE
)) {
876 /* Non-faulting loads shouldn't expand stack. */
877 insn
= get_fault_insn(regs
, insn
);
878 if ((insn
& 0xc0800000) == 0xc0800000) {
882 asi
= (regs
->tstate
>> 24);
885 if ((asi
& 0xf2) == 0x82)
889 if (expand_stack(vma
, address
))
892 * Ok, we have a good vm_area for this memory access, so
896 si_code
= SEGV_ACCERR
;
898 /* If we took a ITLB miss on a non-executable page, catch
901 if ((fault_code
& FAULT_CODE_ITLB
) && !(vma
->vm_flags
& VM_EXEC
)) {
902 BUG_ON(address
!= regs
->tpc
);
903 BUG_ON(regs
->tstate
& TSTATE_PRIV
);
907 if (fault_code
& FAULT_CODE_WRITE
) {
908 if (!(vma
->vm_flags
& VM_WRITE
))
911 /* Spitfire has an icache which does not snoop
912 * processor stores. Later processors do...
914 if (tlb_type
== spitfire
&&
915 (vma
->vm_flags
& VM_EXEC
) != 0 &&
916 vma
->vm_file
!= NULL
)
917 set_thread_fault_code(fault_code
|
918 FAULT_CODE_BLKCOMMIT
);
920 flags
|= FAULT_FLAG_WRITE
;
922 /* Allow reads even for write-only mappings */
923 if (!(vma
->vm_flags
& (VM_READ
| VM_EXEC
)))
927 fault
= handle_mm_fault(mm
, vma
, address
, flags
);
929 if ((fault
& VM_FAULT_RETRY
) && fatal_signal_pending(current
))
932 if (unlikely(fault
& VM_FAULT_ERROR
)) {
933 if (fault
& VM_FAULT_OOM
)
935 else if (fault
& VM_FAULT_SIGSEGV
)
937 else if (fault
& VM_FAULT_SIGBUS
)
942 if (flags
& FAULT_FLAG_ALLOW_RETRY
) {
943 if (fault
& VM_FAULT_MAJOR
) {
945 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ
,
949 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN
,
952 if (fault
& VM_FAULT_RETRY
) {
953 flags
&= ~FAULT_FLAG_ALLOW_RETRY
;
954 flags
|= FAULT_FLAG_TRIED
;
956 /* No need to up_read(&mm->mmap_sem) as we would
957 * have already released it in __lock_page_or_retry
964 up_read(&mm
->mmap_sem
);
966 mm_rss
= get_mm_rss(mm
);
967 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
968 mm_rss
-= (mm
->context
.huge_pte_count
* (HPAGE_SIZE
/ PAGE_SIZE
));
970 if (unlikely(mm_rss
>
971 mm
->context
.tsb_block
[MM_TSB_BASE
].tsb_rss_limit
))
972 tsb_grow(mm
, MM_TSB_BASE
, mm_rss
);
973 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
974 mm_rss
= mm
->context
.huge_pte_count
;
975 if (unlikely(mm_rss
>
976 mm
->context
.tsb_block
[MM_TSB_HUGE
].tsb_rss_limit
)) {
977 if (mm
->context
.tsb_block
[MM_TSB_HUGE
].tsb
)
978 tsb_grow(mm
, MM_TSB_HUGE
, mm_rss
);
985 exception_exit(prev_state
);
989 * Something tried to access memory that isn't in our memory map..
990 * Fix it, but check if it's kernel or user first..
993 insn
= get_fault_insn(regs
, insn
);
994 up_read(&mm
->mmap_sem
);
997 do_kernel_fault(regs
, si_code
, fault_code
, insn
, address
);
1001 * We ran out of memory, or some other thing happened to us that made
1002 * us unable to handle the page fault gracefully.
1005 insn
= get_fault_insn(regs
, insn
);
1006 up_read(&mm
->mmap_sem
);
1007 if (!(regs
->tstate
& TSTATE_PRIV
)) {
1008 pagefault_out_of_memory();
1009 goto exit_exception
;
1011 goto handle_kernel_fault
;
1014 insn
= get_fault_insn(regs
, 0);
1015 goto handle_kernel_fault
;
1018 insn
= get_fault_insn(regs
, insn
);
1019 up_read(&mm
->mmap_sem
);
1022 * Send a sigbus, regardless of whether we were in kernel
1025 do_fault_siginfo(BUS_ADRERR
, SIGBUS
, regs
, address
, insn
, fault_code
);
1027 /* Kernel mode? Handle exceptions or die */
1028 if (regs
->tstate
& TSTATE_PRIV
)
1029 goto handle_kernel_fault
;