/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int show_unhandled_signals = 1;

static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 0))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

static void __kprobes unhandled_fault(unsigned long address,
				      struct task_struct *tsk,
				      struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL "
		       "pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %016lx\n", (unsigned long)address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
	       (tsk->mm ?
		CTX_HWBITS(tsk->mm->context) :
		CTX_HWBITS(tsk->active_mm->context)));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
	       regs->tpc);
	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
	printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
	dump_stack();
	unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_sem is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes from
 * under us, disable interrupts around the time that we look at the
 * pte: kswapd will then have to wait for its SMP IPI response from
 * us, and vmtruncate likewise.  This saves us having to take the
 * pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
	pgd_t *pgdp = pgd_offset(current->mm, tpc);
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	unsigned long pa;
	u32 insn = 0;

	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
		goto out;
	pudp = pud_offset(pgdp, tpc);
	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
		goto out;

	/* This disables preemption for us as well. */
	local_irq_disable();

	pmdp = pmd_offset(pudp, tpc);
	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
		goto out_irq_enable;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmdp)) {
		if (pmd_trans_splitting(*pmdp))
			goto out_irq_enable;

		pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
		pa += tpc & ~HPAGE_MASK;

		/* Use phys bypass so we don't pollute dtlb/dcache. */
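		/* ("lduwa" loads an unsigned word from an alternate
		 * address space; ASI_PHYS_USE_EC should address physical
		 * memory directly, cacheable only in the external cache,
		 * so the probe allocates nothing in the D-TLB or D-cache.)
		 */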
		__asm__ __volatile__("lduwa [%1] %2, %0"
				     : "=r" (insn)
				     : "r" (pa), "i" (ASI_PHYS_USE_EC));
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, tpc);
		pte = *ptep;
		if (pte_present(pte)) {
			pa = (pte_pfn(pte) << PAGE_SHIFT);
			pa += (tpc & ~PAGE_MASK);

			/* Use phys bypass so we don't pollute dtlb/dcache. */
			__asm__ __volatile__("lduwa [%1] %2, %0"
					     : "=r" (insn)
					     : "r" (pa), "i" (ASI_PHYS_USE_EC));
		}
		pte_unmap(ptep);
	}
out_irq_enable:
	local_irq_enable();
out:
	return insn;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->tpc);

	printk(KERN_CONT "\n");
}

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			     unsigned long fault_addr, unsigned int insn,
			     int fault_code)
{
	unsigned long addr;
	siginfo_t info;

	info.si_code = code;
	info.si_signo = sig;
	info.si_errno = 0;
	if (fault_code & FAULT_CODE_ITLB) {
		addr = regs->tpc;
	} else {
		/* If we were able to probe the faulting instruction, use it
		 * to compute a precise fault address.  Otherwise use the fault
		 * time provided address which may only have page granularity.
		 */
		if (insn)
			addr = compute_effective_address(regs, insn, 0);
		else
			addr = fault_addr;
	}
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code, addr, current);

	force_sig_info(sig, &info, current);
}

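/* Fetch the faulting instruction word: a privileged PC can be read
 * directly from the kernel mapping, while a user PC is probed through
 * the page tables via get_user_insn().
 */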
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
	if (!insn) {
		if (!regs->tpc || (regs->tpc & 0x3))
			return 0;
		if (regs->tstate & TSTATE_PRIV) {
			insn = *(unsigned int *) regs->tpc;
		} else {
			insn = get_user_insn(regs->tpc);
		}
	}
	return insn;
}

static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
				      int fault_code, unsigned int insn,
				      unsigned long address)
{
	unsigned char asi = ASI_P;

	if ((!insn) && (regs->tstate & TSTATE_PRIV))
		goto cannot_handle;

	/* If the user insn could not be read (thus insn is zero), that
	 * is fine.  We will just gun down the process with a signal
	 * in that case.
	 */

	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
	    (insn & 0xc0800000) == 0xc0800000) {
		if (insn & 0x2000)
			asi = (regs->tstate >> 24);
		else
			asi = (insn >> 5);
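		/* ASI values 0x82/0x83/0x8a/0x8b (ASI_{P,S}NF and their
		 * little-endian variants) all satisfy (asi & 0xf2) == 0x82:
		 * these are the non-faulting load ASIs.
		 */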
		if ((asi & 0xf2) == 0x82) {
			if (insn & 0x1000000) {
				handle_ldf_stq(insn, regs);
			} else {
				/* This was a non-faulting load. Just clear the
				 * destination register(s) and continue with the next
				 * instruction. -jj
				 */
				handle_ld_nf(insn, regs);
			}
			return;
		}
	}

	/* Is this in ex_table? */
	if (regs->tstate & TSTATE_PRIV) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
	} else {
		/* The si_code was set to make clear whether
		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
		 */
		do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
		return;
	}

cannot_handle:
	unhandled_fault (address, current, regs);
}

static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
	static int times;

	if (times++ < 10)
		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
		       "64-bit TPC [%lx]\n",
		       current->comm, current->pid,
		       regs->tpc);
	show_regs(regs);
}

#ifdef CONFIG_PAX_PAGEEXEC
#ifdef CONFIG_PAX_DLRESOLVE
static void pax_emuplt_close(struct vm_area_struct *vma)
{
	vma->vm_mm->call_dl_resolve = 0UL;
}

static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned int *kaddr;

	vmf->page = alloc_page(GFP_HIGHUSER);
	if (!vmf->page)
		return VM_FAULT_OOM;

	kaddr = kmap(vmf->page);
	memset(kaddr, 0, PAGE_SIZE);
	kaddr[0] = 0x9DE3BFA8U; /* save */
	flush_dcache_page(vmf->page);
	kunmap(vmf->page);
	return VM_FAULT_MAJOR;
}

static const struct vm_operations_struct pax_vm_ops = {
	.close = pax_emuplt_close,
	.fault = pax_emuplt_fault
};

static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
{
	int ret;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + PAGE_SIZE;
	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_ops = &pax_vm_ops;

	ret = insert_vm_struct(current->mm, vma);
	if (ret)
		return ret;

	++current->mm->total_vm;
	return 0;
}
#endif

/*
 * PaX: decide what to do with offenders (regs->tpc = fault address)
 *
 * returns 1 when task should be killed
 *         2 when patched PLT trampoline was detected
 *         3 when unpatched PLT trampoline was detected
 */
static int pax_handle_fetch_fault(struct pt_regs *regs)
{

#ifdef CONFIG_PAX_EMUPLT
	int err;

	do { /* PaX: patched PLT emulation #1 */
		unsigned int sethi1, sethi2, jmpl;

		err = get_user(sethi1, (unsigned int *)regs->tpc);
		err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
		err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));

		if (err)
			break;

		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
		    (sethi2 & 0xFFC00000U) == 0x03000000U &&
		    (jmpl & 0xFFFFE000U) == 0x81C06000U)
		{
			unsigned long addr;

			regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
			addr = regs->u_regs[UREG_G1];
			addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
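			/* The ((x | ~0x1fff) ^ 0x1000) + 0x1000 dance above
			 * sign-extends the 13-bit simm13 field of the jmpl;
			 * e.g. simm13 == 0x1fff comes out as (unsigned long)-1.
			 */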

			if (test_thread_flag(TIF_32BIT))
				addr &= 0xFFFFFFFFUL;

			regs->tpc = addr;
			regs->tnpc = addr+4;
			return 2;
		}
	} while (0);
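
	/* (Pattern #1 above corresponds, roughly, to a patched .plt slot
	 * of the form
	 *	sethi	%hi(...), %g1
	 *	sethi	%hi(target), %g1
	 *	jmpl	%g1 + %lo(target), %g0
	 * as laid down by the run-time linker.)
	 */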

	do { /* PaX: patched PLT emulation #2 */
		unsigned int ba;

		err = get_user(ba, (unsigned int *)regs->tpc);

		if (err)
			break;

		if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
			unsigned long addr;

			if ((ba & 0xFFC00000U) == 0x30800000U)
				addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
			else
				addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);

			if (test_thread_flag(TIF_32BIT))
				addr &= 0xFFFFFFFFUL;

			regs->tpc = addr;
			regs->tnpc = addr+4;
			return 2;
		}
	} while (0);

	do { /* PaX: patched PLT emulation #3 */
		unsigned int sethi, bajmpl, nop;

		err = get_user(sethi, (unsigned int *)regs->tpc);
		err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
		err |= get_user(nop, (unsigned int *)(regs->tpc+8));

		if (err)
			break;

		if ((sethi & 0xFFC00000U) == 0x03000000U &&
		    ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
		    nop == 0x01000000U)
		{
			unsigned long addr;

			addr = (sethi & 0x003FFFFFU) << 10;
			regs->u_regs[UREG_G1] = addr;
			if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
				addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
			else
				addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);

			if (test_thread_flag(TIF_32BIT))
				addr &= 0xFFFFFFFFUL;

			regs->tpc = addr;
			regs->tnpc = addr+4;
			return 2;
		}
	} while (0);

	do { /* PaX: patched PLT emulation #4 */
		unsigned int sethi, mov1, call, mov2;

		err = get_user(sethi, (unsigned int *)regs->tpc);
		err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
		err |= get_user(call, (unsigned int *)(regs->tpc+8));
		err |= get_user(mov2, (unsigned int *)(regs->tpc+12));

		if (err)
			break;

		if ((sethi & 0xFFC00000U) == 0x03000000U &&
		    mov1 == 0x8210000FU &&
		    (call & 0xC0000000U) == 0x40000000U &&
		    mov2 == 0x9E100001U)
		{
			unsigned long addr;

			regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
			addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);

			if (test_thread_flag(TIF_32BIT))
				addr &= 0xFFFFFFFFUL;

			regs->tpc = addr;
			regs->tnpc = addr+4;
			return 2;
		}
	} while (0);

	do { /* PaX: patched PLT emulation #5 */
		unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;

		err = get_user(sethi, (unsigned int *)regs->tpc);
		err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
		err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
		err |= get_user(or1, (unsigned int *)(regs->tpc+12));
		err |= get_user(or2, (unsigned int *)(regs->tpc+16));
		err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
		err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
		err |= get_user(nop, (unsigned int *)(regs->tpc+28));

		if (err)
			break;

		if ((sethi & 0xFFC00000U) == 0x03000000U &&
		    (sethi1 & 0xFFC00000U) == 0x03000000U &&
		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
		    (or1 & 0xFFFFE000U) == 0x82106000U &&
		    (or2 & 0xFFFFE000U) == 0x8A116000U &&
		    sllx == 0x83287020U &&
		    jmpl == 0x81C04005U &&
		    nop == 0x01000000U)
		{
			unsigned long addr;

			regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
			regs->u_regs[UREG_G1] <<= 32;
			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
			regs->tpc = addr;
			regs->tnpc = addr+4;
			return 2;
		}
	} while (0);

	do { /* PaX: patched PLT emulation #6 */
		unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;

		err = get_user(sethi, (unsigned int *)regs->tpc);
		err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
		err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
		err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
		err |= get_user(or, (unsigned int *)(regs->tpc+16));
		err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
		err |= get_user(nop, (unsigned int *)(regs->tpc+24));

		if (err)
			break;

		if ((sethi & 0xFFC00000U) == 0x03000000U &&
		    (sethi1 & 0xFFC00000U) == 0x03000000U &&
		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
		    sllx == 0x83287020U &&
		    (or & 0xFFFFE000U) == 0x8A116000U &&
		    jmpl == 0x81C04005U &&
		    nop == 0x01000000U)
		{
			unsigned long addr;

			regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
			regs->u_regs[UREG_G1] <<= 32;
			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
			regs->tpc = addr;
			regs->tnpc = addr+4;
			return 2;
		}
	} while (0);
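
	/* (Patterns #5 and #6 above match, roughly, the 64-bit PLT form:
	 * sethi (and, in #5, an or) builds the high 32 bits of the target
	 * in %g1 and the low 32 bits in %g5, sllx %g1, 32, %g1 shifts the
	 * high half up, and jmpl %g1 + %g5, %g0 jumps to their sum.)
	 */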

	do { /* PaX: unpatched PLT emulation step 1 */
		unsigned int sethi, ba, nop;

		err = get_user(sethi, (unsigned int *)regs->tpc);
		err |= get_user(ba, (unsigned int *)(regs->tpc+4));
		err |= get_user(nop, (unsigned int *)(regs->tpc+8));

		if (err)
			break;

		if ((sethi & 0xFFC00000U) == 0x03000000U &&
		    ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
		    nop == 0x01000000U)
		{
			unsigned long addr;
			unsigned int save, call;
			unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;

			if ((ba & 0xFFC00000U) == 0x30800000U)
				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
			else
				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);

			if (test_thread_flag(TIF_32BIT))
				addr &= 0xFFFFFFFFUL;

			err = get_user(save, (unsigned int *)addr);
			err |= get_user(call, (unsigned int *)(addr+4));
			err |= get_user(nop, (unsigned int *)(addr+8));
			if (err)
				break;

#ifdef CONFIG_PAX_DLRESOLVE
			if (save == 0x9DE3BFA8U &&
			    (call & 0xC0000000U) == 0x40000000U &&
			    nop == 0x01000000U)
			{
				struct vm_area_struct *vma;
				unsigned long call_dl_resolve;

				down_read(&current->mm->mmap_sem);
				call_dl_resolve = current->mm->call_dl_resolve;
				up_read(&current->mm->mmap_sem);
				if (likely(call_dl_resolve))
					goto emulate;

				vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);

				down_write(&current->mm->mmap_sem);
				if (current->mm->call_dl_resolve) {
					call_dl_resolve = current->mm->call_dl_resolve;
					up_write(&current->mm->mmap_sem);
					if (vma)
						kmem_cache_free(vm_area_cachep, vma);
					goto emulate;
				}

				call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
				if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
					up_write(&current->mm->mmap_sem);
					if (vma)
						kmem_cache_free(vm_area_cachep, vma);
					return 1;
				}

				if (pax_insert_vma(vma, call_dl_resolve)) {
					up_write(&current->mm->mmap_sem);
					kmem_cache_free(vm_area_cachep, vma);
					return 1;
				}

				current->mm->call_dl_resolve = call_dl_resolve;
				up_write(&current->mm->mmap_sem);

emulate:
				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
				regs->tpc = call_dl_resolve;
				regs->tnpc = addr+4;
				return 3;
			}
#endif

			/* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
			if ((save & 0xFFC00000U) == 0x05000000U &&
			    (call & 0xFFFFE000U) == 0x85C0A000U &&
			    nop == 0x01000000U)
			{
				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
				regs->u_regs[UREG_G2] = addr + 4;
				addr = (save & 0x003FFFFFU) << 10;
				addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);

				if (test_thread_flag(TIF_32BIT))
					addr &= 0xFFFFFFFFUL;

				regs->tpc = addr;
				regs->tnpc = addr+4;
				return 3;
			}

			/* PaX: 64-bit PLT stub */
			err = get_user(sethi1, (unsigned int *)addr);
			err |= get_user(sethi2, (unsigned int *)(addr+4));
			err |= get_user(or1, (unsigned int *)(addr+8));
			err |= get_user(or2, (unsigned int *)(addr+12));
			err |= get_user(sllx, (unsigned int *)(addr+16));
			err |= get_user(add, (unsigned int *)(addr+20));
			err |= get_user(jmpl, (unsigned int *)(addr+24));
			err |= get_user(nop, (unsigned int *)(addr+28));
			if (err)
				break;

			if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
			    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
			    (or1 & 0xFFFFE000U) == 0x88112000U &&
			    (or2 & 0xFFFFE000U) == 0x8A116000U &&
			    sllx == 0x89293020U &&
			    add == 0x8A010005U &&
			    jmpl == 0x89C14000U &&
			    nop == 0x01000000U)
			{
				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
				regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
				regs->u_regs[UREG_G4] <<= 32;
				regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
				regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
				regs->u_regs[UREG_G4] = addr + 24;
				addr = regs->u_regs[UREG_G5];
				regs->tpc = addr;
				regs->tnpc = addr+4;
				return 3;
			}
		}
	} while (0);

#ifdef CONFIG_PAX_DLRESOLVE
	do { /* PaX: unpatched PLT emulation step 2 */
		unsigned int save, call, nop;

		err = get_user(save, (unsigned int *)(regs->tpc-4));
		err |= get_user(call, (unsigned int *)regs->tpc);
		err |= get_user(nop, (unsigned int *)(regs->tpc+4));
		if (err)
			break;

		if (save == 0x9DE3BFA8U &&
		    (call & 0xC0000000U) == 0x40000000U &&
		    nop == 0x01000000U)
		{
			unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);

			if (test_thread_flag(TIF_32BIT))
				dl_resolve &= 0xFFFFFFFFUL;

			regs->u_regs[UREG_RETPC] = regs->tpc;
			regs->tpc = dl_resolve;
			regs->tnpc = dl_resolve+4;
			return 3;
		}
	} while (0);
#endif

	do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
		unsigned int sethi, ba, nop;

		err = get_user(sethi, (unsigned int *)regs->tpc);
		err |= get_user(ba, (unsigned int *)(regs->tpc+4));
		err |= get_user(nop, (unsigned int *)(regs->tpc+8));

		if (err)
			break;

		if ((sethi & 0xFFC00000U) == 0x03000000U &&
		    (ba & 0xFFF00000U) == 0x30600000U &&
		    nop == 0x01000000U)
		{
			unsigned long addr;

			addr = (sethi & 0x003FFFFFU) << 10;
			regs->u_regs[UREG_G1] = addr;
			addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);

			if (test_thread_flag(TIF_32BIT))
				addr &= 0xFFFFFFFFUL;

			regs->tpc = addr;
			regs->tnpc = addr+4;
			return 2;
		}
	} while (0);

#endif

	return 1;
}

void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
{
	unsigned long i;

	printk(KERN_ERR "PAX: bytes at PC: ");
	for (i = 0; i < 8; i++) {
		unsigned int c;
		if (get_user(c, (unsigned int *)pc+i))
			printk(KERN_CONT "???????? ");
		else
			printk(KERN_CONT "%08x ", c);
	}
	printk("\n");
}
#endif

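/* Main page fault entry point, invoked from the trap table.  The fault
 * type arrives via the per-thread fault code and the faulting address
 * via current_thread_info()->fault_address.
 */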
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int insn = 0;
	int si_code, fault_code, fault;
	unsigned long address, mm_rss;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	fault_code = get_thread_fault_code();

	if (notify_page_fault(regs))
		goto exit_exception;

	si_code = SEGV_MAPERR;
	address = current_thread_info()->fault_address;

	if ((fault_code & FAULT_CODE_ITLB) &&
	    (fault_code & FAULT_CODE_DTLB))
		BUG();

	if (test_thread_flag(TIF_32BIT)) {
		if (!(regs->tstate & TSTATE_PRIV)) {
			if (unlikely((regs->tpc >> 32) != 0)) {
				bogus_32bit_fault_tpc(regs);
				goto intr_or_no_mm;
			}
		}
		if (unlikely((address >> 32) != 0))
			goto intr_or_no_mm;
	}

	if (regs->tstate & TSTATE_PRIV) {
		unsigned long tpc = regs->tpc;

		/* Sanity check the PC. */
		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
			/* Valid, no problems... */
		} else {
			bad_kernel_pc(regs, address);
			goto exit_exception;
		}
	} else
		flags |= FAULT_FLAG_USER;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto intr_or_no_mm;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((regs->tstate & TSTATE_PRIV) &&
		    !search_exception_tables(regs->tpc)) {
			insn = get_fault_insn(regs, insn);
			goto handle_kernel_fault;
		}

retry:
		down_read(&mm->mmap_sem);
	}

	if (fault_code & FAULT_CODE_BAD_RA)
		goto do_sigbus;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

#ifdef CONFIG_PAX_PAGEEXEC
	/* PaX: detect ITLB misses on non-exec pages */
	if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
	    !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
	{
		if (address != regs->tpc)
			goto good_area;

		up_read(&mm->mmap_sem);
		switch (pax_handle_fetch_fault(regs)) {

#ifdef CONFIG_PAX_EMUPLT
		case 2:
		case 3:
			return;
#endif

		}
		pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
		do_group_exit(SIGKILL);
	}
#endif

	/* Pure DTLB misses do not tell us whether the fault-causing
	 * load/store/atomic was a write or not, they only say that there
	 * was no match.  So in such a case we (carefully) read the
	 * instruction to try and figure this out.  It's an optimization,
	 * so it's ok if we can't do this.
	 *
	 * Special hack: window spill/fill knows the exact fault type.
	 */
	if (((fault_code &
	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
	    (vma->vm_flags & VM_WRITE) != 0) {
		insn = get_fault_insn(regs, 0);
		if (!insn)
			goto continue_fault;
		/* All loads, stores and atomics have bits 30 and 31 both set
		 * in the instruction.  Bit 21 is set in all stores, but we
		 * have to avoid prefetches which also have bit 21 set.
		 */
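		/* (Under the 0x01780000 mask below, 0x01680000 selects
		 * op3 values 0x2d and 0x3d, i.e. PREFETCH and PREFETCHA.)
		 */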
		if ((insn & 0xc0200000) == 0xc0200000 &&
		    (insn & 0x01780000) != 0x01680000) {
			/* Don't bother updating thread struct value,
			 * because update_mmu_cache only cares which tlb
			 * the access came from.
			 */
			fault_code |= FAULT_CODE_WRITE;
		}
	}
continue_fault:

	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (!(fault_code & FAULT_CODE_WRITE)) {
		/* Non-faulting loads shouldn't expand stack. */
		insn = get_fault_insn(regs, insn);
		if ((insn & 0xc0800000) == 0xc0800000) {
			unsigned char asi;

			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
			if ((asi & 0xf2) == 0x82)
				goto bad_area;
		}
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* If we took an ITLB miss on a non-executable page, catch
	 * that here.
	 */
	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
		BUG_ON(address != regs->tpc);
		BUG_ON(regs->tstate & TSTATE_PRIV);
		goto bad_area;
	}

	if (fault_code & FAULT_CODE_WRITE) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;

		/* Spitfire has an icache which does not snoop
		 * processor stores.  Later processors do...
		 */
		if (tlb_type == spitfire &&
		    (vma->vm_flags & VM_EXEC) != 0 &&
		    vma->vm_file != NULL)
			set_thread_fault_code(fault_code |
					      FAULT_CODE_BLKCOMMIT);

		flags |= FAULT_FLAG_WRITE;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		goto exit_exception;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}
	up_read(&mm->mmap_sem);

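	/* Fault serviced: grow the TSB if the resident set size has outrun
	 * the current TSB's RSS limit, so the software TLB miss handlers
	 * keep a good hit rate (huge pages are accounted separately below).
	 */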
	mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
		tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss = mm->context.huge_pte_count;
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
		else
			hugetlb_setup(regs);

	}
#endif
exit_exception:
	exception_exit(prev_state);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

handle_kernel_fault:
	do_kernel_fault(regs, si_code, fault_code, insn, address);
	goto exit_exception;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);
	if (!(regs->tstate & TSTATE_PRIV)) {
		pagefault_out_of_memory();
		goto exit_exception;
	}
	goto handle_kernel_fault;

intr_or_no_mm:
	insn = get_fault_insn(regs, 0);
	goto handle_kernel_fault;

do_sigbus:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

	/* Kernel mode? Handle exceptions or die */
	if (regs->tstate & TSTATE_PRIV)
		goto handle_kernel_fault;
}