// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/traps.h>
void bad_page_fault(struct pt_regs *, unsigned long, int);
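/*
 * bad_page_fault() handles faults taken in kernel mode: it applies an
 * exception-table fixup when one exists for the faulting PC, otherwise
 * it oopses. Defined at the bottom of this file.
 */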
static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;
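	/* Walk this mm's page table and init_mm's reference table in
	 * lockstep, starting from the top-level entry for the faulting
	 * address. active_mm is used so that faults taken while a kernel
	 * thread is borrowing a user mm are handled as well.
	 */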
	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

	pgd_val(*pgd) = pgd_val(*pgd_k);
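	/* Copying the top-level entry is sufficient: everything below the
	 * pgd is shared with init_mm, so the checks that follow only
	 * verify that the shared lower levels are actually present.
	 */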
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;
	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;
	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;
	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */
void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* Report SEGV_MAPERR until a vma covering the address is found. */
	code = SEGV_MAPERR;
	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}
	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
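	/* EXCCAUSE encodes the access type: a store cache-attribute fault
	 * is a write; ITLB miss/privilege and fetch cache-attribute
	 * faults are instruction fetches. Everything else is a read.
	 */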
	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
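	/* Count the fault before attempting to handle it, so that failed
	 * faults show up in perf's software page-fault counter too.
	 */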
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
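	/* lock_mm_and_find_vma() takes the mmap read lock and returns the
	 * vma covering the address, expanding the stack if necessary. On
	 * failure it returns NULL with the lock already dropped, hence
	 * the bare bad_area_nosemaphore target below.
	 */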
	if (!vma)
		goto bad_area_nosemaphore;
	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
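	/* If the fault was interrupted by a fatal signal, the core mm
	 * code has already released the mmap lock for us.
	 */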
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
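	/* At this point the only outcome still needing action is
	 * VM_FAULT_RETRY: the mmap lock was dropped before RETRY was
	 * returned, so go back and take it again with FAULT_FLAG_TRIED.
	 */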
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;
	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);
bad_area_nosemaphore:
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;
	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;
do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
}
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char *, struct pt_regs *, long);
	const struct exception_table_entry *entry;
	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		regs->pc = entry->fixup;
		return;
	}
	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}