// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int code = SEGV_MAPERR;
	vm_fault_t fault;
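
	/*
	 * scause and sbadaddr are the supervisor trap-cause and faulting
	 * address CSRs as saved into pt_regs by the trap entry code.
	 */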
	cause = regs->scause;
	addr = regs->sbadaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sstatus & SR_SPIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;
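
	/*
	 * Note: FAULT_FLAG_USER tells the core mm that the fault came
	 * from user mode, which matters for accounting and OOM handling.
	 */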
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
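	/*
	 * find_vma() returns the first VMA with vm_end above addr, so
	 * addr may still lie below vma->vm_start; that is the stack
	 * growth case handled just below.
	 */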
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
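	/*
	 * addr sits below a VM_GROWSDOWN mapping: try to grow the stack
	 * VMA down to cover it (this can fail, e.g. on RLIMIT_STACK).
	 */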
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
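	/*
	 * Any other cause here means an exception was routed to this
	 * handler that it does not know how to service.
	 */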
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY);
			flags |= FAULT_FLAG_TRIED;
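
			/*
			 * With ALLOW_RETRY cleared, the second attempt
			 * will block on the page lock instead of
			 * returning VM_FAULT_RETRY again; TRIED marks
			 * this as the retried attempt.
			 */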

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr, tsk);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr, tsk);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
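		/*
		 * satp's PPN field locates the hardware root page table;
		 * pfn_to_virt() converts that PPN back into the kernel
		 * virtual address at which the table can be indexed.
		 */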
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 * Relying on flush_tlb_fix_spurious_fault would
		 * suffice, but the extra traps reduce
		 * performance. So, eagerly SFENCE.VMA.
		 */
		local_flush_tlb_page(addr);
	}
}