arch/riscv/mm/fault.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/ptrace.h>

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

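	/*
	 * scause holds the exception cause and sbadaddr (renamed stval in
	 * the current RISC-V privileged spec) holds the faulting virtual
	 * address; both were saved into pt_regs by the trap entry code.
	 */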
	cause = regs->scause;
	addr = regs->sbadaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sstatus & SR_SPIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
	down_read(&mm->mmap_sem);
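	/*
	 * find_vma() returns the first VMA that ends above addr. If addr
	 * is below its start, the access can still be valid when the VMA
	 * is a downward-growing stack that expand_stack() can extend to
	 * cover it.
	 */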
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

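	/*
	 * Check the access type against the VMA permissions. The cause
	 * values are the RISC-V scause codes for instruction (12),
	 * load (13), and store/AMO (15) page faults.
	 */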
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
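	/*
	 * Any other cause reaching this handler would indicate a bug in
	 * the trap dispatch: only the three page-fault causes are routed
	 * to do_page_fault().
	 */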
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY);
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

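	/*
	 * The fault was handled successfully (possibly after a retry):
	 * drop mmap_sem and return to re-execute the faulting instruction.
	 */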
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr, tsk);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr, tsk);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
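		/*
		 * The PPN field of the satp CSR identifies the root page
		 * table in use; pfn_to_virt() turns a page frame number
		 * into its virtual address in the kernel's linear mapping,
		 * yielding a pointer into the active pgd that can be
		 * synchronized against init_mm.pgd.
		 */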
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}