/*
 * linux/arch/m32r/mm/fault.c
 *
 * Copyright (c) 2001, 2002 Hitoshi Yamamoto, and H. Kondo
 * Copyright (c) 2004 Naoto Sugai, NIIBE Yutaka
 *
 * Some code taken from i386 version.
 *   Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

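/*
 * Software index of the next ITLB/DTLB entry to use as a replacement
 * victim in update_mmu_cache() below; kept per-CPU on SMP since each
 * CPU has its own TLB.
 */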
#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif

extern void init_tlb(void);

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION		1
#define ACE_WRITE		2
#define ACE_USERMODE		4
#define ACE_INSTRUCTION		8

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page, addr;
	int write;
	siginfo_t info;

	/*
	 * If the BPSW IE bit was set, re-enable interrupts (set the PSW IE bit).
	 */
	if (regs->psw & M32R_PSW_BIE)
		local_irq_enable();

	tsk = current;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & ACE_USERMODE) == 0, and that the fault was not a
	 * protection error (error_code & ACE_PROTECTION) == 0.
	 */
	if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
		goto vmalloc_fault;

	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate
	 * the source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & ACE_USERMODE) == 0 &&
		    !search_exception_tables(regs->psw))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (error_code & ACE_USERMODE) {
		/*
		 * accessing the stack below "spu" is always a bug.
		 * The "+ 4" is there due to the push instruction
		 * doing pre-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 4 < regs->spu)
			goto bad_area;
	}

	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
		default:	/* 3: write, present */
			/* fall through */
		case ACE_WRITE:	/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case ACE_PROTECTION:	/* read, present */
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

	/*
	 * For instruction access exception, check if the area is executable
	 */
	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
		goto bad_area;

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	addr = (address & PAGE_MASK);
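	/*
	 * Stash the fault code so that update_mmu_cache(), called back
	 * during fault handling, can tell an instruction-access fault
	 * (ITLB) from a data fault (DTLB) via get_thread_fault_code().
	 */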
	set_thread_fault_code(error_code);
	switch (handle_mm_fault(mm, vma, addr, write)) {
		case VM_FAULT_MINOR:
			tsk->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			tsk->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
		default:
			BUG();
	}
	set_thread_fault_code(0);
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & ACE_USERMODE) {
		tsk->thread.address = address;
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT " printing bpc:\n");
	printk("%08lx\n", regs->bpc);
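	/*
	 * Walk the page tables by hand for the diagnostic dump: the MPTB
	 * register holds the base of the current page directory (see
	 * init_mmu() below).
	 */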
	page = *(unsigned long *)MPTB;
	page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & ACE_USERMODE)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exception or die */
	if (!(error_code & ACE_USERMODE))
		goto no_context;

	tsk->thread.address = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)*(unsigned long *)MPTB;
		pgd = offset + (pgd_t *)pgd;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;

		/*
		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
		 * and redundant with the set_pmd() on non-PAE.
		 */

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		addr = (address & PAGE_MASK);
		set_thread_fault_code(error_code);
		update_mmu_cache(NULL, addr, *pte_k);
		set_thread_fault_code(0);
		return;
	}
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK	(NR_TLB_ENTRIES - 1)
#define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
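/*
 * Each TLB entry is a two-word (tag, data) pair, hence the "* 8"
 * above: the tag holds the virtual page address plus the ASID, and
 * the data word holds the pte.
 */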
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
	pte_t pte)
{
	volatile unsigned long *entry1, *entry2;
	unsigned long pte_data, flags;
	unsigned int *entry_dat;
	int inst = get_thread_fault_code() & ACE_INSTRUCTION;
	int i;

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr = (vaddr & PAGE_MASK) | get_asid();

	pte_data = pte_val(pte);

#ifdef CONFIG_CHIP_OPSP
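	/*
	 * The OPSP chip variant updates the TLB in software: walk the
	 * (tag, data) entry pairs of each TLB array and, on a tag match,
	 * refresh the data word with the new pte.
	 */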
	entry1 = (unsigned long *)ITLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry1++ == vaddr) {
			set_tlb_data(entry1, pte_data);
			break;
		}
		entry1++;
	}
	entry2 = (unsigned long *)DTLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry2++ == vaddr) {
			set_tlb_data(entry2, pte_data);
			break;
		}
		entry2++;
	}
#else
	/*
	 * Update TLB entries
	 *  entry1: ITLB entry address
	 *  entry2: DTLB entry address
	 */
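	/*
	 * A sketch of the sequence below: store vaddr to the MSVA
	 * register, store 1 to the MTOP register to start the hardware
	 * search, poll MTOP until it reads back zero, then load the
	 * matched ITLB/DTLB entry addresses from MIDXI and store the new
	 * pte into each entry's data word (the "@+" pre-increment skips
	 * past the tag word).
	 */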
	__asm__ __volatile__ (
		"seth	%0, #high(%4)	\n\t"
		"st	%2, @(%5, %0)	\n\t"
		"ldi	%1, #1		\n\t"
		"st	%1, @(%6, %0)	\n\t"
		"add3	r4, %0, %7	\n\t"
		".fillinsn		\n"
		"1:			\n\t"
		"ld	%1, @(%6, %0)	\n\t"
		"bnez	%1, 1b		\n\t"
		"ld	%0, @r4+	\n\t"
		"ld	%1, @r4		\n\t"
		"st	%3, @+%0	\n\t"
		"st	%3, @+%1	\n\t"
		: "=&r" (entry1), "=&r" (entry2)
		: "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
		  "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
		: "r4", "memory"
	);
#endif

	if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
		goto notfound;

found:
	local_irq_restore(flags);

	return;

	/* Valid entry not found */
notfound:
	/*
	 * Update ITLB or DTLB entry
	 *  entry1: TLB entry address
	 *  entry2: TLB base address
	 */
	if (!inst) {
		entry2 = (unsigned long *)DTLB_BASE;
		entry_dat = &tlb_entry_d;
	} else {
		entry2 = (unsigned long *)ITLB_BASE;
		entry_dat = &tlb_entry_i;
	}
	entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

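	/*
	 * Scan backwards from just below the replacement pointer for an
	 * invalid (free) entry; if every entry is valid, evict the entry
	 * at the pointer and advance it round-robin.
	 */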
	for (i = 0 ; i < NR_TLB_ENTRIES ; i++) {
		if (!(entry1[1] & 2))	/* Valid bit check */
			break;

		if (entry1 != entry2)
			entry1 -= 2;
		else
			entry1 += TLB_MASK << 1;
	}

	if (i >= NR_TLB_ENTRIES) {	/* Empty entry not found */
		entry1 = entry2 + (*entry_dat << 1);
		*entry_dat = (*entry_dat + 1) & TLB_MASK;
	}
	*entry1++ = vaddr;	/* Set TLB tag */
	set_tlb_data(entry1, pte_data);

	goto found;
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		page &= PAGE_MASK;
		page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(page);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
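		/*
		 * For a large range it is cheaper to invalidate the whole
		 * context (a new ASID is allocated on the next activation)
		 * than to flush the pages one by one.
		 */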
		if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
			mm_context(mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid;

			asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;

			start |= asid;
			end |= asid;
			while (start < end) {
				__flush_tlb_page(start);
				start += PAGE_SIZE;
			}
		}
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context's TLB entries
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
	/* Invalidate all TLB entries of this process. */
	/* Instead of invalidating each entry, we get a new MMU context. */
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm_context(mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes' TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
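	/*
	 * Reset the software TLB replacement indexes, seed the MMU
	 * context cache and initial ASID, and point the MPTB register
	 * at the kernel page directory.
	 */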
	tlb_entry_i = 0;
	tlb_entry_d = 0;
	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
	*(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}