/* arch/powerpc/kernel/head_32.S */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 */
18
19 #include <linux/init.h>
20 #include <asm/reg.h>
21 #include <asm/page.h>
22 #include <asm/mmu.h>
23 #include <asm/pgtable.h>
24 #include <asm/cputable.h>
25 #include <asm/cache.h>
26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h>
29 #include <asm/ptrace.h>
30 #include <asm/bug.h>
31 #include <asm/kvm_book3s_asm.h>
32 #include <asm/export.h>
33 #include <asm/feature-fixups.h>
34
35 #include "head_32.h"
36
/*
 * The 601 only has IBATs (no separate DBATs), so on that CPU the
 * LOAD_BAT macro programs only the instruction BAT pair.  On other
 * Book3S 32-bit CPUs it loads both the IBAT and DBAT pair n from the
 * 16-byte record at offset n*16 in the table pointed to by 'reg'.
 * RA/RB are scratch registers.  The upper BAT register is zeroed
 * first so the BAT is invalid while the pair is being updated.
 */
#ifdef CONFIG_PPC_BOOK3S_601
#define LOAD_BAT(n, reg, RA, RB)	\
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB
#else
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
#endif
61
__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop
82
/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address). Address translation is turned on, with the prom
 * managing the hash table. Interrupts are disabled. The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader. The expected layout
 * of the regs is:
 *  r3: ptr to residual data
 *  r4: initrd_start or if no initrd then 0
 *  r5: initrd_end - unused if r4 is 0
 *  r6: Start of command line string
 *  r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0			/* r5 != 0 => entered from Open Firmware */
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4		/* "branch" to next insn, setting LR */
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f		/* r31 = 'BooX' to compare with r3 */
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
#ifdef CONFIG_KASAN
	bl	early_hash_table
#endif
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_PPC_BOOK3S_32
	bl	reloc_offset
	bl	init_idle_6xx
#endif /* CONFIG_PPC_BOOK3S_32 */


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3			/* r26 = relocation offset, kept for relocate_kernel */
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel		/* if not, copy ourselves down (r4 = source) */
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI	/* translation on + recoverable */
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	RFI				/* enables MMU */
224
/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 * Secondary CPUs spin here until the master writes their cpu number
 * to physical address 0; they then branch to __secondary_start.
 */
	. = 0xc0		/* for prep bootloader */
	li	r3,1		/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3		/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1		/* set to the cpu # by __secondary_hold */
253
/* System reset */
/* core99 pmac starts the secondary CPU here by changing the vector, and
   putting it back to what it was (unknown_exception) when done. */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect. The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs. We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 * -- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_VMAP_STACK
	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r11
	isync
#endif
#ifdef CONFIG_PPC_CHRP
	mfspr	r11, SPRN_SPRG_THREAD
	tovirt_vmstack r11, r11
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0		/* cr1 != 0 => machine check hit in RTAS */
	bne	cr1, 7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	/* re-test rtas_sp: r11 was consumed by the prologs above */
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	beq	cr1, machine_check_tramp
	b	machine_check_in_rtas
#else
	b	machine_check_tramp
#endif
304
/* Data access exception (0x300, DSI).
 * Fast path: on hash-MMU CPUs, faults that just need a hash PTE inserted
 * are diverted to hash_page_dsi/hash_page before building a full
 * exception frame; everything else falls through to handle_page_fault.
 */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)		/* stash r11 in thread_struct */
	mfspr	r10, SPRN_DSISR
	mfcr	r11			/* preserve CR around the DSISR test */
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi		/* none of the "bad" bits set: try hash fill */
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 24, 30, 30		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
358
/* Instruction access exception (0x400, ISI).
 * Like DataAccess: hash-MMU CPUs first try to insert a hash PTE via
 * hash_page_isi; unresolved faults go to handle_page_fault.
 */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)		/* faulting address is SRR0 for an ISI */
	EXC_XFER_LITE(0x400, handle_page_fault)
396
/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp	/* real handler lives above 0x3000 */

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
 */
	b 	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00
441
/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20. Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor	/* handler body is past 0x3000 */

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable	/* handler body is past 0x3000 */
461
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically
 * (the CPU sets MSR[TGPR], giving us shadow GPR0-GPR3).
 * Software walks the linux page tables and loads the translation
 * into the ITLB via SPRN_RPA/tlbli, or bounces to InstructionAccess.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	lis	r1,PAGE_OFFSET@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#else
	li	r1,_PAGE_PRESENT | _PAGE_EXEC
#endif
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	bge-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3			/* load the ITLB entry */
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	/* Fake up SRR1/DSISR/DAR as a real ISI would and re-enter through
	 * the normal InstructionAccess path with the shadow GPRs disabled. */
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess
529
/*
 * Handle TLB miss for DATA Load operation on 603/603e.
 * Same software table-walk as InstructionTLBMiss, but for the DTLB;
 * also maintains a software LRU for the DTLB way selection on CPUs
 * that need it (MMU_FTR_NEED_DTLB_SW_LRU).
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,PAGE_OFFSET@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
#else
	li	r1, _PAGE_PRESENT
#endif
	bge-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,0,30,30		/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,1,30,30		/* _PAGE_USER -> PP msb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	/* software LRU: toggle this set's bit and mirror it into SRR1[WAY] */
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3			/* load the DTLB entry */
	rfi
DataAddressInvalid:
	/* Fake up SRR1/DSISR/DAR as a real DSI would and re-enter through
	 * the normal DataAccess path with the shadow GPRs disabled. */
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess
608
/*
 * Handle TLB miss for DATA Store on 603/603e.
 * Same walk as DataLoadTLBMiss but requires _PAGE_RW|_PAGE_DIRTY, so a
 * clean/read-only page bounces to DataAddressInvalid and takes the full
 * DSI path instead of being silently marked dirty here.
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,PAGE_OFFSET@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
#else
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
#endif
	bge-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	/* software LRU: toggle this set's bit and mirror it into SRR1[WAY] */
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3			/* load the DTLB entry */
	rfi
668
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

/* Remaining fixed vectors 0x1300-0x2f00: mostly unused/unknown traps. */
	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	/* end of the fixed vector area; free-form code follows */
	. = 0x3000
704
/* Trampolines: these bodies live past the fixed vector area (0x3000),
 * so vector code that cannot fit in its slot branches here. */
machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)		/* r4/r5 = DAR/DSISR for handle_page_fault */
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)
720
#ifdef CONFIG_VMAP_STACK
/* Save the volatile GPRs (plus LR/CTR) that hash_page may clobber into
 * the thread_struct, so the DSI/ISI fast paths can call it without
 * building a full exception frame. */
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

/* Inverse of save_regs_thread. */
.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

/* DSI fast path: call hash_page with DSISR/DAR/SRR0/SRR1 arguments,
 * then resume the DataAccess handler. Entered with r10 = thread ptr. */
hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

/* ISI fast path: SRR0/SRR1 were already saved in the thread_struct by
 * InstructionAccess; r10 holds the saved CR, preserved via r11. */
hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

/* hash_page returns here when it resolved the fault itself: restore the
 * stashed registers and rfi straight back to the faulting instruction. */
	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	SYNC
	RFI

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	SYNC
	RFI

stack_overflow:
	vmap_stack_overflow_exception
#endif
799
/* Branched to from the 0xf20 vector (see remap comment there). */
AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

/* Branched to from the 0xf00 vector. */
PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)
814
815
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 * Entered with r4 = current runtime address of _start and
 * r26 = relocation offset (both set up in __start above).
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h	/* r25 = size of image to copy */
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu
834
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 * Copies one cache line per outer iteration, then dcbst/icbi so the
 * copied code is coherent in the instruction cache before it is run.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4	/* words per cache line */
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
860
#ifdef CONFIG_SMP
/* mpc86xx secondary entry: cpu number comes from the PIR register. */
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

/* Common secondary-CPU bringup: entered with r24 = cpu number, MMU off. */
	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h	/* r3 = reloc offset while running unmapped */
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_PPC_BOOK3S_32
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_PPC_BOOK3S_32 */

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack: top of the thread's stack area, with a zeroed back-chain */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */
935
#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_PPC_BOOK3S_32 (the real implementations
 * live elsewhere when Book3S-32 support is built in).
 */
#if !defined(CONFIG_PPC_BOOK3S_32)
_ENTRY(__save_cpu_setup)
	blr
_ENTRY(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
950
/*
 * Load stuff into the MMU. Intended to be called with
 * IR=0 and DR=0.
 */
#ifdef CONFIG_KASAN
/* Point SDR1 at the early (pre-MMU_init) hash table used for KASAN. */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h	/* NOTE(review): check @h binding
						 * vs (early_hash - PAGE_OFFSET)@h */
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	blr
#endif

/* Flush the TLB, then program SDR1 and the BATs from the values that
 * MMU_init left in _SDR1 and the BATS array. Clobbers r3-r6. */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

	/* Load the BAT registers with the values set up by MMU_init.
	   MMU_init takes care of whether we're on a 601 or not. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
997
/* Initialize all 16 segment registers for context 0: the first
 * NUM_USER_SEGMENTS get user settings (optionally with KUEP Nx /
 * KUAP Ks), the rest get kernel settings (Kp=1). Clobbers r0, r3, r4
 * and CTR. */
load_segment_registers:
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
1023
1024 /*
1025 * This is where the main kernel code starts.
1026 */
1027 start_here:
1028 /* ptr to current */
1029 lis r2,init_task@h
1030 ori r2,r2,init_task@l
1031 /* Set up for using our exception vectors */
1032 /* ptr to phys current thread */
1033 tophys(r4,r2)
1034 addi r4,r4,THREAD /* init task's THREAD */
1035 mtspr SPRN_SPRG_THREAD,r4
1036 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
1037 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
1038 mtspr SPRN_SPRG_PGDIR, r4 /* physical address of the kernel pgdir */
1039
1040 /* stack */
1041 lis r1,init_thread_union@ha
1042 addi r1,r1,init_thread_union@l
1043 li r0,0
1044 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) /* zero back-chain, point r1 at top frame */
1045 /*
1046 * Do early platform-specific initialization,
1047 * and set up the MMU.
1048 */
1049 #ifdef CONFIG_KASAN
1050 bl kasan_early_init
1051 #endif
1052 li r3,0
1053 mr r4,r31 /* r31 holds the phys addr passed in from early boot */
1054 bl machine_init
1055 bl __save_cpu_setup
1056 bl MMU_init
1057 #ifdef CONFIG_KASAN
1058 BEGIN_MMU_FTR_SECTION
1059 bl MMU_init_hw_patch
1060 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
1061 #endif
1062
1063 /*
1064 * Go back to running unmapped so we can load up new values
1065 * for SDR1 (hash table pointer) and the segment registers
1066 * and change to using our exception vectors.
1067 */
1068 lis r4,2f@h
1069 ori r4,r4,2f@l
1070 tophys(r4,r4)
1071 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* target MSR: translation off */
1072
1073 .align 4
1074 mtspr SPRN_SRR0,r4
1075 mtspr SPRN_SRR1,r3
1076 SYNC
1077 RFI /* jump to 2f with IR/DR cleared */
1078 /* Load up the kernel context */
1079 2: bl load_up_mmu
1080
1081 #ifdef CONFIG_BDI_SWITCH
1082 /* Add helper information for the Abatron bdiGDB debugger.
1083 * We do this here because we know the mmu is disabled, and
1084 * will be enabled for real in just a few instructions.
1085 */
1086 lis r5, abatron_pteptrs@h
1087 ori r5, r5, abatron_pteptrs@l
1088 stw r5, 0xf0(r0) /* This must match your Abatron config */
1089 lis r6, swapper_pg_dir@h
1090 ori r6, r6, swapper_pg_dir@l
1091 tophys(r5, r5)
1092 stw r6, 0(r5)
1093 #endif /* CONFIG_BDI_SWITCH */
1094
1095 /* Now turn on the MMU for real! */
1096 li r4,MSR_KERNEL
1097 lis r3,start_kernel@h
1098 ori r3,r3,start_kernel@l
1099 mtspr SPRN_SRR0,r3
1100 mtspr SPRN_SRR1,r4
1101 SYNC
1102 RFI /* enable translation and enter start_kernel(); never returns */
1103
1104 /*
1105 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1106 *
1107 * Set up the segment registers for a new context.
1108 */
1109 _ENTRY(switch_mmu_context)
1110 lwz r3,MMCONTEXTID(r4) /* r4 = next; fetch its context id */
1111 cmpwi cr0,r3,0
1112 blt- 4f /* negative context id is a bug -> trap below */
1113 mulli r3,r3,897 /* multiply context by skew factor */
1114 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1115 #ifdef CONFIG_PPC_KUEP
1116 oris r3, r3, SR_NX@h /* Set Nx */
1117 #endif
1118 #ifdef CONFIG_PPC_KUAP
1119 oris r3, r3, SR_KS@h /* Set Ks */
1120 #endif
1121 li r0,NUM_USER_SEGMENTS
1122 mtctr r0
1123
1124 lwz r4, MM_PGD(r4)
1125 #ifdef CONFIG_BDI_SWITCH
1126 /* Context switch the PTE pointer for the Abatron BDI2000.
1127 * The PGDIR is passed as second argument.
1128 */
1129 lis r5, abatron_pteptrs@ha
1130 stw r4, abatron_pteptrs@l + 0x4(r5)
1131 #endif
1132 tophys(r4, r4) /* SPRN_SPRG_PGDIR holds the physical pgdir address */
1133 mtspr SPRN_SPRG_PGDIR, r4
1134 li r4,0
1135 isync
1136 3:
1137 mtsrin r3,r4
1138 addi r3,r3,0x111 /* next VSID */
1139 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1140 addis r4,r4,0x1000 /* address of next segment */
1141 bdnz 3b
1142 sync
1143 isync
1144 blr
/* Bad (negative) context id: report via BUG and bail out */
1145 4: trap
1146 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
1147 blr
1148 EXPORT_SYMBOL(switch_mmu_context)
1149
1150 /*
1151 * An undocumented "feature" of 604e requires that the v bit
1152 * be cleared before changing BAT values.
1153 *
1154 * Also, newer IBM firmware does not clear bat3 and 4 so
1155 * this makes sure it's done.
1156 * -- Cort
1157 */
/* clear_bats: zero every IBAT (and DBAT, except on 601 which has
 * only IBATs).  Clobbers r10. */
1158 clear_bats:
1159 li r10,0
1160
1161 #ifndef CONFIG_PPC_BOOK3S_601
1162 mtspr SPRN_DBAT0U,r10
1163 mtspr SPRN_DBAT0L,r10
1164 mtspr SPRN_DBAT1U,r10
1165 mtspr SPRN_DBAT1L,r10
1166 mtspr SPRN_DBAT2U,r10
1167 mtspr SPRN_DBAT2L,r10
1168 mtspr SPRN_DBAT3U,r10
1169 mtspr SPRN_DBAT3L,r10
1170 #endif
1171 mtspr SPRN_IBAT0U,r10
1172 mtspr SPRN_IBAT0L,r10
1173 mtspr SPRN_IBAT1U,r10
1174 mtspr SPRN_IBAT1L,r10
1175 mtspr SPRN_IBAT2U,r10
1176 mtspr SPRN_IBAT2L,r10
1177 mtspr SPRN_IBAT3U,r10
1178 mtspr SPRN_IBAT3L,r10
1179 BEGIN_MMU_FTR_SECTION
1180 /* Here's a tweak: at this point, CPU setup has
1181 * not been called yet, so HIGH_BAT_EN may not be
1182 * set in HID0 for the 745x processors. However, it
1183 * seems that doesn't affect our ability to actually
1184 * write to these SPRs.
1185 */
1186 mtspr SPRN_DBAT4U,r10
1187 mtspr SPRN_DBAT4L,r10
1188 mtspr SPRN_DBAT5U,r10
1189 mtspr SPRN_DBAT5L,r10
1190 mtspr SPRN_DBAT6U,r10
1191 mtspr SPRN_DBAT6L,r10
1192 mtspr SPRN_DBAT7U,r10
1193 mtspr SPRN_DBAT7L,r10
1194 mtspr SPRN_IBAT4U,r10
1195 mtspr SPRN_IBAT4L,r10
1196 mtspr SPRN_IBAT5U,r10
1197 mtspr SPRN_IBAT5L,r10
1198 mtspr SPRN_IBAT6U,r10
1199 mtspr SPRN_IBAT6L,r10
1200 mtspr SPRN_IBAT7U,r10
1201 mtspr SPRN_IBAT7L,r10
1202 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1203 blr
1204
/*
 * update_bats: reload all BATs from the BATS[] array while running
 * with translation disabled, then restore the caller's MSR.
 * Drops to real mode via RFI (with EE and RI cleared first so no
 * interrupt can hit while the BATs are in flux), calls clear_bats,
 * reloads the BAT values, and RFIs back to the saved LR/MSR.
 * Clobbers r0, r3-r7.
 */
1205 _ENTRY(update_bats)
1206 lis r4, 1f@h
1207 ori r4, r4, 1f@l
1208 tophys(r4, r4) /* physical address of label 1 below */
1209 mfmsr r6 /* r6 = caller's MSR, restored at the end */
1210 mflr r7 /* r7 = caller's LR, used as return SRR0 */
1211 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1212 rlwinm r0, r6, 0, ~MSR_RI
1213 rlwinm r0, r0, 0, ~MSR_EE
1214 mtmsr r0 /* mask interrupts before switching translation off */
1215
1216 .align 4
1217 mtspr SPRN_SRR0, r4
1218 mtspr SPRN_SRR1, r3
1219 SYNC
1220 RFI /* continue at 1: in real mode */
1221 1: bl clear_bats
1222 lis r3, BATS@ha
1223 addi r3, r3, BATS@l
1224 tophys(r3, r3)
1225 LOAD_BAT(0, r3, r4, r5)
1226 LOAD_BAT(1, r3, r4, r5)
1227 LOAD_BAT(2, r3, r4, r5)
1228 LOAD_BAT(3, r3, r4, r5)
1229 BEGIN_MMU_FTR_SECTION
1230 LOAD_BAT(4, r3, r4, r5)
1231 LOAD_BAT(5, r3, r4, r5)
1232 LOAD_BAT(6, r3, r4, r5)
1233 LOAD_BAT(7, r3, r4, r5)
1234 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1235 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1236 mtmsr r3
1237 mtspr SPRN_SRR0, r7 /* return to caller ... */
1238 mtspr SPRN_SRR1, r6 /* ... with the original MSR */
1239 SYNC
1240 RFI
1241
/*
 * flush_tlbs: invalidate the TLB by issuing a tlbie for every 4kB
 * page of a 4MB congruence-class range (0x400000 down to 0x1000),
 * which is enough to cover all TLB sets.  Clobbers r10.
 */
1242 flush_tlbs:
1243 lis r10, 0x40
1244 1: addic. r10, r10, -0x1000 /* step down one page; sets CR0 */
1245 tlbie r10
1246 bgt 1b /* loop while r10 > 0 */
1247 sync
1248 blr
1249
/*
 * mmu_off: turn address translation off and continue at
 * __after_mmu_off (addressed relative to _start via r3).
 * If the MMU is already off, just return.  Clobbers r0, r3, r4.
 */
1250 mmu_off:
1251 addi r4, r3, __after_mmu_off - _start
1252 mfmsr r3
1253 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1254 beqlr /* already off: plain return */
1255 andc r3,r3,r0 /* clear IR/DR in the MSR we will RFI to */
1256
1257 .align 4
1258 mtspr SPRN_SRR0,r4
1259 mtspr SPRN_SRR1,r3
1260 sync
1261 RFI /* resume at __after_mmu_off, untranslated */
1262
1263 /*
1264 * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
1265 * (we keep one for debugging) and on others, we use one 256M BAT.
1266 */
/* Clobbers r8, r11. */
1267 initial_bats:
1268 lis r11,PAGE_OFFSET@h
1269 #ifdef CONFIG_PPC_BOOK3S_601
1270 ori r11,r11,4 /* set up BAT registers for 601 */
1271 li r8,0x7f /* valid, block length = 8MB */
1272 mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
1273 mtspr SPRN_IBAT0L,r8 /* lower BAT register */
1274 addis r11,r11,0x800000@h /* advance effective address by 8MB */
1275 addis r8,r8,0x800000@h /* and the physical address to match */
1276 mtspr SPRN_IBAT1U,r11
1277 mtspr SPRN_IBAT1L,r8
1278 addis r11,r11,0x800000@h
1279 addis r8,r8,0x800000@h
1280 mtspr SPRN_IBAT2U,r11
1281 mtspr SPRN_IBAT2L,r8
1282 #else
1283 tophys(r8,r11) /* physical base for the BAT lower half */
1284 #ifdef CONFIG_SMP
1285 ori r8,r8,0x12 /* R/W access, M=1 */
1286 #else
1287 ori r8,r8,2 /* R/W access */
1288 #endif /* CONFIG_SMP */
1289 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1290
1291 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
1292 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1293 mtspr SPRN_IBAT0L,r8
1294 mtspr SPRN_IBAT0U,r11
1295 #endif
1296 isync
1297 blr
1298
1299 #ifdef CONFIG_BOOTX_TEXT
1300 setup_disp_bat:
1301 /*
1302 * setup the display bat prepared for us in prom.c
1303 * disp_BAT holds an {upper, lower} BAT pair; if it is zero,
1304 * no display BAT was prepared and we return without touching
1305 * anything.  Clobbers r3 (via reloc_offset), r8, r11.
1306 */
1307 mflr r8 /* reloc_offset clobbers LR; save and restore it */
1308 bl reloc_offset
1309 mtlr r8
1310 addis r8,r3,disp_BAT@ha
1311 addi r8,r8,disp_BAT@l
1312 cmpwi cr0,r8,0
1313 beqlr /* nothing prepared: done */
1314 lwz r11,0(r8) /* upper BAT word */
1315 lwz r8,4(r8) /* lower BAT word */
1316 #ifndef CONFIG_PPC_BOOK3S_601
1317 mtspr SPRN_DBAT3L,r8
1318 mtspr SPRN_DBAT3U,r11
1319 #else
1320 mtspr SPRN_IBAT3L,r8 /* 601 only has IBATs */
1321 mtspr SPRN_IBAT3U,r11
1322 #endif
1323 blr
1324 #endif /* CONFIG_BOOTX_TEXT */
1322
1323 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * setup_cpm_bat: map 1MB at 0xf0000000 (1:1, uncached/guarded,
 * R/W) through DBAT1 for early CPM debug console I/O.
 * Clobbers r8, r11.
 */
1324 setup_cpm_bat:
1325 lis r8, 0xf000
1326 ori r8, r8, 0x002a /* uncached, guarded, R/W — matches the usbgecko BAT flags below */
1327 mtspr SPRN_DBAT1L, r8
1328
1329 lis r11, 0xf000
1330 ori r11, r11, (BL_1M << 2) | 2 /* 1MB block, Vs=1 */
1331 mtspr SPRN_DBAT1U, r11
1332
1333 blr
1334 #endif
1335
1336 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1337 setup_usbgecko_bat:
1338 /* prepare a BAT for early io */
/* Maps the platform I/O physical base (GameCube 0x0c000000 /
 * Wii 0x0d000000) at virtual 0xfffe0000 via DBAT1.
 * Clobbers r8, r11. */
1339 #if defined(CONFIG_GAMECUBE)
1340 lis r8, 0x0c00
1341 #elif defined(CONFIG_WII)
1342 lis r8, 0x0d00
1343 #else
1344 #error Invalid platform for USB Gecko based early debugging.
1345 #endif
1346 /*
1347 * The virtual address used must match the virtual address
1348 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1349 */
1350 lis r11, 0xfffe /* top 128K */
1351 ori r8, r8, 0x002a /* uncached, guarded ,rw */
1352 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1353 mtspr SPRN_DBAT1L, r8
1354 mtspr SPRN_DBAT1U, r11
1355 blr
1356 #endif
1357
1358 #ifdef CONFIG_8260
1359 /* Jump into the system reset for the rom.
1360 * We first disable the MMU, and then jump to the ROM reset address.
1361 *
1362 * r3 is the board info structure, r4 is the location for starting.
1363 * I use this for building a small kernel that can load other kernels,
1364 * rather than trying to write or rely on a rom monitor that can tftp load.
1365 */
1366 .globl m8260_gorom
1367 m8260_gorom:
1368 mfmsr r0
1369 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1370 sync
1371 mtmsr r0
1372 sync
/* Disable the instruction and data caches before handing off */
1373 mfspr r11, SPRN_HID0
1374 lis r10, 0
1375 ori r10,r10,HID0_ICE|HID0_DCE
1376 andc r11, r11, r10
1377 mtspr SPRN_HID0, r11
1378 isync
1379 li r5, MSR_ME|MSR_RI /* minimal MSR: machine check + recoverable */
1380 lis r6,2f@h
1381 addis r6,r6,-KERNELBASE@h /* convert 2f to its physical address */
1382 ori r6,r6,2f@l
1383 mtspr SPRN_SRR0,r6
1384 mtspr SPRN_SRR1,r5
1385 isync
1386 sync
1387 rfi /* continue at 2: untranslated */
1388 2:
1389 mtlr r4 /* r4 = ROM start location */
1390 blr
1391 #endif
1392
1393
1394 /*
1395 * We put a few things here that have to be page-aligned.
1396 * This stuff goes at the beginning of the data segment,
1397 * which is page-aligned.
1398 */
1399 .data
1400 .globl sdata
1401 sdata:
/* 4kB page of zeroes, exported for generic kernel use */
1402 .globl empty_zero_page
1403 empty_zero_page:
1404 .space 4096
1405 EXPORT_SYMBOL(empty_zero_page)
1406
/* The kernel's initial page directory, referenced above by
 * start_here and switch_mmu_context via SPRN_SPRG_PGDIR */
1407 .globl swapper_pg_dir
1408 swapper_pg_dir:
1409 .space PGD_TABLE_SIZE
1410
1411 /* Room for two PTE pointers, usually the kernel and current user pointers
1412 * to their respective root page table.
1413 */
1414 abatron_pteptrs:
1415 .space 8