]> git.ipfire.org Git - thirdparty/linux.git/blame - arch/powerpc/kernel/head_32.S
Merge tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block
[thirdparty/linux.git] / arch / powerpc / kernel / head_32.S
CommitLineData
2874c5fd 1/* SPDX-License-Identifier: GPL-2.0-or-later */
14cf11af
PM
2/*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14cf11af
PM
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
14cf11af
PM
17 */
18
e7039845 19#include <linux/init.h>
b3b8dc6c 20#include <asm/reg.h>
14cf11af
PM
21#include <asm/page.h>
22#include <asm/mmu.h>
23#include <asm/pgtable.h>
24#include <asm/cputable.h>
25#include <asm/cache.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
ec2b36b9 29#include <asm/ptrace.h>
5e696617 30#include <asm/bug.h>
dd84c217 31#include <asm/kvm_book3s_asm.h>
9445aa1a 32#include <asm/export.h>
2c86cd18 33#include <asm/feature-fixups.h>
14cf11af 34
8a23fdec
CL
35#include "head_32.h"
36
39097b9c
CL
/*
 * LOAD_BAT(n, reg, RA, RB): load BAT pair n from the BAT table pointed to
 * by `reg` (16 bytes per entry: IBATnU, IBATnL, DBATnU, DBATnL words).
 * RA/RB are scratch.  The upper BAT register is zeroed first so the
 * mapping is never half-valid while the pair is being rewritten.
 */
37/* 601 only have IBAT */
38#ifdef CONFIG_PPC_BOOK3S_601
/* 601 variant: no DBATs exist on the 601, so only the IBAT pair is loaded. */
39#define LOAD_BAT(n, reg, RA, RB) \
40 li RA,0; \
41 mtspr SPRN_IBAT##n##U,RA; \
42 lwz RA,(n*16)+0(reg); \
43 lwz RB,(n*16)+4(reg); \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_IBAT##n##L,RB
46#else
14cf11af
PM
/* Generic variant: invalidate both IBATnU and DBATnU before loading. */
47#define LOAD_BAT(n, reg, RA, RB) \
48 /* see the comment for clear_bats() -- Cort */ \
49 li RA,0; \
50 mtspr SPRN_IBAT##n##U,RA; \
51 mtspr SPRN_DBAT##n##U,RA; \
52 lwz RA,(n*16)+0(reg); \
53 lwz RB,(n*16)+4(reg); \
54 mtspr SPRN_IBAT##n##U,RA; \
55 mtspr SPRN_IBAT##n##L,RB; \
14cf11af
PM
56 lwz RA,(n*16)+8(reg); \
57 lwz RB,(n*16)+12(reg); \
58 mtspr SPRN_DBAT##n##U,RA; \
39097b9c
CL
59 mtspr SPRN_DBAT##n##L,RB
60#endif
14cf11af 61
e7039845 62 __HEAD
b3b8dc6c
PM
63 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
64 .stabs "head_32.S",N_SO,0,0,0f
14cf11af 650:
748a7683 66_ENTRY(_stext);
14cf11af
PM
67
68/*
69 * _start is defined this way because the XCOFF loader in the OpenFirmware
70 * on the powermac expects the entry point to be a procedure descriptor.
71 */
748a7683 72_ENTRY(_start);
14cf11af
PM
73 /*
74 * These are here for legacy reasons, the kernel used to
75 * need to look like a coff function entry for the pmac
76 * but we're always started by some kind of bootloader now.
77 * -- Cort
78 */
79 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
80 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
81 nop
82
83/* PMAC
84 * Enter here with the kernel text, data and bss loaded starting at
85 * 0, running with virtual == physical mapping.
86 * r5 points to the prom entry point (the client interface handler
87 * address). Address translation is turned on, with the prom
88 * managing the hash table. Interrupts are disabled. The stack
89 * pointer (r1) points to just below the end of the half-meg region
90 * from 0x380000 - 0x400000, which is mapped in already.
91 *
92 * If we are booted from MacOS via BootX, we enter with the kernel
93 * image loaded somewhere, and the following values in registers:
94 * r3: 'BooX' (0x426f6f58)
95 * r4: virtual address of boot_infos_t
96 * r5: 0
97 *
14cf11af
PM
98 * PREP
99 * This is jumped to on prep systems right after the kernel is relocated
100 * to its proper place in memory by the boot loader. The expected layout
101 * of the regs is:
102 * r3: ptr to residual data
103 * r4: initrd_start or if no initrd then 0
104 * r5: initrd_end - unused if r4 is 0
105 * r6: Start of command line string
106 * r7: End of command line string
107 *
108 * This just gets a minimal mmu environment setup so we can call
109 * start_here() to do the real work.
110 * -- Cort
111 */
112
 113 .globl __start
 114__start:
 115/*
 116 * We have to do any OF calls before we map ourselves to KERNELBASE,
 117 * because OF may have I/O devices mapped into that area
 118 * (particularly on CHRP).
 119 */
9b6b563c
PM
/* r5 != 0 => Open Firmware client interface handler was passed in (see
 * the PMAC register-layout comment earlier in this file); r5 == 0 skips
 * the prom trampoline entirely. */
 120 cmpwi 0,r5,0
 121 beq 1f
2bda347b 122
28794d34 123#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
2bda347b
BH
 124 /* find out where we are now */
 125 bcl 20,31,$+4
/* bcl+mflr is the classic PC-discovery idiom: link register now holds
 * the address of label 0, from which the runtime base of _stext is
 * computed for the position-independent prom_init call. */
1260: mflr r8 /* r8 = runtime addr here */
 127 addis r8,r8,(_stext - 0b)@ha
 128 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
9b6b563c 129 bl prom_init
28794d34
BH
 130#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
 131
 132 /* We never return. We also hit that trap if trying to boot
 133 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
9b6b563c
PM
 134 trap
 135
d7f39454
BH
 136/*
 137 * Check for BootX signature when supporting PowerMac and branch to
 138 * appropriate trampoline if it's present
 139 */
 140#ifdef CONFIG_PPC_PMAC
/* 0x426f6f58 is ASCII 'BooX' -- the signature BootX leaves in r3
 * (documented in the boot-register comment above). */
1411: lis r31,0x426f
 142 ori r31,r31,0x6f58
 143 cmpw 0,r3,r31
 144 bne 1f
 145 bl bootx_init
 146 trap
 147#endif /* CONFIG_PPC_PMAC */
148
6dece0eb 1491: mr r31,r3 /* save device tree ptr */
14cf11af
PM
150 li r24,0 /* cpu # */
151
152/*
153 * early_init() does the early machine identification and does
154 * the necessary low-level setup and clears the BSS
155 * -- Cort <cort@fsmlabs.com>
156 */
157 bl early_init
158
14cf11af
PM
159/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
160 * the physical address we are running at, returned by early_init()
161 */
162 bl mmu_off
163__after_mmu_off:
14cf11af
PM
164 bl clear_bats
165 bl flush_tlbs
166
167 bl initial_bats
215b8237
CL
168 bl load_segment_registers
169#ifdef CONFIG_KASAN
170 bl early_hash_table
171#endif
f21f49ea 172#if defined(CONFIG_BOOTX_TEXT)
51d3082f
BH
173 bl setup_disp_bat
174#endif
c374e00e
SW
175#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
176 bl setup_cpm_bat
177#endif
d1d56f8c
AH
178#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
179 bl setup_usbgecko_bat
180#endif
14cf11af
PM
181
182/*
183 * Call setup_cpu for CPU 0 and initialize 6xx Idle
184 */
185 bl reloc_offset
186 li r24,0 /* cpu# */
187 bl call_setup_cpu /* Call setup_cpu for this CPU */
d7cceda9 188#ifdef CONFIG_PPC_BOOK3S_32
14cf11af
PM
189 bl reloc_offset
190 bl init_idle_6xx
d7cceda9 191#endif /* CONFIG_PPC_BOOK3S_32 */
14cf11af
PM
192
193
14cf11af
PM
194/*
195 * We need to run with _start at physical address 0.
196 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
197 * the exception vectors at 0 (and therefore this copy
198 * overwrites OF's exception vectors with our own).
9b6b563c 199 * The MMU is off at this point.
14cf11af
PM
200 */
201 bl reloc_offset
202 mr r26,r3
203 addis r4,r3,KERNELBASE@h /* current address of _start */
ccdcef72
DF
204 lis r5,PHYSICAL_START@h
205 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
14cf11af 206 bne relocate_kernel
14cf11af
PM
207/*
208 * we now have the 1st 16M of ram mapped with the bats.
209 * prep needs the mmu to be turned on here, but pmac already has it on.
210 * this shouldn't bother the pmac since it just gets turned on again
211 * as we jump to our code at KERNELBASE. -- Cort
212 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
213 * off, and in other cases, we now turn it off before changing BATs above.
214 */
/*
 * turn_on_mmu: enable address translation by faking an interrupt return.
 * SRR1 gets the current MSR with IR/DR (and RI) set, SRR0 gets the
 * virtual address of start_here, then RFI atomically switches MSR and
 * jumps -- the standard way to turn the MMU on on 32-bit PowerPC.
 */
215turn_on_mmu:
216 mfmsr r0
215b8237 217 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
14cf11af
PM
218 mtspr SPRN_SRR1,r0
219 lis r0,start_here@h
220 ori r0,r0,start_here@l
221 mtspr SPRN_SRR0,r0
222 SYNC
223 RFI /* enables MMU */
224
225/*
226 * We need __secondary_hold as a place to hold the other cpus on
227 * an SMP machine, even when we are running a UP kernel.
228 */
/*
 * __secondary_hold: parking loop for secondary CPUs, pinned at 0xc0 for
 * the prep bootloader.  Each secondary writes its cpu# (r3) into
 * __secondary_hold_acknowledge, then spins reading physical address 0
 * until the master stores that cpu# there, and branches to
 * __secondary_start.
 */
 229 . = 0xc0 /* for prep bootloader */
 230 li r3,1 /* MTX only has 1 cpu */
 231 .globl __secondary_hold
 232__secondary_hold:
 233 /* tell the master we're here */
bbd0abda 234 stw r3,__secondary_hold_acknowledge@l(0)
14cf11af
PM
 235#ifdef CONFIG_SMP
/* NOTE(review): lwz r4,0(0) reads the word at physical address 0; the
 * master releases this CPU by storing its cpu# there (per the comment
 * below) -- intentional use of the zero page as a mailbox. */
236100: lwz r4,0(0)
 237 /* wait until we're told to start */
 238 cmpw 0,r4,r3
 239 bne 100b
 240 /* our cpu # was at addr 0 - go */
 241 mr r24,r3 /* cpu # */
 242 b __secondary_start
 243#else
 244 b .
 245#endif /* CONFIG_SMP */
 246
bbd0abda
PM
/* Mailbox words used by the hold protocol above. */
 247 .globl __secondary_hold_spinloop
 248__secondary_hold_spinloop:
 249 .long 0
 250 .globl __secondary_hold_acknowledge
 251__secondary_hold_acknowledge:
 252 .long -1
253
14cf11af
PM
254/* System reset */
255/* core99 pmac starts the secondary here by changing the vector, and
dc1c1ca3 256 putting it back to what it was (unknown_exception) when done. */
dc1c1ca3 257 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
14cf11af
PM
258
259/* Machine check */
260/*
261 * On CHRP, this is complicated by the fact that we could get a
262 * machine check inside RTAS, and we have no guarantee that certain
263 * critical registers will have the values we expect. The set of
264 * registers that might have bad values includes all the GPRs
265 * and all the BATs. We indicate that we are in RTAS by putting
266 * a non-zero value, the address of the exception frame to use,
0df977ea
CL
267 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
268 * and uses its value if it is non-zero.
14cf11af
PM
269 * (Other exception handlers assume that r1 is a valid kernel stack
270 * pointer when we take an exception from supervisor mode.)
271 * -- paulus.
272 */
/*
 * Machine check vector (fixed at offset 0x200).  On CHRP, a non-zero
 * thread.rtas_sp (RTAS_SP) marks "we were inside RTAS" (see the long
 * comment above); in that case control goes to machine_check_in_rtas,
 * otherwise to the ordinary machine_check_tramp.
 */
 273 . = 0x200
dd84c217 274 DO_KVM 0x200
cd08f109 275MachineCheck:
1f1c4d01 276 EXCEPTION_PROLOG_0
cd08f109
CL
 277#ifdef CONFIG_VMAP_STACK
/* With vmapped stacks the handler may itself take a DTLB miss, so run
 * with IR/RI cleared while probing. */
 278 li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
 279 mtmsr r11
 280 isync
 281#endif
14cf11af 282#ifdef CONFIG_PPC_CHRP
0df977ea 283 mfspr r11, SPRN_SPRG_THREAD
cd08f109 284 tovirt_vmstack r11, r11
0df977ea
CL
/* rtas_sp != 0 => machine check hit while inside RTAS; skip the
 * for_rtas prolog variant and branch to 7f. */
 285 lwz r11, RTAS_SP(r11)
 286 cmpwi cr1, r11, 0
 287 bne cr1, 7f
14cf11af 288#endif /* CONFIG_PPC_CHRP */
cd08f109 289 EXCEPTION_PROLOG_1 for_rtas=1
14cf11af
PM
2907: EXCEPTION_PROLOG_2
 291 addi r3,r1,STACK_FRAME_OVERHEAD
 292#ifdef CONFIG_PPC_CHRP
232ca1ee
CL
 293#ifdef CONFIG_VMAP_STACK
/* Re-test rtas_sp here: cr1 from the earlier test is not preserved
 * across the VMAP_STACK prolog path. */
 294 mfspr r4, SPRN_SPRG_THREAD
 295 tovirt(r4, r4)
 296 lwz r4, RTAS_SP(r4)
 297 cmpwi cr1, r4, 0
14cf11af 298#endif
232ca1ee
CL
 299 beq cr1, machine_check_tramp
 300 b machine_check_in_rtas
 301#else
 302 b machine_check_tramp
14cf11af
PM
 303#endif
304
305/* Data access exception. */
306 . = 0x300
dd84c217 307 DO_KVM 0x300
14cf11af 308DataAccess:
232ca1ee
CL
309#ifdef CONFIG_VMAP_STACK
310 mtspr SPRN_SPRG_SCRATCH0,r10
311 mfspr r10, SPRN_SPRG_THREAD
312BEGIN_MMU_FTR_SECTION
313 stw r11, THR11(r10)
314 mfspr r10, SPRN_DSISR
315 mfcr r11
316#ifdef CONFIG_PPC_KUAP
317 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
318#else
319 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
320#endif
321 mfspr r10, SPRN_SPRG_THREAD
322 beq hash_page_dsi
323.Lhash_page_dsi_cont:
324 mtcr r11
325 lwz r11, THR11(r10)
326END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
327 mtspr SPRN_SPRG_SCRATCH1,r11
328 mfspr r11, SPRN_DAR
329 stw r11, DAR(r10)
330 mfspr r11, SPRN_DSISR
331 stw r11, DSISR(r10)
332 mfspr r11, SPRN_SRR0
333 stw r11, SRR0(r10)
334 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
335 stw r11, SRR1(r10)
336 mfcr r10
337 andi. r11, r11, MSR_PR
338
339 EXCEPTION_PROLOG_1
340 b handle_page_fault_tramp_1
341#else /* CONFIG_VMAP_STACK */
cd08f109 342 EXCEPTION_PROLOG handle_dar_dsisr=1
2e15001e
CL
343 get_and_save_dar_dsisr_on_stack r4, r5, r11
344BEGIN_MMU_FTR_SECTION
a68c31fc 345#ifdef CONFIG_PPC_KUAP
2e15001e 346 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
a68c31fc 347#else
2e15001e 348 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
a68c31fc 349#endif
2e15001e 350 bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
40bb0e90 351 rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
14cf11af 352 bl hash_page
2e15001e
CL
353 b handle_page_fault_tramp_1
354FTR_SECTION_ELSE
355 b handle_page_fault_tramp_2
356ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
232ca1ee 357#endif /* CONFIG_VMAP_STACK */
14cf11af
PM
358
359/* Instruction access exception. */
360 . = 0x400
dd84c217 361 DO_KVM 0x400
14cf11af 362InstructionAccess:
232ca1ee
CL
363#ifdef CONFIG_VMAP_STACK
364 mtspr SPRN_SPRG_SCRATCH0,r10
365 mtspr SPRN_SPRG_SCRATCH1,r11
366 mfspr r10, SPRN_SPRG_THREAD
367 mfspr r11, SPRN_SRR0
368 stw r11, SRR0(r10)
369 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
370 stw r11, SRR1(r10)
371 mfcr r10
372BEGIN_MMU_FTR_SECTION
373 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
374 bne hash_page_isi
375.Lhash_page_isi_cont:
376 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
377END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
378 andi. r11, r11, MSR_PR
379
380 EXCEPTION_PROLOG_1
381 EXCEPTION_PROLOG_2
382#else /* CONFIG_VMAP_STACK */
14cf11af 383 EXCEPTION_PROLOG
b4c001dc 384 andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
14cf11af
PM
385 beq 1f /* if so, try to put a PTE */
386 li r3,0 /* into the hash table */
387 mr r4,r12 /* SRR0 is fault address */
4a3a224c 388BEGIN_MMU_FTR_SECTION
14cf11af 389 bl hash_page
4a3a224c 390END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
232ca1ee 391#endif /* CONFIG_VMAP_STACK */
14cf11af 3921: mr r4,r12
b4c001dc 393 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
1ca9db5b 394 stw r4, _DAR(r11)
a546498f 395 EXC_XFER_LITE(0x400, handle_page_fault)
14cf11af 396
14cf11af
PM
397/* External interrupt */
398 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
399
400/* Alignment exception */
401 . = 0x600
dd84c217 402 DO_KVM 0x600
14cf11af 403Alignment:
cd08f109 404 EXCEPTION_PROLOG handle_dar_dsisr=1
c9c84fd9 405 save_dar_dsisr_on_stack r4, r5, r11
14cf11af 406 addi r3,r1,STACK_FRAME_OVERHEAD
232ca1ee 407 b alignment_exception_tramp
14cf11af
PM
408
409/* Program check exception */
dc1c1ca3 410 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
14cf11af
PM
411
412/* Floating-point unavailable */
413 . = 0x800
dd84c217 414 DO_KVM 0x800
14cf11af 415FPUnavailable:
aa42c69c
KP
416BEGIN_FTR_SECTION
417/*
418 * Certain Freescale cores don't have a FPU and treat fp instructions
419 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
420 */
421 b ProgramCheck
422END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
14cf11af 423 EXCEPTION_PROLOG
6f3d8e69
MN
424 beq 1f
425 bl load_up_fpu /* if from user, just load it up */
426 b fast_exception_return
4271: addi r3,r1,STACK_FRAME_OVERHEAD
642770dd 428 EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
14cf11af
PM
429
430/* Decrementer */
431 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
432
642770dd
CL
433 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
434 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
14cf11af
PM
435
436/* System call */
437 . = 0xc00
dd84c217 438 DO_KVM 0xc00
14cf11af 439SystemCall:
b86fb888 440 SYSCALL_ENTRY 0xc00
14cf11af
PM
441
442/* Single step - not used on 601 */
dc1c1ca3 443 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
642770dd 444 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
14cf11af
PM
445
446/*
447 * The Altivec unavailable trap is at 0x0f20. Foo.
448 * We effectively remap it to 0x3000.
449 * We include an altivec unavailable exception vector even if
450 * not configured for Altivec, so that you can't panic a
451 * non-altivec kernel running on a machine with altivec just
452 * by executing an altivec instruction.
453 */
454 . = 0xf00
dd84c217 455 DO_KVM 0xf00
555d97ac 456 b PerformanceMonitor
14cf11af
PM
457
458 . = 0xf20
dd84c217 459 DO_KVM 0xf20
14cf11af
PM
460 b AltiVecUnavailable
461
14cf11af
PM
462/*
463 * Handle TLB miss for instruction on 603/603e.
464 * Note: we get an alternate set of r0 - r3 to use automatically.
465 */
/*
 * 603/603e software ITLB reload (fixed vector 0x1000).  The CPU enters
 * with MSR[TGPR] set, i.e. r0-r3 here are the shadow (alternate) GPRs
 * -- see the xoris MSR_TGPR in the invalid path below.  The handler
 * walks the two-level Linux page table for the faulting address in
 * IMISS, converts the Linux PTE to a PPC hardware PTE low word, and
 * loads it with tlbli; any failure falls through to
 * InstructionAddressInvalid, which synthesizes an ISI.
 */
 466 . = 0x1000
 467InstructionTLBMiss:
 468/*
00fcb147 469 * r0: scratch
14cf11af
PM
 470 * r1: linux style pte ( later becomes ppc hardware pte )
 471 * r2: ptr to linux-style pte
 472 * r3: scratch
 473 */
14cf11af
PM
 474 /* Get PTE (linux-style) and check access */
 475 mfspr r3,SPRN_IMISS
a8a12199 476#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
8a13c4f9
KG
/* Kernel-text faults only matter with modules/DEBUG_PAGEALLOC; the
 * compare feeds the bge- below that selects swapper_pg_dir. */
 477 lis r1,PAGE_OFFSET@h /* check if kernel address */
 478 cmplw 0,r1,r3
a8a12199 479#endif
93c4a162 480 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0
CL
/* Required permission mask: without CONFIG_SWAP, _PAGE_ACCESSED need
 * not be pre-set, so it is omitted from the check. */
 481#ifdef CONFIG_SWAP
 482 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 483#else
451b3ec0 484 li r1,_PAGE_PRESENT | _PAGE_EXEC
84de6ab0 485#endif
a8a12199 486#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
8a13c4f9 487 bge- 112f
2c12393f
CL
 488 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
 489 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
a8a12199 490#endif
93c4a162 491112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
 492 lwz r2,0(r2) /* get pmd entry */
 493 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
 494 beq- InstructionAddressInvalid /* return if no mapping */
 495 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
/* andc. leaves non-zero iff some required permission bit is absent
 * from the PTE -> access denied. */
 496 lwz r0,0(r2) /* get linux-style pte */
 497 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 498 bne- InstructionAddressInvalid /* return if access not permitted */
14cf11af 499 /* Convert linux-style PTE to low word of PPC-style PTE */
40bb0e90 500 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
f342adca
CL
 501 ori r1, r1, 0xe06 /* clear out reserved bits */
 502 andc r1, r0, r1 /* PP = user? 1 : 0 */
345953cf
KG
503BEGIN_FTR_SECTION
 504 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
505END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 506 mtspr SPRN_RPA,r1
14cf11af
PM
/* tlbli loads the entry; rfi returns with TGPR cleared by hardware. */
 507 tlbli r3
 508 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
 509 mtcrf 0x80,r3
 510 rfi
/*
 * Reload failed: build DSISR/SRR1/DAR as a real ISI would, drop out of
 * the TGPR shadow register set, and redirect to the 0x400 handler.
 */
511InstructionAddressInvalid:
 512 mfspr r3,SPRN_SRR1
 513 rlwinm r1,r3,9,6,6 /* Get load/store bit */
 514
 515 addis r1,r1,0x2000
 516 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
14cf11af
PM
 517 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
 518 or r2,r2,r1
 519 mtspr SPRN_SRR1,r2
 520 mfspr r1,SPRN_IMISS /* Get failing address */
 521 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
 522 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
 523 xor r1,r1,r2
 524 mtspr SPRN_DAR,r1 /* Set fault address */
 525 mfmsr r0 /* Restore "normal" registers */
 526 xoris r0,r0,MSR_TGPR>>16
 527 mtcrf 0x80,r3 /* Restore CR0 */
 528 mtmsr r0
 529 b InstructionAccess
530
531/*
532 * Handle TLB miss for DATA Load operation on 603/603e
533 */
534 . = 0x1100
535DataLoadTLBMiss:
536/*
00fcb147 537 * r0: scratch
14cf11af
PM
538 * r1: linux style pte ( later becomes ppc hardware pte )
539 * r2: ptr to linux-style pte
540 * r3: scratch
541 */
14cf11af
PM
542 /* Get PTE (linux-style) and check access */
543 mfspr r3,SPRN_DMISS
8a13c4f9
KG
544 lis r1,PAGE_OFFSET@h /* check if kernel address */
545 cmplw 0,r1,r3
93c4a162 546 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0
CL
547#ifdef CONFIG_SWAP
548 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
549#else
451b3ec0 550 li r1, _PAGE_PRESENT
84de6ab0 551#endif
8a13c4f9 552 bge- 112f
2c12393f
CL
553 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
554 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
93c4a162 555112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
556 lwz r2,0(r2) /* get pmd entry */
557 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
558 beq- DataAddressInvalid /* return if no mapping */
559 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
560 lwz r0,0(r2) /* get linux-style pte */
561 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 562 bne- DataAddressInvalid /* return if access not permitted */
14cf11af
PM
563 /*
564 * NOTE! We are assuming this is not an SMP system, otherwise
565 * we would need to update the pte atomically with lwarx/stwcx.
566 */
14cf11af 567 /* Convert linux-style PTE to low word of PPC-style PTE */
40bb0e90
CL
568 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
569 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
570 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
a4bd6a93 571 ori r1,r1,0xe04 /* clear out reserved bits */
f342adca 572 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
345953cf
KG
573BEGIN_FTR_SECTION
574 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
575END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 576 mtspr SPRN_RPA,r1
2319f123
KG
577 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
578 mtcrf 0x80,r2
579BEGIN_MMU_FTR_SECTION
580 li r0,1
ee43eb78 581 mfspr r1,SPRN_SPRG_603_LRU
2319f123
KG
582 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
583 slw r0,r0,r2
584 xor r1,r0,r1
585 srw r0,r1,r2
ee43eb78 586 mtspr SPRN_SPRG_603_LRU,r1
2319f123
KG
587 mfspr r2,SPRN_SRR1
588 rlwimi r2,r0,31-14,14,14
589 mtspr SPRN_SRR1,r2
590END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
14cf11af 591 tlbld r3
14cf11af
PM
592 rfi
593DataAddressInvalid:
594 mfspr r3,SPRN_SRR1
595 rlwinm r1,r3,9,6,6 /* Get load/store bit */
596 addis r1,r1,0x2000
597 mtspr SPRN_DSISR,r1
14cf11af
PM
598 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
599 mtspr SPRN_SRR1,r2
600 mfspr r1,SPRN_DMISS /* Get failing address */
601 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
602 beq 20f /* Jump if big endian */
603 xori r1,r1,3
60420: mtspr SPRN_DAR,r1 /* Set fault address */
605 mfmsr r0 /* Restore "normal" registers */
606 xoris r0,r0,MSR_TGPR>>16
607 mtcrf 0x80,r3 /* Restore CR0 */
608 mtmsr r0
609 b DataAccess
610
611/*
612 * Handle TLB miss for DATA Store on 603/603e
613 */
614 . = 0x1200
615DataStoreTLBMiss:
616/*
00fcb147 617 * r0: scratch
14cf11af
PM
618 * r1: linux style pte ( later becomes ppc hardware pte )
619 * r2: ptr to linux-style pte
620 * r3: scratch
621 */
14cf11af
PM
622 /* Get PTE (linux-style) and check access */
623 mfspr r3,SPRN_DMISS
8a13c4f9
KG
624 lis r1,PAGE_OFFSET@h /* check if kernel address */
625 cmplw 0,r1,r3
93c4a162 626 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0 627#ifdef CONFIG_SWAP
415480dc 628 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
84de6ab0 629#else
415480dc 630 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
84de6ab0 631#endif
8a13c4f9 632 bge- 112f
2c12393f
CL
633 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
634 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
93c4a162 635112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
636 lwz r2,0(r2) /* get pmd entry */
637 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
638 beq- DataAddressInvalid /* return if no mapping */
639 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
640 lwz r0,0(r2) /* get linux-style pte */
641 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 642 bne- DataAddressInvalid /* return if access not permitted */
14cf11af
PM
643 /*
644 * NOTE! We are assuming this is not an SMP system, otherwise
645 * we would need to update the pte atomically with lwarx/stwcx.
646 */
14cf11af 647 /* Convert linux-style PTE to low word of PPC-style PTE */
40bb0e90 648 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
f342adca
CL
649 li r1,0xe06 /* clear out reserved bits & PP msb */
650 andc r1,r0,r1 /* PP = user? 1: 0 */
345953cf
KG
651BEGIN_FTR_SECTION
652 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
653END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 654 mtspr SPRN_RPA,r1
2319f123
KG
655 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
656 mtcrf 0x80,r2
657BEGIN_MMU_FTR_SECTION
658 li r0,1
ee43eb78 659 mfspr r1,SPRN_SPRG_603_LRU
2319f123
KG
660 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
661 slw r0,r0,r2
662 xor r1,r0,r1
663 srw r0,r1,r2
ee43eb78 664 mtspr SPRN_SPRG_603_LRU,r1
2319f123
KG
665 mfspr r2,SPRN_SRR1
666 rlwimi r2,r0,31-14,14,14
667 mtspr SPRN_SRR1,r2
668END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
14cf11af 669 tlbld r3
14cf11af
PM
670 rfi
671
672#ifndef CONFIG_ALTIVEC
dc1c1ca3 673#define altivec_assist_exception unknown_exception
14cf11af
PM
674#endif
675
642770dd
CL
676 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
677 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
678 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
679 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
14cf11af 680 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
642770dd
CL
681 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
682 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
683 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
684 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
685 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
686 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
687 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
688 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
689 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
690 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
691 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
692 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
693 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
694 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
695 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
696 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
697 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
698 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
699 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
700 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
701 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
702 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
703 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
704 EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
14cf11af
PM
705
706 . = 0x3000
707
232ca1ee
CL
708machine_check_tramp:
709 EXC_XFER_STD(0x200, machine_check_exception)
710
711alignment_exception_tramp:
712 EXC_XFER_STD(0x600, alignment_exception)
713
2e15001e 714handle_page_fault_tramp_1:
232ca1ee
CL
715#ifdef CONFIG_VMAP_STACK
716 EXCEPTION_PROLOG_2 handle_dar_dsisr=1
717#endif
2e15001e
CL
718 lwz r4, _DAR(r11)
719 lwz r5, _DSISR(r11)
720 /* fall through */
721handle_page_fault_tramp_2:
722 EXC_XFER_LITE(0x300, handle_page_fault)
723
232ca1ee
CL
724#ifdef CONFIG_VMAP_STACK
725.macro save_regs_thread thread
726 stw r0, THR0(\thread)
727 stw r3, THR3(\thread)
728 stw r4, THR4(\thread)
729 stw r5, THR5(\thread)
730 stw r6, THR6(\thread)
731 stw r8, THR8(\thread)
732 stw r9, THR9(\thread)
733 mflr r0
734 stw r0, THLR(\thread)
735 mfctr r0
736 stw r0, THCTR(\thread)
737.endm
738
739.macro restore_regs_thread thread
740 lwz r0, THLR(\thread)
741 mtlr r0
742 lwz r0, THCTR(\thread)
743 mtctr r0
744 lwz r0, THR0(\thread)
745 lwz r3, THR3(\thread)
746 lwz r4, THR4(\thread)
747 lwz r5, THR5(\thread)
748 lwz r6, THR6(\thread)
749 lwz r8, THR8(\thread)
750 lwz r9, THR9(\thread)
751.endm
752
753hash_page_dsi:
754 save_regs_thread r10
755 mfdsisr r3
756 mfdar r4
757 mfsrr0 r5
758 mfsrr1 r9
759 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
760 bl hash_page
761 mfspr r10, SPRN_SPRG_THREAD
762 restore_regs_thread r10
763 b .Lhash_page_dsi_cont
764
765hash_page_isi:
766 mr r11, r10
767 mfspr r10, SPRN_SPRG_THREAD
768 save_regs_thread r10
769 li r3, 0
770 lwz r4, SRR0(r10)
771 lwz r9, SRR1(r10)
772 bl hash_page
773 mfspr r10, SPRN_SPRG_THREAD
774 restore_regs_thread r10
775 mr r10, r11
776 b .Lhash_page_isi_cont
777
778 .globl fast_hash_page_return
779fast_hash_page_return:
780 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
781 mfspr r10, SPRN_SPRG_THREAD
782 restore_regs_thread r10
783 bne 1f
784
785 /* DSI */
786 mtcr r11
787 lwz r11, THR11(r10)
788 mfspr r10, SPRN_SPRG_SCRATCH0
789 SYNC
790 RFI
791
7921: /* ISI */
793 mtcr r11
794 mfspr r11, SPRN_SPRG_SCRATCH1
795 mfspr r10, SPRN_SPRG_SCRATCH0
796 SYNC
797 RFI
798
cd08f109
CL
799stack_overflow:
800 vmap_stack_overflow_exception
232ca1ee 801#endif
cd08f109 802
14cf11af
PM
803AltiVecUnavailable:
804 EXCEPTION_PROLOG
805#ifdef CONFIG_ALTIVEC
37f9ef55
BH
806 beq 1f
807 bl load_up_altivec /* if from user, just load it up */
808 b fast_exception_return
14cf11af 809#endif /* CONFIG_ALTIVEC */
37f9ef55 8101: addi r3,r1,STACK_FRAME_OVERHEAD
642770dd 811 EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
14cf11af 812
555d97ac
AF
813PerformanceMonitor:
814 EXCEPTION_PROLOG
815 addi r3,r1,STACK_FRAME_OVERHEAD
816 EXC_XFER_STD(0xf00, performance_monitor_exception)
817
14cf11af
PM
818
819/*
820 * This code is jumped to from the startup code to copy
ccdcef72 821 * the kernel image to physical address PHYSICAL_START.
14cf11af
PM
822 */
/*
 * relocate_kernel: copy the kernel image down to PHYSICAL_START.
 * r26 = current physical offset (from reloc_offset); klimit gives the
 * image size.  The first 0x4000 bytes (which include this code) are
 * copied first, then execution jumps into the copy (mtctr/bctr) to
 * finish the rest, and finally proceeds to turn_on_mmu.
 */
823relocate_kernel:
 824 addis r9,r26,klimit@ha /* fetch klimit */
 825 lwz r25,klimit@l(r9)
 826 addis r25,r25,-KERNELBASE@h
ccdcef72 827 lis r3,PHYSICAL_START@h /* Destination base address */
14cf11af
PM
 828 li r6,0 /* Destination offset */
 829 li r5,0x4000 /* # bytes of memory to copy */
 830 bl copy_and_flush /* copy the first 0x4000 bytes */
 831 addi r0,r3,4f@l /* jump to the address of 4f */
 832 mtctr r0 /* in copy and do the rest. */
 833 bctr /* jump to the copy */
8344: mr r5,r25
 835 bl copy_and_flush /* copy the rest */
 836 b turn_on_mmu
 837
 838/*
 839 * Copy routine used to copy the kernel to start at physical address 0
 840 * and flush and invalidate the caches as needed.
 841 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 842 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 843 */
748a7683 844_ENTRY(copy_and_flush)
14cf11af
PM
 845 addi r5,r5,-4
 846 addi r6,r6,-4
/* Outer loop: one L1 cache line per iteration (ctr = words per line);
 * after each line, dcbst pushes it to memory and icbi invalidates the
 * stale icache line so the copied code is safely executable. */
7dffb720 8474: li r0,L1_CACHE_BYTES/4
14cf11af
PM
 848 mtctr r0
8493: addi r6,r6,4 /* copy a cache line */
 850 lwzx r0,r6,r4
 851 stwx r0,r6,r3
 852 bdnz 3b
 853 dcbst r6,r3 /* write it to memory */
 854 sync
 855 icbi r6,r3 /* flush the icache line */
 856 cmplw 0,r6,r5
 857 blt 4b
 858 sync /* additional sync needed on g4 */
 859 isync
 860 addi r5,r5,4
 861 addi r6,r6,4
 862 blr
863
14cf11af 864#ifdef CONFIG_SMP
ee0339f2
JL
865 .globl __secondary_start_mpc86xx
866__secondary_start_mpc86xx:
867 mfspr r3, SPRN_PIR
868 stw r3, __secondary_hold_acknowledge@l(0)
869 mr r24, r3 /* cpu # */
870 b __secondary_start
871
14cf11af
PM
872 .globl __secondary_start_pmac_0
873__secondary_start_pmac_0:
874 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
875 li r24,0
876 b 1f
877 li r24,1
878 b 1f
879 li r24,2
880 b 1f
881 li r24,3
8821:
883 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
884 set to map the 0xf0000000 - 0xffffffff region */
885 mfmsr r0
886 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
887 SYNC
888 mtmsr r0
889 isync
890
/*
 * Common bring-up path for all secondary CPUs (SMP).
 * Entered with the MMU off and r24 = this CPU's number.
 * Copies CPU setup from CPU 0, locates this CPU's task and stack via
 * secondary_current, loads the segment registers, BATs and SDR1,
 * then RFIs into start_secondary() with the MMU enabled.
 */
 891	.globl	__secondary_start
 892__secondary_start:
14cf11af
PM
 893	/* Copy some CPU settings from CPU 0 */
 894	bl	__restore_cpu_setup
 895
 896	lis	r3,-KERNELBASE@h
 897	mr	r4,r24
14cf11af 898	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
d7cceda9 899#ifdef CONFIG_PPC_BOOK3S_32
14cf11af
PM
 900	lis	r3,-KERNELBASE@h
 901	bl	init_idle_6xx
d7cceda9 902#endif /* CONFIG_PPC_BOOK3S_32 */
14cf11af 903
4e67bfd7 904	/* get current's stack and current */
7c19c2e5
CL
	/* secondary_current was set up by the boot CPU for this CPU */
 905	lis	r2,secondary_current@ha
 906	tophys(r2,r2)
 907	lwz	r2,secondary_current@l(r2)
ed1cd6de
CL
 908	tophys(r1,r2)
 909	lwz	r1,TASK_STACK(r1)
14cf11af
PM
 910
 911	/* stack */
 912	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	/* terminate the back-chain with a NULL frame pointer */
 913	li	r0,0
 914	tophys(r3,r1)
 915	stw	r0,0(r3)
 916
 917	/* load up the MMU */
b7f8b440 918	bl	load_segment_registers
14cf11af
PM
 919	bl	load_up_mmu
 920
 921	/* ptr to phys current thread */
 922	tophys(r4,r2)
 923	addi	r4,r4,THREAD	/* phys address of our thread_struct */
ee43eb78 924	mtspr	SPRN_SPRG_THREAD,r4
4622a2d4
CL
 925	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
 926	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
 927	mtspr	SPRN_SPRG_PGDIR, r4
14cf11af
PM
 928
 929	/* enable MMU and jump to start_secondary */
 930	li	r4,MSR_KERNEL
14cf11af
PM
 931	lis	r3,start_secondary@h
 932	ori	r3,r3,start_secondary@l
	/* RFI with SRR0=start_secondary, SRR1=MSR_KERNEL turns the MMU on */
 933	mtspr	SPRN_SRR0,r3
 934	mtspr	SPRN_SRR1,r4
 935	SYNC
 936	RFI
 937#endif /* CONFIG_SMP */
938
dd84c217
AG
939#ifdef CONFIG_KVM_BOOK3S_HANDLER
940#include "../kvm/book3s_rmhandlers.S"
941#endif
942
14cf11af
PM
 943/*
 944 * Those generic dummy functions are kept for CPUs not
d7cceda9 945 * included in CONFIG_PPC_BOOK3S_32
14cf11af 946 */
d7cceda9 947#if !defined(CONFIG_PPC_BOOK3S_32)
/* No-op stubs so callers may 'bl' these unconditionally on non-Book3S-32 */
748a7683 948_ENTRY(__save_cpu_setup)
14cf11af 949	blr
748a7683 950_ENTRY(__restore_cpu_setup)
14cf11af 951	blr
d7cceda9 952#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
14cf11af 953
14cf11af
PM
954/*
955 * Load stuff into the MMU. Intended to be called with
956 * IR=0 and DR=0.
957 */
215b8237
CL
 958#ifdef CONFIG_KASAN
/*
 * KASAN boot path: flush the TLB and point SDR1 at the temporary
 * 256kB 'early_hash' table (physical address) so hashed translation
 * works before the real hash table is set up.  Called with IR=DR=0.
 */
 959early_hash_table:
 960	sync			/* Force all PTE updates to finish */
 961	isync
 962	tlbia			/* Clear all TLB entries */
 963	sync			/* wait for tlbia/tlbie to finish */
 964	TLBSYNC			/* ... on all CPUs */
 965	/* Load the SDR1 register (hash table base & size) */
	/* NOTE(review): verify @h binds to the whole difference, i.e. that
	 * this assembles as (early_hash - PAGE_OFFSET)@h and not
	 * early_hash - (PAGE_OFFSET@h) -- confirm against assembler output */
 966	lis	r6, early_hash - PAGE_OFFSET@h
 967	ori	r6, r6, 3	/* 256kB table */
 968	mtspr	SPRN_SDR1, r6
 969	blr
 970#endif
971
14cf11af
PM
/*
 * Load the MMU state prepared by MMU_init(): flush the TLB, set SDR1
 * from the _SDR1 variable, and program the (up to 8) BAT pairs from
 * the BATS array.  Must be called with IR=0 and DR=0.
 */
 972load_up_mmu:
 973	sync			/* Force all PTE updates to finish */
 974	isync
 975	tlbia			/* Clear all TLB entries */
 976	sync			/* wait for tlbia/tlbie to finish */
 977	TLBSYNC			/* ... on all CPUs */
 978	/* Load the SDR1 register (hash table base & size) */
 979	lis	r6,_SDR1@ha
 980	tophys(r6,r6)
 981	lwz	r6,_SDR1@l(r6)
 982	mtspr	SPRN_SDR1,r6
187a0067 983
14cf11af
PM
 984/* Load the BAT registers with the values set up by MMU_init.
 985   MMU_init takes care of whether we're on a 601 or not. */
14cf11af
PM
 986	lis	r3,BATS@ha
 987	addi	r3,r3,BATS@l
 988	tophys(r3,r3)
 989	LOAD_BAT(0,r3,r4,r5)
 990	LOAD_BAT(1,r3,r4,r5)
 991	LOAD_BAT(2,r3,r4,r5)
 992	LOAD_BAT(3,r3,r4,r5)
	/* BATs 4-7 only exist on CPUs with the high-BATs feature (745x) */
7c03d653 993BEGIN_MMU_FTR_SECTION
ee0339f2
JL
 994	LOAD_BAT(4,r3,r4,r5)
 995	LOAD_BAT(5,r3,r4,r5)
 996	LOAD_BAT(6,r3,r4,r5)
 997	LOAD_BAT(7,r3,r4,r5)
7c03d653 998END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
14cf11af
PM
 999	blr
1000
215b8237
CL
/*
 * Initialise all 16 segment registers for context 0.
 * User segments get VSIDs 0, 0x111, 0x222, ... with Kp=Ks=0
 * (plus Nx under KUEP and Ks under KUAP); the remaining kernel
 * segments continue the VSID sequence with Kp=1 and Nx=Ks=0.
 */
1001load_segment_registers:
1002	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
1003	mtctr	r0		/* for context 0 */
1004	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
1005#ifdef CONFIG_PPC_KUEP
1006	oris	r3, r3, SR_NX@h	/* Set Nx */
1007#endif
1008#ifdef CONFIG_PPC_KUAP
1009	oris	r3, r3, SR_KS@h	/* Set Ks */
1010#endif
1011	li	r4, 0
	/* r4 steps through effective addresses, one 256MB segment at a time */
10123:	mtsrin	r3, r4
1013	addi	r3, r3, 0x111	/* increment VSID */
1014	addis	r4, r4, 0x1000	/* address of next segment */
1015	bdnz	3b
1016	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
1017	mtctr	r0			/* for context 0 */
	/* kernel segments: clear the user-protection bits, set Kp */
1018	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
1019	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
1020	oris	r3, r3, SR_KP@h		/* Kp = 1 */
10213:	mtsrin	r3, r4
1022	addi	r3, r3, 0x111	/* increment VSID */
1023	addis	r4, r4, 0x1000	/* address of next segment */
1024	bdnz	3b
1025	blr
1026
14cf11af
PM
1027/*
1028 * This is where the main kernel code starts.
 * Sets up 'current' (init_task), SPRG_THREAD/SPRG_PGDIR and the init
 * stack, runs early platform init and MMU_init(), then drops to real
 * mode to load the final MMU state and RFIs into start_kernel().
1029 */
1030start_here:
1031	/* ptr to current */
1032	lis	r2,init_task@h
1033	ori	r2,r2,init_task@l
1034	/* Set up for using our exception vectors */
1035	/* ptr to phys current thread */
1036	tophys(r4,r2)
1037	addi	r4,r4,THREAD	/* init task's THREAD */
ee43eb78 1038	mtspr	SPRN_SPRG_THREAD,r4
4622a2d4
CL
1039	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
1040	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
1041	mtspr	SPRN_SPRG_PGDIR, r4
14cf11af
PM
1042
1043	/* stack */
1044	lis	r1,init_thread_union@ha
1045	addi	r1,r1,init_thread_union@l
	/* zero the back-chain word of the first stack frame */
1046	li	r0,0
1047	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
1048/*
187a0067 1049 * Do early platform-specific initialization,
14cf11af
PM
1050 * and set up the MMU.
1051 */
2edb16ef
CL
1052#ifdef CONFIG_KASAN
1053	bl	kasan_early_init
1054#endif
6dece0eb
SW
	/* r31 was saved earlier in boot; passed through as machine_init arg */
1055	li	r3,0
1056	mr	r4,r31
14cf11af 1057	bl	machine_init
22c841c9 1058	bl	__save_cpu_setup
14cf11af 1059	bl	MMU_init
9d6d712f 1060#ifdef CONFIG_KASAN
72f208c6
CL
1061BEGIN_MMU_FTR_SECTION
1062	bl	MMU_init_hw_patch
1063END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
9d6d712f 1064#endif
14cf11af 1065
14cf11af
PM
1066/*
1067 * Go back to running unmapped so we can load up new values
1068 * for SDR1 (hash table pointer) and the segment registers
1069 * and change to using our exception vectors.
1070 */
	/* RFI to the physical address of 2: with IR/DR cleared */
1071	lis	r4,2f@h
1072	ori	r4,r4,2f@l
1073	tophys(r4,r4)
1074	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
94dd54c5
CL
1075
1076	.align	4
14cf11af
PM
1077	mtspr	SPRN_SRR0,r4
1078	mtspr	SPRN_SRR1,r3
1079	SYNC
1080	RFI
1081/* Load up the kernel context */
10822:	bl	load_up_mmu
1083
1084#ifdef CONFIG_BDI_SWITCH
1085	/* Add helper information for the Abatron bdiGDB debugger.
1086	 * We do this here because we know the mmu is disabled, and
1087	 * will be enabled for real in just a few instructions.
1088	 */
1089	lis	r5, abatron_pteptrs@h
1090	ori	r5, r5, abatron_pteptrs@l
	/* base register 0 means literal zero: store to physical addr 0xf0 */
1091	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
1092	lis	r6, swapper_pg_dir@h
1093	ori	r6, r6, swapper_pg_dir@l
1094	tophys(r5, r5)
1095	stw	r6, 0(r5)
1096#endif /* CONFIG_BDI_SWITCH */
1097
1098/* Now turn on the MMU for real! */
1099	li	r4,MSR_KERNEL
14cf11af
PM
1100	lis	r3,start_kernel@h
1101	ori	r3,r3,start_kernel@l
1102	mtspr	SPRN_SRR0,r3
1103	mtspr	SPRN_SRR1,r4
1104	SYNC
1105	RFI
1106
1107/*
5e696617
BH
1108 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1109 *
14cf11af
PM
1110 * Set up the segment registers for a new context.
 * Derives the base VSID from next->context.id (skewed by 897 to
 * spread contexts across the hash table), programs the user segment
 * registers and SPRG_PGDIR, and traps on a negative context id.
1111 */
5e696617
BH
1112_ENTRY(switch_mmu_context)
1113	lwz	r3,MMCONTEXTID(r4)
1114	cmpwi	cr0,r3,0
	/* negative context id is invalid -> BUG at 4: below */
1115	blt-	4f
14cf11af
PM
1116	mulli	r3,r3,897	/* multiply context by skew factor */
1117	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
31ed2b13
CL
1118#ifdef CONFIG_PPC_KUEP
1119	oris	r3, r3, SR_NX@h	/* Set Nx */
a68c31fc
CL
1120#endif
1121#ifdef CONFIG_PPC_KUAP
1122	oris	r3, r3, SR_KS@h	/* Set Ks */
31ed2b13 1123#endif
14cf11af
PM
1124	li	r0,NUM_USER_SEGMENTS
1125	mtctr	r0
1126
93c4a162 1127	lwz	r4, MM_PGD(r4)
14cf11af
PM
1128#ifdef CONFIG_BDI_SWITCH
1129	/* Context switch the PTE pointer for the Abatron BDI2000.
1130	 * The PGDIR is passed as second argument.
1131	 */
40058337
CL
1132	lis	r5, abatron_pteptrs@ha
1133	stw	r4, abatron_pteptrs@l + 0x4(r5)
14cf11af 1134#endif
93c4a162
CL
	/* exception entry expects the physical PGD address in SPRG_PGDIR */
1135	tophys(r4, r4)
1136	mtspr	SPRN_SPRG_PGDIR, r4
14cf11af
PM
1137	li	r4,0
1138	isync
11393:
14cf11af
PM
	/* write one segment register per iteration, EA stepping by 256MB */
1140	mtsrin	r3,r4
1141	addi	r3,r3,0x111	/* next VSID */
1142	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
1143	addis	r4,r4,0x1000	/* address of next segment */
1144	bdnz	3b
1145	sync
1146	isync
1147	blr
5e696617
BH
11484:	trap
1149	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
1150	blr
9445aa1a 1151EXPORT_SYMBOL(switch_mmu_context)
14cf11af
PM
1152
1153/*
1154 * An undocumented "feature" of 604e requires that the v bit
1155 * be cleared before changing BAT values.
1156 *
1157 * Also, newer IBM firmware does not clear bat3 and 4 so
1158 * this makes sure it's done.
1159 * -- Cort
 *
 * Zeroes every IBAT (and, except on 601 which has no DBATs, every
 * DBAT); BATs 4-7 only when the CPU has the high-BATs feature.
1160 */
1161clear_bats:
1162	li	r10,0
14cf11af 1163
39097b9c 1164#ifndef CONFIG_PPC_BOOK3S_601
14cf11af
PM
1165	mtspr	SPRN_DBAT0U,r10
1166	mtspr	SPRN_DBAT0L,r10
1167	mtspr	SPRN_DBAT1U,r10
1168	mtspr	SPRN_DBAT1L,r10
1169	mtspr	SPRN_DBAT2U,r10
1170	mtspr	SPRN_DBAT2L,r10
1171	mtspr	SPRN_DBAT3U,r10
1172	mtspr	SPRN_DBAT3L,r10
39097b9c 1173#endif
14cf11af
PM
1174	mtspr	SPRN_IBAT0U,r10
1175	mtspr	SPRN_IBAT0L,r10
1176	mtspr	SPRN_IBAT1U,r10
1177	mtspr	SPRN_IBAT1L,r10
1178	mtspr	SPRN_IBAT2U,r10
1179	mtspr	SPRN_IBAT2L,r10
1180	mtspr	SPRN_IBAT3U,r10
1181	mtspr	SPRN_IBAT3L,r10
7c03d653 1182BEGIN_MMU_FTR_SECTION
14cf11af
PM
1183	/* Here's a tweak: at this point, CPU setup have
1184	 * not been called yet, so HIGH_BAT_EN may not be
1185	 * set in HID0 for the 745x processors. However, it
1186	 * seems that doesn't affect our ability to actually
1187	 * write to these SPRs.
1188	 */
1189	mtspr	SPRN_DBAT4U,r10
1190	mtspr	SPRN_DBAT4L,r10
1191	mtspr	SPRN_DBAT5U,r10
1192	mtspr	SPRN_DBAT5L,r10
1193	mtspr	SPRN_DBAT6U,r10
1194	mtspr	SPRN_DBAT6L,r10
1195	mtspr	SPRN_DBAT7U,r10
1196	mtspr	SPRN_DBAT7L,r10
1197	mtspr	SPRN_IBAT4U,r10
1198	mtspr	SPRN_IBAT4L,r10
1199	mtspr	SPRN_IBAT5U,r10
1200	mtspr	SPRN_IBAT5L,r10
1201	mtspr	SPRN_IBAT6U,r10
1202	mtspr	SPRN_IBAT6L,r10
1203	mtspr	SPRN_IBAT7U,r10
1204	mtspr	SPRN_IBAT7L,r10
7c03d653 1205END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
14cf11af
PM
1206	blr
1207
5e04ae85
CL
/*
 * Reprogram the BAT registers from the BATS array while translation
 * is off: saves MSR/LR, RFIs to 1: in real mode with RI clear, clears
 * and reloads all BATs, then RFIs back to the caller with the
 * original MSR.
 */
1208_ENTRY(update_bats)
1209	lis	r4, 1f@h
1210	ori	r4, r4, 1f@l
1211	tophys(r4, r4)
1212	mfmsr	r6
1213	mflr	r7
1214	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	/* run non-recoverable (RI=0) with interrupts off while BATs change */
1215	rlwinm	r0, r6, 0, ~MSR_RI
1216	rlwinm	r0, r0, 0, ~MSR_EE
1217	mtmsr	r0
94dd54c5
CL
1218
1219	.align	4
5e04ae85
CL
1220	mtspr	SPRN_SRR0, r4
1221	mtspr	SPRN_SRR1, r3
1222	SYNC
1223	RFI
12241:	bl	clear_bats
1225	lis	r3, BATS@ha
1226	addi	r3, r3, BATS@l
1227	tophys(r3, r3)
1228	LOAD_BAT(0, r3, r4, r5)
1229	LOAD_BAT(1, r3, r4, r5)
1230	LOAD_BAT(2, r3, r4, r5)
1231	LOAD_BAT(3, r3, r4, r5)
1232BEGIN_MMU_FTR_SECTION
1233	LOAD_BAT(4, r3, r4, r5)
1234	LOAD_BAT(5, r3, r4, r5)
1235	LOAD_BAT(6, r3, r4, r5)
1236	LOAD_BAT(7, r3, r4, r5)
1237END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1238	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1239	mtmsr	r3
	/* return to the caller (saved LR) restoring the saved MSR */
1240	mtspr	SPRN_SRR0, r7
1241	mtspr	SPRN_SRR1, r6
1242	SYNC
1243	RFI
1244
14cf11af
PM
/*
 * Invalidate TLB entries by tlbie-ing each 4kB effective address
 * from just under 4MB (r10 starts at 0x40_0000) down to 0.
 */
1245flush_tlbs:
1246	lis	r10, 0x40
12471:	addic.	r10, r10, -0x1000
1248	tlbie	r10
9acd57ca 1249	bgt	1b
14cf11af
PM
1250	sync
1251	blr
1252
/*
 * Turn address translation off and continue at __after_mmu_off.
 * Expects r3 = physical address of _start (r4 is derived from it).
 * Returns immediately if IR and DR are already clear.
 */
1253mmu_off:
1254 	addi	r4, r3, __after_mmu_off - _start
1255	mfmsr	r3
1256	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
1257	beqlr
	/* clear whichever of IR/DR were set, then RFI to r4 */
1258	andc	r3,r3,r0
94dd54c5
CL
1259
1260	.align	4
14cf11af
PM
1261	mtspr	SPRN_SRR0,r4
1262	mtspr	SPRN_SRR1,r3
1263	sync
1264	RFI
1265
14cf11af 1266/*
4a5cbf17
BH
1267 * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
1268 * (we keep one for debugging) and on others, we use one 256M BAT.
14cf11af
PM
 * Maps the start of RAM at PAGE_OFFSET with I+D BATs for early boot.
1269 */
1270initial_bats:
1271	lis	r11,PAGE_OFFSET@h
39097b9c 1272#ifdef CONFIG_PPC_BOOK3S_601
14cf11af
PM
1273	ori	r11,r11,4		/* set up BAT registers for 601 */
1274	li	r8,0x7f			/* valid, block length = 8MB */
14cf11af
PM
1275	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
1276	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
4a5cbf17
BH
	/* map two more 8MB blocks at +8MB and +16MB */
1277	addis	r11,r11,0x800000@h
1278	addis	r8,r8,0x800000@h
1279	mtspr	SPRN_IBAT1U,r11
1280	mtspr	SPRN_IBAT1L,r8
1281	addis	r11,r11,0x800000@h
1282	addis	r8,r8,0x800000@h
1283	mtspr	SPRN_IBAT2U,r11
1284	mtspr	SPRN_IBAT2L,r8
39097b9c
CL
1285#else
1286	tophys(r8,r11)
14cf11af
PM
1287#ifdef CONFIG_SMP
1288	ori	r8,r8,0x12		/* R/W access, M=1 */
1289#else
1290	ori	r8,r8,2			/* R/W access */
1291#endif /* CONFIG_SMP */
14cf11af 1292	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
14cf11af 1293
14cf11af
PM
1294	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
1295	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
1296	mtspr	SPRN_IBAT0L,r8
1297	mtspr	SPRN_IBAT0U,r11
39097b9c 1298#endif
14cf11af
PM
1299	isync
1300	blr
1301
f21f49ea 1302#ifdef CONFIG_BOOTX_TEXT
51d3082f
BH
/*
 * Map the BootX display frame buffer with BAT3, using the upper/lower
 * BAT values stored in disp_BAT by prom.c.  Does nothing if disp_BAT
 * is zero.  601 has only IBATs, hence the #ifdef split below.
 */
1303setup_disp_bat:
1304	/*
1305	 * setup the display bat prepared for us in prom.c
1306	 */
	/* reloc_offset clobbers LR, so save/restore it around the call */
1307	mflr	r8
1308	bl	reloc_offset
1309	mtlr	r8
1310	addis	r8,r3,disp_BAT@ha
1311	addi	r8,r8,disp_BAT@l
1312	cmpwi	cr0,r8,0
1313	beqlr
1314	lwz	r11,0(r8)
1315	lwz	r8,4(r8)
39097b9c 1316#ifndef CONFIG_PPC_BOOK3S_601
51d3082f
BH
1317	mtspr	SPRN_DBAT3L,r8
1318	mtspr	SPRN_DBAT3U,r11
39097b9c
CL
1319#else
1320	mtspr	SPRN_IBAT3L,r8
51d3082f 1321	mtspr	SPRN_IBAT3U,r11
39097b9c 1322#endif
51d3082f 1323	blr
f21f49ea 1324#endif /* CONFIG_BOOTX_TEXT */
51d3082f 1325
c374e00e
SW
1326#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * Map the CPM I/O region at 0xf0000000 with DBAT1 for early debug:
 * lower BAT 0x002a = uncached/guarded RW page, upper BAT = 1MB block.
 */
1327setup_cpm_bat:
1328	lis	r8, 0xf000
1329	ori	r8, r8,	0x002a
1330	mtspr	SPRN_DBAT1L, r8
1331
1332	lis	r11, 0xf000
1333	ori	r11, r11, (BL_1M << 2) | 2
1334	mtspr	SPRN_DBAT1U, r11
1335
1336	blr
1337#endif
1338
d1d56f8c
AH
1339#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
/*
 * Map the USB Gecko early-debug I/O window with DBAT1.  The physical
 * base differs between GameCube (0x0c00_0000) and Wii (0x0d00_0000);
 * the virtual address (top 128K) must match FIX_EARLY_DEBUG_BASE.
 */
1340setup_usbgecko_bat:
1341	/* prepare a BAT for early io */
1342#if defined(CONFIG_GAMECUBE)
1343	lis	r8, 0x0c00
1344#elif defined(CONFIG_WII)
1345	lis	r8, 0x0d00
1346#else
1347#error Invalid platform for USB Gecko based early debugging.
1348#endif
1349	/*
1350	 * The virtual address used must match the virtual address
1351	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1352	 */
1353	lis	r11, 0xfffe	/* top 128K */
1354	ori	r8, r8, 0x002a	/* uncached, guarded ,rw */
1355	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
1356	mtspr	SPRN_DBAT1L, r8
1357	mtspr	SPRN_DBAT1U, r11
1358	blr
1359#endif
1360
14cf11af
PM
1361#ifdef CONFIG_8260
1362/* Jump into the system reset for the rom.
1363 * We first disable the MMU, and then jump to the ROM reset address.
1364 *
1365 * r3 is the board info structure, r4 is the location for starting.
1366 * I use this for building a small kernel that can load other kernels,
1367 * rather than trying to write or rely on a rom monitor that can tftp load.
1368 */
1369	.globl	m8260_gorom
1370m8260_gorom:
	/* disable external interrupts before touching caches and MSR */
1371	mfmsr	r0
1372	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
1373	sync
1374	mtmsr	r0
1375	sync
	/* turn off the instruction and data caches via HID0 */
1376	mfspr	r11, SPRN_HID0
1377	lis	r10, 0
1378	ori	r10,r10,HID0_ICE|HID0_DCE
1379	andc	r11, r11, r10
1380	mtspr	SPRN_HID0, r11
1381	isync
	/* RFI to the physical address of 2: with only ME|RI set (MMU off) */
1382	li	r5, MSR_ME|MSR_RI
1383	lis	r6,2f@h
1384	addis	r6,r6,-KERNELBASE@h
1385	ori	r6,r6,2f@l
1386	mtspr	SPRN_SRR0,r6
1387	mtspr	SPRN_SRR1,r5
1388	isync
1389	sync
1390	rfi
13912:
	/* branch to the start location the caller passed in r4 */
1392	mtlr	r4
1393	blr
1394#endif
1395
1396
1397/*
1398 * We put a few things here that have to be page-aligned.
1399 * This stuff goes at the beginning of the data segment,
1400 * which is page-aligned.
1401 */
1402	.data
1403	.globl	sdata
1404sdata:
	/* one page of zeroes, exported for generic kernel code */
1405	.globl	empty_zero_page
1406empty_zero_page:
1407	.space	4096
9445aa1a 1408EXPORT_SYMBOL(empty_zero_page)
14cf11af
PM
1409
	/* the kernel's initial page directory (PGD) */
1410	.globl	swapper_pg_dir
1411swapper_pg_dir:
bee86f14 1412	.space	PGD_TABLE_SIZE
14cf11af 1413
14cf11af
PM
1414/* Room for two PTE pointers, usually the kernel and current user pointers
1415 * to their respective root page table.
 * Used by the Abatron BDI2000 debugger support above (CONFIG_BDI_SWITCH).
1416 */
1417abatron_pteptrs:
1418	.space	8