/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-405.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 * fit into one page in order to not encounter a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

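/*
 * On 40x and Book-E, critical, debug and machine check interrupts each
 * have their own save/restore SPR pairs. Each handler below saves the
 * state of the interrupt levels it may have preempted (xSRR pairs, MMU
 * registers, kernel stack limit) and then falls through to the common
 * transfer_to_handler code.
 */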
#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	tovirt_vmstack r12, r12
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r2, r12, -THREAD
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug. Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	kuap_save_and_lock r11, r12, r9, r2, r6
	addi	r2, r12, -THREAD
#ifndef CONFIG_VMAP_STACK
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
#endif
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	tovirt_novmstack r2, r2		/* set r2 to current */
	tovirt_vmstack r9, r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * When tracing IRQ state (lockdep) we enable the MMU before we call
	 * the IRQ tracing functions as they might access vmalloc space or
	 * perform IOs for console output.
	 *
	 * To speed up the syscall path where interrupts stay on, let's check
	 * first if we are changing the MSR value at all.
	 */
	tophys_novmstack r12, r1
	lwz	r12,_MSR(r12)
	andi.	r12,r12,MSR_EE
	bne	1f

	/* MSR isn't changing, just transition directly */
#endif
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_TRACE_IRQFLAGS
1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
	 * keep interrupts disabled at this point otherwise we might risk
	 * taking an interrupt before we tell lockdep they are enabled.
	 */
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r0
	SYNC
	RFI

reenable_mmu:
	/*
	 * We save a bunch of GPRs,
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively,
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point.
	 */

	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)

	/* If we are disabling interrupts (normal case), simply log it with
	 * lockdep
	 */
1:	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	kuap_restore r11, r2, r3, r4, r5
	lwz	r2, GPR2(r11)
	b	fast_exception_return
#endif

#ifndef CONFIG_VMAP_STACK
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 */
0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	REST_GPR(0, r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)
	b	DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

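/*
 * System call entry from the exception prologue: r0 holds the syscall
 * number, r3-r8 the arguments and r9 the saved MSR; execution falls
 * through into DoSyscall below.
 */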
	.globl	transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Make sure interrupts are enabled */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	/* We came in with interrupts disabled, we WARN and mark them enabled
	 * for lockdep now */
0:	tweqi	r12, 0
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
	lwz	r11,TI_FLAGS(r2)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and branch to .66f above has
	 * committed.
	 */

	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
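	/*
	 * r3 holds the syscall return value here; a value in the range
	 * [-MAX_ERRNO, -1] is turned below into a positive errno with
	 * CR0[SO] set, anything else is returned to userspace unchanged.
	 */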
#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	stw	r3,GPR3(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	rseq_syscall
	lwz	r3,GPR3(r1)
#endif
	mr	r6,r3
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	mtmsr	r10
	lwz	r9,TI_FLAGS(r2)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't normally happen.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up. The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock r5, r7
#endif
	kuap_check r2, r4
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

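/*
 * Return path of a newly forked child: finish the scheduler bookkeeping
 * in schedule_tail() and return 0 to the child.
 */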
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

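/*
 * First return of a kernel thread: r14 holds the thread function and
 * r15 its argument (set up by copy_thread()); if the function returns,
 * we fall back into ret_from_syscall with a 0 result.
 */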
	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

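/*
 * Slow syscall exit: r9 holds the thread flags, r3 the return value,
 * r6 the original r3 and r8 -MAX_ERRNO. Handle _TIF_RESTOREALL and
 * _TIF_NOERROR, clear the per-syscall flags, then do any trace/step
 * work with interrupts re-enabled.
 */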
syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r2,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	mtmsr	r10

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

	/*
	 * System call was called from kernel. We get here with SRR1 in r9.
	 * Mark the exception as recoverable once we have retrieved SRR0,
	 * trap a warning and return ENOSYS with CR[SO] set.
	 */
	.globl	ret_from_kernel_syscall
ret_from_kernel_syscall:
	mfspr	r9, SPRN_SRR0
	mfspr	r10, SPRN_SRR1
#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
	mtmsr	r11
#endif

0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING

	li	r3, ENOSYS
	crset	so
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0, r9
	mtspr	SPRN_SRR1, r10
	SYNC
	RFI

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_clone3
ppc_clone3:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone3

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S_32
	andis.	r0,r5,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
#endif
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

#ifdef CONFIG_PPC_BOOK3S_32
	/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	bl	do_break
	b	ret_from_except_full
#endif

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	mtmsr	r11
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check r2, r0
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

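/*
 * Fast return to the interrupted context: r11 points to the exception
 * frame, r9 holds the MSR and r12 the NIP to return with. Only the
 * registers clobbered by the exception prologue are restored here.
 */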
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
#ifdef CONFIG_PPC_BOOK3S_601
	bge	2b
#else
	bge	3f
#endif
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
#ifdef CONFIG_PPC_BOOK3S_601
	blt	2b
#else
	blt	3f
#endif
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	unrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r10		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value. The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock	r10, r11
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	lwz	r8,TI_FLAGS(r2)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

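	/*
	 * _TIF_EMULATE_STACK_STORE: a kernel 'stwu' updating r1 was
	 * emulated and its store deferred. Copy the exception frame to
	 * the new stack location, complete the deferred store, then
	 * clear the flag.
	 */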
	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r2,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPTION
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r2)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore_kuap
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore_kuap
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore_kuap	/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
	bl	preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPTION */
restore_kuap:
	kuap_restore r1, r2, r9, r10, r0

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	bl	trace_hardirqs_on
	addi	r1, r1, 32
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1. Therefore we clear the
	 * MSR:RI bit to indicate this. If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below. -- paulus
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	mtmsr	r10		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

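/*
 * Helpers for the exception-level return paths below: restore a saved
 * xSRR pair and the MMU registers stashed by the corresponding
 * *_transfer_to_handler code above.
 */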
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0. Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
	.global global_dbcr0
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

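/*
 * Pending work on return to user space: r9 holds the thread flags and
 * r10 MSR_KERNEL. Reschedule and/or deliver signals, re-checking the
 * flags with interrupts hard-disabled before finally returning.
 */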
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	SYNC
	mtmsr	r10		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	SYNC
	mtmsr	r10		/* disable interrupts */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	mtmsr	r10		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
#ifdef CONFIG_PPC_BOOK3S_601
	bgelr
#else
	bge	3f
#endif
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
#ifdef CONFIG_PPC_BOOK3S_601
	bltlr
#else
	blt	3f
#endif
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	5f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
5:	mfspr	r2,SPRN_SPRG_THREAD
	addi	r2,r2,-THREAD
	tovirt(r2,r2)			/* set back r2 to current */
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows. Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys_novmstack r7, r1
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	mtmsr	r0		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r7, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys_novmstack r9, r1
#ifdef CONFIG_VMAP_STACK
	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
	mtmsr	r0
	isync
#endif
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys_novmstack r7, r2
	stw	r0, THREAD + RTAS_SP(r7)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */