/* arch/powerpc/kernel/entry_64.S */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

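/*
 * On entry from the syscall interrupt, the low-level handler has left
 * r0 = syscall number, r3-r8 = syscall arguments, r11 = caller NIP
 * (SRR0), r12 = caller MSR (SRR1), r9 = caller's r13 and r13 = PACA,
 * as the stores into the pt_regs frame below indicate.
 */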
	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	li	r12,PACA_IRQ_HARD_DIS
	stb	r11,PACAIRQSOFTMASK(r13)
	stb	r12,PACAIRQHAPPENED(r13)

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	syscall_exit_prepare

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	ld	r6,_LINK(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
	mtlr	r6

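	/*
	 * syscall_exit_prepare() returns non-zero when the full register
	 * state (including the non-volatile GPRs) has to be restored rather
	 * than cleared, hence the branch to .Lsyscall_restore_regs below.
	 */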
	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_8GPRS(4, r1)
	ld	r12,GPR12(r1)
	b	.Lsyscall_restore_regs_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
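	/*
	 * MSR[RI] is cleared below because SRR0/SRR1 are about to be loaded
	 * with the return address and MSR; a recoverable interrupt taken
	 * here would clobber them.
	 */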
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
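	/* The child side of fork()/clone() returns 0 to userspace. */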
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
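	/*
	 * copy_thread() placed the thread function in r14 and its argument
	 * in r15 for kernel threads; under the ELFv2 ABI the callee also
	 * expects its own entry address in r12, hence the mr r12,r14 below.
	 */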
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3E
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
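	/*
	 * Bit 0 of the saved trap number flags that the NVGPRs are not yet
	 * saved: if it is already clear the non-volatiles are on the stack
	 * and we can return; otherwise save them and clear the flag.
	 */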
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);
#endif

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache


#define BCCTR_FLUSH	.long 0x4c400420

.macro nops number
	.rept \number
	nop
	.endr
.endm

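/*
 * flush_count_cache overwrites user-controllable branch predictor state:
 * the 64 "bl .+4" calls below displace entries in the link stack, and the
 * loop of BCCTR_FLUSH instructions (a raw-encoded bcctr 2,0,0) flushes the
 * count cache on affected CPUs. The patch_site markers let the kernel
 * nop-out or shorten the sequence at boot when a mitigation is not needed.
 */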
.balign 32
.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr	r9

	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

	li	r9,0x7fff
	mtctr	r9

	BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/*
 * Cancel all explicit user streams as they will have no use after context
 * switch, and stop the HW from creating streams itself.
 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_NVGPRS(r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

#ifdef CONFIG_PPC_BOOK3S
	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return
fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
	kuap_check_amr r3, r4
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	bne	.Lfast_user_interrupt_return
	kuap_restore_amr r3
	andi.	r0,r4,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.	/* should not get here */

	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return
interrupt_return:
_ASM_NOKPROBE_SYMBOL(interrupt_return)
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
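	/*
	 * A non-zero return from interrupt_exit_user_prepare() indicates
	 * that the non-volatile GPRs in the frame must be reloaded (the
	 * fast path below leaves them untouched).
	 */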
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

	.balign IFETCH_ALIGN_BYTES
.Lkernel_interrupt_return:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
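	/*
	 * cr1 records whether r3 is non-zero, i.e. whether an interrupted
	 * stdu to the stack must be emulated at label 1 below before
	 * returning; see the comment at that label.
	 */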
	cmpdi	cr1,r3,0
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_4GPRS(2, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_NVGPRS(r1)			/* Save the non-volatiles */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
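	/*
	 * The sequence below rotates the MSR so that EE becomes the most
	 * significant bit, clears it with rldicl, rotates back and writes
	 * the result, leaving all other MSR bits unchanged.
	 */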
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

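	/*
	 * Build two MSR values: r0 is the current MSR with EE/SE/BE/RI
	 * cleared, used to hard-disable before the rfid; r6 additionally
	 * clears SF, IR/DR, FP/FE0/FE1 and LE, giving the 32-bit big-endian
	 * real-mode MSR that RTAS itself runs with.
	 */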
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or	r6,r6,r0
	sync
	mtmsrd	r6

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

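	/*
	 * bcl 20,31,$+4 is the usual idiom for loading the address of the
	 * next instruction into LR (this form is special-cased so it does
	 * not disturb the return-address predictor); it is used here to
	 * locate rtas_restore_regs via the literal at 1f below.
	 */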
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_NVGPRS(r1)			/* Restore the non-volatiles */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_NVGPRS(r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_NVGPRS(r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,SWITCH_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr