Commit 2997876c4a1a ("powerpc/32: Restore clearing of MSR[RI] at
interrupt/syscall exit") delayed the clearing of MSR[RI], but missed
that MSR[RI] and MSR[EE] are cleared together: delaying one therefore
also delayed the disabling of interrupts, leading to unexpected
behaviour.
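
For context, a minimal sketch of the fallback used by
__hard_EE_RI_disable() on 32-bit non-BOOKE platforms, assuming the
truncated else branch of the first hunk below clears both bits with a
single mtmsr (the helper name here is illustrative):

	/* EE and RI both live in the MSR and can only be rewritten
	 * together by mtmsr, so delaying the clearing of RI also
	 * delays the clearing of EE, i.e. the disabling of
	 * interrupts. */
	static inline void ee_ri_disable_sketch(void)
	{
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
	}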
To fix that, mostly revert the blamed commit and restore the clearing
of MSR[RI] in interrupt_exit_kernel_prepare() instead. For 8xx this
requires adding a synchronising instruction after the mtspr, in order
to make sure no instruction counter interrupt (used for perf events)
fires just after MSR[RI] has been cleared.
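
A rough sketch of what the synchronised 8xx sequence expands to, based
on the wrtspr_sync() helper added by the diff below:

	/* Writing SPRN_NRI clears MSR[RI] on 8xx; the trailing sync
	 * orders the mtspr against subsequent instructions so that an
	 * instruction counter interrupt cannot fire just after the RI
	 * clear. */
	asm volatile("mtspr " __stringify(SPRN_NRI) ",2; sync" : : : "memory");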
Reported-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Closes: https://lore.kernel.org/all/4d0bd05d-6158-1323-3509-744d3fbe8fc7@xenosoft.de/
Reported-by: Guenter Roeck <linux@roeck-us.net>
Closes: https://lore.kernel.org/all/6b05eb1c-fdef-44e0-91a7-8286825e68f1@roeck-us.net/
Fixes: 2997876c4a1a ("powerpc/32: Restore clearing of MSR[RI] at interrupt/syscall exit")
Signed-off-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/585ea521b2be99d293b539bbfae148366cfb3687.1766146895.git.chleroy@kernel.org
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
-		wrtspr(SPRN_NRI);
+		wrtspr_sync(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
: "r" ((unsigned long)(v)) \
: "memory")
#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
+#define wrtspr_sync(rn) asm volatile("mtspr " __stringify(rn) ",2; sync" : : : "memory")
static inline void wrtee(unsigned long val)
{
.endm
#endif
-.macro clr_ri trash
-#ifndef CONFIG_BOOKE
-#ifdef CONFIG_PPC_8xx
-	mtspr	SPRN_NRI, \trash
-#else
-	li	\trash, MSR_KERNEL & ~MSR_RI
-	mtmsr	\trash
-#endif
-#endif
-.endm
-
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
-	clr_ri	r4
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
-	clr_ri	r10
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
-	clr_ri	r4
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
-	clr_ri	r4
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
#else
static inline bool exit_must_hard_disable(void)
{
-	return false;
+	return true;
}
#endif
	if (unlikely(stack_store))
		__hard_EE_RI_disable();
+#else
+	} else {
+		__hard_EE_RI_disable();
#endif /* CONFIG_PPC64 */
}