powerpc/32: Restore disabling of interrupts at interrupt/syscall exit
author    Christophe Leroy (CS GROUP) <chleroy@kernel.org>
          Fri, 19 Dec 2025 12:23:52 +0000 (13:23 +0100)
committer Madhavan Srinivasan <maddy@linux.ibm.com>
          Mon, 22 Dec 2025 12:55:07 +0000 (18:25 +0530)
Commit 2997876c4a1a ("powerpc/32: Restore clearing of MSR[RI] at
interrupt/syscall exit") delayed clearing of MSR[RI], but missed that
both MSR[RI] and MSR[EE] are cleared at the same time, so the commit
also delayed the disabling of interrupts, leading to unexpected
behaviour.
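
On classic (non-BookE, non-8xx) 32-bit, a plain mtmsr rewrites the
whole MSR, and MSR_KERNEL does not include MSR_EE, so clearing RI this
way inevitably drops EE in the same instruction. A minimal standalone
sketch of that coupling (the helper name is hypothetical; MSR_KERNEL
and MSR_RI are the usual asm/reg.h constants):

    #include <asm/reg.h>

    /* Hypothetical illustration, not the kernel's code: mtmsr replaces
     * the whole MSR.  Because MSR_KERNEL does not contain MSR_EE,
     * writing MSR_KERNEL & ~MSR_RI clears external interrupts (EE) and
     * the recoverable-interrupt marker (RI) in one instruction, which
     * is why delaying the RI clearing also delayed the EE disabling.
     */
    static inline void hard_ee_ri_disable_sketch(void)
    {
            unsigned long msr = MSR_KERNEL & ~MSR_RI;

            asm volatile("mtmsr %0" : : "r" (msr) : "memory");
    }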

To fix that, mostly revert the blamed commit and restore the clearing
of MSR[RI] in interrupt_exit_kernel_prepare() instead. For 8xx this
implies adding a synchronising instruction after the mtspr, so that no
instruction counter interrupt (used for perf events) fires just after
MSR[RI] has been cleared.
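
On 8xx the hard disable is done by writing the dedicated SPRN_NRI
special register, which clears MSR[EE] and MSR[RI] together (the value
written does not matter for this SPR; the macro simply names r2). A
minimal sketch of the barrier this patch adds, mirroring the
wrtspr_sync macro introduced in the reg.h hunk below:

    #include <linux/stringify.h>

    /* Sketch of the synchronised SPR write: the trailing sync forces
     * the mtspr to complete before the next instruction can be issued,
     * so an instruction counter interrupt (perf) cannot be taken in
     * the window where MSR[RI] has just been cleared.
     */
    #define wrtspr_sync(rn) \
            asm volatile("mtspr " __stringify(rn) ",2; sync" : : : "memory")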

Reported-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Closes: https://lore.kernel.org/all/4d0bd05d-6158-1323-3509-744d3fbe8fc7@xenosoft.de/
Reported-by: Guenter Roeck <linux@roeck-us.net>
Closes: https://lore.kernel.org/all/6b05eb1c-fdef-44e0-91a7-8286825e68f1@roeck-us.net/
Fixes: 2997876c4a1a ("powerpc/32: Restore clearing of MSR[RI] at interrupt/syscall exit")
Signed-off-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/585ea521b2be99d293b539bbfae148366cfb3687.1766146895.git.chleroy@kernel.org
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/interrupt.c

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 1078ba88efaf46face84f67e962689e0bf0af02c..9cd945f2acafa948c80bb3e07c1408eb7fa838f4 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -90,7 +90,7 @@ static inline void __hard_EE_RI_disable(void)
        if (IS_ENABLED(CONFIG_BOOKE))
                wrtee(0);
        else if (IS_ENABLED(CONFIG_PPC_8xx))
-               wrtspr(SPRN_NRI);
+               wrtspr_sync(SPRN_NRI);
        else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
                __mtmsrd(0, 1);
        else
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 3fe1866354323b55108bcba1ecf27ee8f1798faf..3449dd2b577d4a00c212fe37d2267e38c432fcac 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1400,6 +1400,7 @@ static inline void mtmsr_isync(unsigned long val)
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
 #define wrtspr(rn)     asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
+#define wrtspr_sync(rn)        asm volatile("mtspr " __stringify(rn) ",2; sync" : : : "memory")
 
 static inline void wrtee(unsigned long val)
 {
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 16f8ee6cb2cd62efd3c657c5fe0f66df3afbd0ad..d8426251b1cda33b8b8ed28c5867037d0f573c45 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -101,17 +101,6 @@ SYM_FUNC_END(__kuep_unlock)
 .endm
 #endif
 
-.macro clr_ri trash
-#ifndef CONFIG_BOOKE
-#ifdef CONFIG_PPC_8xx
-       mtspr   SPRN_NRI, \trash
-#else
-       li      \trash, MSR_KERNEL & ~MSR_RI
-       mtmsr   \trash
-#endif
-#endif
-.endm
-
        .globl  transfer_to_syscall
 transfer_to_syscall:
        stw     r3, ORIG_GPR3(r1)
@@ -160,7 +149,6 @@ ret_from_syscall:
        cmpwi   r3,0
        REST_GPR(3, r1)
 syscall_exit_finish:
-       clr_ri  r4
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
 
@@ -237,7 +225,6 @@ fast_exception_return:
        /* Clear the exception marker on the stack to avoid confusing stacktrace */
        li      r10, 0
        stw     r10, 8(r11)
-       clr_ri  r10
        mtspr   SPRN_SRR1,r9
        mtspr   SPRN_SRR0,r12
        REST_GPR(9, r11)
@@ -270,7 +257,6 @@ interrupt_return:
 .Lfast_user_interrupt_return:
        lwz     r11,_NIP(r1)
        lwz     r12,_MSR(r1)
-       clr_ri  r4
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
 
@@ -313,7 +299,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        cmpwi   cr1,r3,0
        lwz     r11,_NIP(r1)
        lwz     r12,_MSR(r1)
-       clr_ri  r4
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
 
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index aea6f7e8e9c67ad9e615c35e35128e9ef2a8d95c..e63bfde13e03167beb5b25aa43332e91b5fd17e0 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -38,7 +38,7 @@ static inline bool exit_must_hard_disable(void)
 #else
 static inline bool exit_must_hard_disable(void)
 {
-       return false;
+       return true;
 }
 #endif
 
@@ -443,6 +443,9 @@ again:
 
                if (unlikely(stack_store))
                        __hard_EE_RI_disable();
+#else
+       } else {
+               __hard_EE_RI_disable();
 #endif /* CONFIG_PPC64 */
        }