git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
arm64: entry: Add entry and exit functions for debug exceptions
author: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Mon, 7 Jul 2025 11:41:02 +0000 (12:41 +0100)
committer: Will Deacon <will@kernel.org>
Tue, 8 Jul 2025 12:27:41 +0000 (13:27 +0100)
Move the `debug_exception_enter()` and `debug_exception_exit()`
functions from mm/fault.c, as they are needed to split
the debug exceptions entry paths from the current unified one.

Make them externally visible in include/asm/exception.h until
the caller in mm/fault.c is cleaned up.

Signed-off-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Tested-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Will Deacon <will@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20250707114109.35672-7-ada.coupriediaz@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/exception.h
arch/arm64/kernel/entry-common.c
arch/arm64/mm/fault.c

index d48fc16584cd3198d04e5c0926d4a0e7388dd62c..e54b5466fd2c844e2c6cf475297d9d310e3ee50e 100644 (file)
@@ -80,4 +80,8 @@ void do_serror(struct pt_regs *regs, unsigned long esr);
 void do_signal(struct pt_regs *regs);
 
 void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
+
+void debug_exception_enter(struct pt_regs *regs);
+void debug_exception_exit(struct pt_regs *regs);
+
 #endif /* __ASM_EXCEPTION_H */
index 7c1970b341b8cbdef1b8f6fcd3b9143ec396bf3e..3bdfa5abaf7afafd7507de49d83c69954c62faad 100644 (file)
@@ -441,6 +441,28 @@ static __always_inline void fpsimd_syscall_exit(void)
        __this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
 }
 
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+void debug_exception_enter(struct pt_regs *regs)
+{
+       preempt_disable();
+
+       /* This code is a bit fragile.  Test it. */
+       /* Sanity check: RCU must already be watching this CPU here. */
+       RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+/*
+ * NOTE(review): excluded from kprobes — presumably because probing the
+ * debug-exception entry path itself would recurse; confirm.
+ */
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+/* Pairs with debug_exception_enter(): drop the preempt count taken on entry. */
+void debug_exception_exit(struct pt_regs *regs)
+{
+       /*
+        * _no_resched: still in exception context, so do not allow a
+        * reschedule point when re-enabling preemption.
+        */
+       preempt_enable_no_resched();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
 UNHANDLED(el1t, 64, sync)
 UNHANDLED(el1t, 64, irq)
 UNHANDLED(el1t, 64, fiq)
index ec0a337891ddfc705238fe07444c085f0d48aa5c..d451d7d834f1a39a89fe94c0a2ad5b686fc29999 100644 (file)
@@ -966,28 +966,6 @@ void __init hook_debug_fault_code(int nr,
        debug_fault_info[nr].name       = name;
 }
 
-/*
- * In debug exception context, we explicitly disable preemption despite
- * having interrupts disabled.
- * This serves two purposes: it makes it much less likely that we would
- * accidentally schedule in exception context and it will force a warning
- * if we somehow manage to schedule by accident.
- */
-static void debug_exception_enter(struct pt_regs *regs)
-{
-       preempt_disable();
-
-       /* This code is a bit fragile.  Test it. */
-       RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
-}
-NOKPROBE_SYMBOL(debug_exception_enter);
-
-static void debug_exception_exit(struct pt_regs *regs)
-{
-       preempt_enable_no_resched();
-}
-NOKPROBE_SYMBOL(debug_exception_exit);
-
 void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
                        struct pt_regs *regs)
 {