entry: Split up exit_to_user_mode_prepare()
author Thomas Gleixner <tglx@linutronix.de>
Mon, 27 Oct 2025 08:45:21 +0000 (09:45 +0100)
committer Ingo Molnar <mingo@kernel.org>
Tue, 4 Nov 2025 07:35:17 +0000 (08:35 +0100)
exit_to_user_mode_prepare() is used for both interrupts and syscalls, but
there is extra rseq work, which is only required in the interrupt exit
case.

Split up the function and provide wrappers for syscalls and interrupts,
which allows separating the rseq exit work in the next step.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251027084307.782234789@linutronix.de
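
For orientation, the shape of the split can be modelled as a small standalone C sketch: one shared preparation helper, one shared validation helper, and thin per-path wrappers that both still carry the rseq call, which is what allows a follow-up change to drop it from the path that does not need it. The struct and stub bodies below are placeholders for illustration only, not the kernel implementation; only the function names mirror the patch.

/* Hedged sketch of the wrapper split; stub types and bodies, not kernel code. */
#include <stdio.h>

struct pt_regs { int dummy; };          /* placeholder for the real pt_regs */

static void __exit_to_user_mode_prepare(struct pt_regs *regs)
{
        /* shared work: irqs-off check, read TIF flags, exit_to_user_mode_loop() */
        (void)regs;
        puts("common exit work");
}

static void __exit_to_user_mode_validate(void)
{
        /* shared sanity checks: kmap/lockdep assertions in the real code */
        puts("validate kernel state");
}

static void rseq_exit_to_user_mode(void)
{
        /* rseq exit check; the follow-up patch keeps this only where required */
        puts("rseq exit check");
}

/* Per-path wrappers: identical for now, so the rseq call can be split later. */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
        __exit_to_user_mode_prepare(regs);
        rseq_exit_to_user_mode();
        __exit_to_user_mode_validate();
}

static void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
{
        __exit_to_user_mode_prepare(regs);
        rseq_exit_to_user_mode();
        __exit_to_user_mode_validate();
}

int main(void)
{
        struct pt_regs regs = { 0 };

        syscall_exit_to_user_mode_prepare(&regs);   /* syscall return path */
        irqentry_exit_to_user_mode_prepare(&regs);  /* interrupt return path */
        return 0;
}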
arch/arm64/kernel/entry-common.c
include/linux/entry-common.h
include/linux/irq-entry-common.h

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index a9c81715ce59ed73f3447be7327580de8c735adc..0a97e2621f60a640ee72bbd91145de509b351922 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -100,7 +100,7 @@ static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
 static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
 {
        local_irq_disable();
-       exit_to_user_mode_prepare(regs);
+       exit_to_user_mode_prepare_legacy(regs);
        local_daif_mask();
        mte_check_tfsr_exit();
        exit_to_user_mode();
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index d967184ae08fc06c378d4f5463dfef1c87a9dcca..87efb38b70817fd9371942e5ffd7a55d225edca7 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -156,7 +156,7 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
        if (unlikely(work & SYSCALL_WORK_EXIT))
                syscall_exit_work(regs, work);
        local_irq_disable_exit_to_user();
-       exit_to_user_mode_prepare(regs);
+       syscall_exit_to_user_mode_prepare(regs);
 }
 
 /**
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
index 8f5ceeaaaea5918d48e3e4ccd7d41adbf664a6f9..5ea61722bb708d75cb8b37dc361e860ebed4496d 100644
--- a/include/linux/irq-entry-common.h
+++ b/include/linux/irq-entry-common.h
@@ -201,7 +201,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs);
 unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work);
 
 /**
- * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
  * @regs:      Pointer to pt_regs on entry stack
  *
  * 1) check that interrupts are disabled
@@ -209,8 +209,10 @@ unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work
  * 3) call exit_to_user_mode_loop() if any flags from
  *    EXIT_TO_USER_MODE_WORK are set
  * 4) check that interrupts are still disabled
+ *
+ * Don't invoke directly, use the syscall/irqentry_ prefixed variants below
  */
-static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
 {
        unsigned long ti_work;
 
@@ -224,15 +226,52 @@ static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
                ti_work = exit_to_user_mode_loop(regs, ti_work);
 
        arch_exit_to_user_mode_prepare(regs, ti_work);
+}
 
-       rseq_exit_to_user_mode();
-
+static __always_inline void __exit_to_user_mode_validate(void)
+{
        /* Ensure that kernel state is sane for a return to userspace */
        kmap_assert_nomap();
        lockdep_assert_irqs_disabled();
        lockdep_sys_exit();
 }
 
+/* Temporary workaround to keep ARM64 alive */
+static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
+{
+       __exit_to_user_mode_prepare(regs);
+       rseq_exit_to_user_mode();
+       __exit_to_user_mode_validate();
+}
+
+/**
+ * syscall_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs:      Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+       __exit_to_user_mode_prepare(regs);
+       rseq_exit_to_user_mode();
+       __exit_to_user_mode_validate();
+}
+
+/**
+ * irqentry_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs:      Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+       __exit_to_user_mode_prepare(regs);
+       rseq_exit_to_user_mode();
+       __exit_to_user_mode_validate();
+}
+
 /**
  * exit_to_user_mode - Fixup state when exiting to user mode
  *
@@ -297,7 +336,7 @@ static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
 static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
 {
        instrumentation_begin();
-       exit_to_user_mode_prepare(regs);
+       irqentry_exit_to_user_mode_prepare(regs);
        instrumentation_end();
        exit_to_user_mode();
 }