x86/xen: Move Xen upcall handler
author    Brian Gerst <brgerst@gmail.com>
          Fri, 14 Mar 2025 15:12:14 +0000 (11:12 -0400)
committer Ingo Molnar <mingo@kernel.org>
          Wed, 19 Mar 2025 10:18:58 +0000 (11:18 +0100)
Move the upcall handler to Xen-specific files.

No functional changes.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20250314151220.862768-2-brgerst@gmail.com
arch/x86/entry/common.c
arch/x86/xen/enlighten_pv.c

arch/x86/entry/common.c
index 3514bf2978eed31f8a0e6027366d1d79e36bb539..ce4d88eda6930b16b087979dacdb8905914693fb 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/init.h>
 
-#ifdef CONFIG_XEN_PV
-#include <xen/xen-ops.h>
-#include <xen/events.h>
-#endif
-
 #include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/traps.h>
@@ -455,70 +450,3 @@ SYSCALL_DEFINE0(ni_syscall)
 {
        return -ENOSYS;
 }
-
-#ifdef CONFIG_XEN_PV
-#ifndef CONFIG_PREEMPTION
-/*
- * Some hypercalls issued by the toolstack can take many 10s of
- * seconds. Allow tasks running hypercalls via the privcmd driver to
- * be voluntarily preempted even if full kernel preemption is
- * disabled.
- *
- * Such preemptible hypercalls are bracketed by
- * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
- * calls.
- */
-DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
-
-/*
- * In case of scheduling the flag must be cleared and restored after
- * returning from schedule as the task might move to a different CPU.
- */
-static __always_inline bool get_and_clear_inhcall(void)
-{
-       bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
-
-       __this_cpu_write(xen_in_preemptible_hcall, false);
-       return inhcall;
-}
-
-static __always_inline void restore_inhcall(bool inhcall)
-{
-       __this_cpu_write(xen_in_preemptible_hcall, inhcall);
-}
-#else
-static __always_inline bool get_and_clear_inhcall(void) { return false; }
-static __always_inline void restore_inhcall(bool inhcall) { }
-#endif
-
-static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-
-       inc_irq_stat(irq_hv_callback_count);
-
-       xen_evtchn_do_upcall();
-
-       set_irq_regs(old_regs);
-}
-
-__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
-{
-       irqentry_state_t state = irqentry_enter(regs);
-       bool inhcall;
-
-       instrumentation_begin();
-       run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
-
-       inhcall = get_and_clear_inhcall();
-       if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
-               irqentry_exit_cond_resched();
-               instrumentation_end();
-               restore_inhcall(inhcall);
-       } else {
-               instrumentation_end();
-               irqentry_exit(regs, state);
-       }
-}
-#endif /* CONFIG_XEN_PV */
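The comment moved by this patch names the bracketing helpers but no caller appears in the diff. A minimal sketch of the pattern, modeled on privcmd_call() in arch/x86/include/asm/xen/hypercall.h; example_slow_hcall() and its arguments are hypothetical, while the begin/end helpers and xen_single_call() are existing kernel interfaces:

#include <xen/xen-ops.h>		/* xen_preemptible_hcall_begin/end() */
#include <asm/xen/hypercall.h>		/* xen_single_call() */

/* Illustrative caller; not part of this patch. */
static long example_slow_hcall(unsigned int call, unsigned long arg)
{
	long ret;

	/* Mark this CPU so the upcall handler may reschedule us. */
	xen_preemptible_hcall_begin();
	ret = xen_single_call(call, arg, 0, 0, 0, 0);
	/* Close the preemptible window again. */
	xen_preemptible_hcall_end();

	return ret;
}
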
arch/x86/xen/enlighten_pv.c
index 5e57835e999d31c403b35dc535fb30558759b278..dcc2041f8e613a4054081804269ef1400c2176ec 100644 (file)
@@ -73,6 +73,7 @@
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/cpu.h>
+#include <asm/irq_stack.h>
 #ifdef CONFIG_X86_IOPL_IOPERM
 #include <asm/io_bitmap.h>
 #endif
@@ -94,6 +95,44 @@ void *xen_initial_gdt;
 static int xen_cpu_up_prepare_pv(unsigned int cpu);
 static int xen_cpu_dead_pv(unsigned int cpu);
 
+#ifndef CONFIG_PREEMPTION
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+/*
+ * In case of scheduling the flag must be cleared and restored after
+ * returning from schedule as the task might move to a different CPU.
+ */
+static __always_inline bool get_and_clear_inhcall(void)
+{
+       bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+
+       __this_cpu_write(xen_in_preemptible_hcall, false);
+       return inhcall;
+}
+
+static __always_inline void restore_inhcall(bool inhcall)
+{
+       __this_cpu_write(xen_in_preemptible_hcall, inhcall);
+}
+
+#else
+
+static __always_inline bool get_and_clear_inhcall(void) { return false; }
+static __always_inline void restore_inhcall(bool inhcall) { }
+
+#endif
+
 struct tls_descs {
        struct desc_struct desc[3];
 };
@@ -687,6 +726,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
 }
 #endif
 
+static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       inc_irq_stat(irq_hv_callback_count);
+
+       xen_evtchn_do_upcall();
+
+       set_irq_regs(old_regs);
+}
+
+__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+       bool inhcall;
+
+       instrumentation_begin();
+       run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+
+       inhcall = get_and_clear_inhcall();
+       if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
+               irqentry_exit_cond_resched();
+               instrumentation_end();
+               restore_inhcall(inhcall);
+       } else {
+               instrumentation_end();
+               irqentry_exit(regs, state);
+       }
+}
+
 struct trap_array_entry {
        void (*orig)(void);
        void (*xen)(void);
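
For context, the helpers the moved comment refers to are declared in include/xen/xen-ops.h; on CONFIG_XEN_PV && !CONFIG_PREEMPTION builds they are roughly the following writes to the per-CPU flag that get_and_clear_inhcall() and restore_inhcall() consume (a sketch for reference, not part of this patch):

/* Sketch of include/xen/xen-ops.h, CONFIG_XEN_PV && !CONFIG_PREEMPTION. */
DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}

This is why xen_pv_evtchn_do_upcall() clears the flag before calling irqentry_exit_cond_resched(): scheduling may migrate the task, so restore_inhcall() re-arms the flag on whichever CPU the task resumes on rather than leaving it set on the old one.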