git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86: Move "kvm_rebooting" to kernel as "virt_rebooting"
author: Sean Christopherson <seanjc@google.com>
Sat, 14 Feb 2026 01:26:49 +0000 (17:26 -0800)
committer: Sean Christopherson <seanjc@google.com>
Wed, 4 Mar 2026 16:52:31 +0000 (08:52 -0800)
Move "kvm_rebooting" to the kernel, exported for KVM, as one of many steps
towards extracting the innermost VMXON and EFER.SVME management logic out
of KVM and into core x86.

For lack of a better name, call the new file "hw.c", to yield "virt
hardware" when combined with its parent directory.

No functional change intended.

Tested-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Sagi Shahar <sagis@google.com>
Link: https://patch.msgid.link/20260214012702.2368778-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/virt.h [new file with mode: 0644]
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/vmenter.S
arch/x86/kvm/vmx/tdx.c
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/virt/Makefile
arch/x86/virt/hw.c [new file with mode: 0644]

diff --git a/arch/x86/include/asm/virt.h b/arch/x86/include/asm/virt.h
new file mode 100644 (file)
index 0000000..131b9bf
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_VIRT_H
+#define _ASM_X86_VIRT_H
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_KVM_X86)
+extern bool virt_rebooting;
+#endif
+
+#endif /* _ASM_X86_VIRT_H */
index 8f8bc863e214353284642830db5bd819ed594e5d..0ae66c770ebc681e8f466c25bf276aa60cfefd43 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/traps.h>
 #include <asm/reboot.h>
 #include <asm/fpu/api.h>
+#include <asm/virt.h>
 
 #include <trace/events/ipi.h>
 
@@ -495,7 +496,7 @@ static inline void kvm_cpu_svm_disable(void)
 
 static void svm_emergency_disable_virtualization_cpu(void)
 {
-       kvm_rebooting = true;
+       virt_rebooting = true;
 
        kvm_cpu_svm_disable();
 }
index 3392bcadfb896c3907e747aecfa77236b8510a05..d47c5c93c991323ee0d7fec7a64a863132f26f35 100644 (file)
@@ -298,16 +298,16 @@ SYM_FUNC_START(__svm_vcpu_run)
        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
 
-10:    cmpb $0, _ASM_RIP(kvm_rebooting)
+10:    cmpb $0, _ASM_RIP(virt_rebooting)
        jne 2b
        ud2
-30:    cmpb $0, _ASM_RIP(kvm_rebooting)
+30:    cmpb $0, _ASM_RIP(virt_rebooting)
        jne 4b
        ud2
-50:    cmpb $0, _ASM_RIP(kvm_rebooting)
+50:    cmpb $0, _ASM_RIP(virt_rebooting)
        jne 6b
        ud2
-70:    cmpb $0, _ASM_RIP(kvm_rebooting)
+70:    cmpb $0, _ASM_RIP(virt_rebooting)
        jne 8b
        ud2
 
@@ -394,7 +394,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY %sil
 
-3:     cmpb $0, kvm_rebooting(%rip)
+3:     cmpb $0, virt_rebooting(%rip)
        jne 2b
        ud2
 
index c5065f84b78be3ca09cd854611e6c9dc3ec61ae4..f81b562733efe22322f6b2801407b4d1e3c7536d 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/misc_cgroup.h>
 #include <linux/mmu_context.h>
 #include <asm/tdx.h>
+#include <asm/virt.h>
 #include "capabilities.h"
 #include "mmu.h"
 #include "x86_ops.h"
@@ -1994,7 +1995,7 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
         * TDX_SEAMCALL_VMFAILINVALID.
         */
        if (unlikely((vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR)) {
-               KVM_BUG_ON(!kvm_rebooting, vcpu->kvm);
+               KVM_BUG_ON(!virt_rebooting, vcpu->kvm);
                goto unhandled_exit;
        }
 
index 4426d34811fce608973779862d86864e52baad8e..8a481dae9cae2738c1331901da512bf0e691f527 100644 (file)
@@ -310,7 +310,7 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
        RET
 
 .Lfixup:
-       cmpb $0, _ASM_RIP(kvm_rebooting)
+       cmpb $0, _ASM_RIP(virt_rebooting)
        jne .Lvmfail
        ud2
 .Lvmfail:
index 967b58a8ab9d0d47fb24def7b4ae70bfbe9d5ec0..fc6e3b6208661235031a1a1c1599cac0002297b1 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/msr.h>
 #include <asm/mwait.h>
 #include <asm/spec-ctrl.h>
+#include <asm/virt.h>
 #include <asm/vmx.h>
 
 #include <trace/events/ipi.h>
@@ -814,13 +815,13 @@ void vmx_emergency_disable_virtualization_cpu(void)
        int cpu = raw_smp_processor_id();
        struct loaded_vmcs *v;
 
-       kvm_rebooting = true;
+       virt_rebooting = true;
 
        /*
         * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
         * set in task context.  If this races with VMX is disabled by an NMI,
         * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults due to
-        * kvm_rebooting set.
+        * virt_rebooting set.
         */
        if (!(__read_cr4() & X86_CR4_VMXE))
                return;
index 7ac3578e6ec005a5796744a2cc7d6a4331275b3e..91a20fffedc3a197125d5be787ee4c0df10804bb 100644 (file)
@@ -83,6 +83,8 @@
 #include <asm/intel_pt.h>
 #include <asm/emulate_prefix.h>
 #include <asm/sgx.h>
+#include <asm/virt.h>
+
 #include <clocksource/hyperv_timer.h>
 
 #define CREATE_TRACE_POINTS
@@ -700,9 +702,6 @@ static void drop_user_return_notifiers(void)
                kvm_on_user_return(&msrs->urn);
 }
 
-__visible bool kvm_rebooting;
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
-
 /*
  * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
  *
@@ -713,7 +712,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
 noinstr void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
-       BUG_ON(!kvm_rebooting);
+       BUG_ON(!virt_rebooting);
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spurious_fault);
 
@@ -13183,16 +13182,16 @@ int kvm_arch_enable_virtualization_cpu(void)
 void kvm_arch_shutdown(void)
 {
        /*
-        * Set kvm_rebooting to indicate that KVM has asynchronously disabled
+        * Set virt_rebooting to indicate that KVM has asynchronously disabled
         * hardware virtualization, i.e. that errors and/or exceptions on SVM
         * and VMX instructions are expected and should be ignored.
         */
-       kvm_rebooting = true;
+       virt_rebooting = true;
 
        /*
-        * Ensure kvm_rebooting is visible before IPIs are sent to other CPUs
+        * Ensure virt_rebooting is visible before IPIs are sent to other CPUs
         * to disable virtualization.  Effectively pairs with the reception of
-        * the IPI (kvm_rebooting is read in task/exception context, but only
+        * the IPI (virt_rebooting is read in task/exception context, but only
         * _needs_ to be read as %true after the IPI function callback disables
         * virtualization).
         */
@@ -13213,7 +13212,7 @@ void kvm_arch_disable_virtualization_cpu(void)
         * disable virtualization arrives.  Handle the extreme edge case here
         * instead of trying to account for it in the normal flows.
         */
-       if (in_task() || WARN_ON_ONCE(!kvm_rebooting))
+       if (in_task() || WARN_ON_ONCE(!virt_rebooting))
                drop_user_return_notifiers();
        else
                __module_get(THIS_MODULE);
index b314649e5c025019ab859ed6f3efc3bb6d8130b2..94d4f07aaaa09e9ac6c401ca18ee9cf504f69d07 100644 (file)
@@ -54,7 +54,6 @@ struct kvm_host_values {
        u64 arch_capabilities;
 };
 
-extern bool kvm_rebooting;
 void kvm_spurious_fault(void);
 
 #define SIZE_OF_MEMSLOTS_HASHTABLE \
index ea343fc392dcc89f0aa2690cebb25fe6fa68aee8..6e485751650cbfc222054f56ecd05c63494608b4 100644 (file)
@@ -1,2 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y  += svm/ vmx/
+
+obj-$(subst m,y,$(CONFIG_KVM_X86)) += hw.o
\ No newline at end of file
diff --git a/arch/x86/virt/hw.c b/arch/x86/virt/hw.c
new file mode 100644 (file)
index 0000000..df3dc18
--- /dev/null
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm_types.h>
+
+#include <asm/virt.h>
+
+__visible bool virt_rebooting;
+EXPORT_SYMBOL_FOR_KVM(virt_rebooting);