Move "kvm_rebooting" to the kernel, exported for KVM, as one of many steps
towards extracting the innermost VMXON and EFER.SVME management logic out
of KVM and into core x86.
For lack of a better name, call the new file "hw.c", to yield "virt
hardware" when combined with its parent directory.
No functional change intended.
Tested-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Sagi Shahar <sagis@google.com>
Link: https://patch.msgid.link/20260214012702.2368778-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_VIRT_H
+#define _ASM_X86_VIRT_H
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_KVM_X86)
+extern bool virt_rebooting;
+#endif
+
+#endif /* _ASM_X86_VIRT_H */
#include <asm/traps.h>
#include <asm/reboot.h>
#include <asm/fpu/api.h>
+#include <asm/virt.h>
#include <trace/events/ipi.h>
static void svm_emergency_disable_virtualization_cpu(void)
{
- kvm_rebooting = true;
+ virt_rebooting = true;
kvm_cpu_svm_disable();
}
RESTORE_GUEST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
-10: cmpb $0, _ASM_RIP(kvm_rebooting)
+10: cmpb $0, _ASM_RIP(virt_rebooting)
jne 2b
ud2
-30: cmpb $0, _ASM_RIP(kvm_rebooting)
+30: cmpb $0, _ASM_RIP(virt_rebooting)
jne 4b
ud2
-50: cmpb $0, _ASM_RIP(kvm_rebooting)
+50: cmpb $0, _ASM_RIP(virt_rebooting)
jne 6b
ud2
-70: cmpb $0, _ASM_RIP(kvm_rebooting)
+70: cmpb $0, _ASM_RIP(virt_rebooting)
jne 8b
ud2
RESTORE_GUEST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY %sil
-3: cmpb $0, kvm_rebooting(%rip)
+3: cmpb $0, virt_rebooting(%rip)
jne 2b
ud2
#include <linux/misc_cgroup.h>
#include <linux/mmu_context.h>
#include <asm/tdx.h>
+#include <asm/virt.h>
#include "capabilities.h"
#include "mmu.h"
#include "x86_ops.h"
* TDX_SEAMCALL_VMFAILINVALID.
*/
if (unlikely((vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR)) {
- KVM_BUG_ON(!kvm_rebooting, vcpu->kvm);
+ KVM_BUG_ON(!virt_rebooting, vcpu->kvm);
goto unhandled_exit;
}
RET
.Lfixup:
- cmpb $0, _ASM_RIP(kvm_rebooting)
+ cmpb $0, _ASM_RIP(virt_rebooting)
jne .Lvmfail
ud2
.Lvmfail:
#include <asm/msr.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/virt.h>
#include <asm/vmx.h>
#include <trace/events/ipi.h>
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
- kvm_rebooting = true;
+ virt_rebooting = true;
/*
* Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
* set in task context. If this races with VMX is disabled by an NMI,
* VMCLEAR and VMXOFF may #UD, but KVM will eat those faults due to
- * kvm_rebooting set.
+ * virt_rebooting set.
*/
if (!(__read_cr4() & X86_CR4_VMXE))
return;
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
+#include <asm/virt.h>
+
#include <clocksource/hyperv_timer.h>
#define CREATE_TRACE_POINTS
kvm_on_user_return(&msrs->urn);
}
-__visible bool kvm_rebooting;
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
-
/*
* Handle a fault on a hardware virtualization (VMX or SVM) instruction.
*
noinstr void kvm_spurious_fault(void)
{
/* Fault while not rebooting. We want the trace. */
- BUG_ON(!kvm_rebooting);
+ BUG_ON(!virt_rebooting);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spurious_fault);
void kvm_arch_shutdown(void)
{
/*
- * Set kvm_rebooting to indicate that KVM has asynchronously disabled
+ * Set virt_rebooting to indicate that KVM has asynchronously disabled
* hardware virtualization, i.e. that errors and/or exceptions on SVM
* and VMX instructions are expected and should be ignored.
*/
- kvm_rebooting = true;
+ virt_rebooting = true;
/*
- * Ensure kvm_rebooting is visible before IPIs are sent to other CPUs
+ * Ensure virt_rebooting is visible before IPIs are sent to other CPUs
* to disable virtualization. Effectively pairs with the reception of
- * the IPI (kvm_rebooting is read in task/exception context, but only
+ * the IPI (virt_rebooting is read in task/exception context, but only
* _needs_ to be read as %true after the IPI function callback disables
* virtualization).
*/
* disable virtualization arrives. Handle the extreme edge case here
* instead of trying to account for it in the normal flows.
*/
- if (in_task() || WARN_ON_ONCE(!kvm_rebooting))
+ if (in_task() || WARN_ON_ONCE(!virt_rebooting))
drop_user_return_notifiers();
else
__module_get(THIS_MODULE);
u64 arch_capabilities;
};
-extern bool kvm_rebooting;
void kvm_spurious_fault(void);
#define SIZE_OF_MEMSLOTS_HASHTABLE \
# SPDX-License-Identifier: GPL-2.0-only
obj-y += svm/ vmx/
+
+obj-$(subst m,y,$(CONFIG_KVM_X86)) += hw.o
\ No newline at end of file
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm_types.h>
+
+#include <asm/virt.h>
+
+__visible bool virt_rebooting;
+EXPORT_SYMBOL_FOR_KVM(virt_rebooting);