From: Sean Christopherson
Date: Sat, 14 Feb 2026 01:26:53 +0000 (-0800)
Subject: KVM: SVM: Move core EFER.SVME enablement to kernel
X-Git-Tag: v7.1-rc1~118^2~3^2~10
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=32d76cdfa1222c88262da5b12e0b2bba444c96fa;p=thirdparty%2Fkernel%2Flinux.git

KVM: SVM: Move core EFER.SVME enablement to kernel

Move the innermost EFER.SVME logic out of KVM and into core x86 to land
the SVM support alongside VMX support.  This will allow providing a more
unified API from the kernel to KVM, and will allow moving the bulk of
the emergency disabling insanity out of KVM without having a weird split
between kernel and KVM for SVM vs. VMX.

No functional change intended.

Tested-by: Chao Gao
Reviewed-by: Dan Williams
Tested-by: Sagi Shahar
Link: https://patch.msgid.link/20260214012702.2368778-8-seanjc@google.com
Signed-off-by: Sean Christopherson
---

diff --git a/arch/x86/include/asm/virt.h b/arch/x86/include/asm/virt.h
index cca0210a5c164..9a0753eaa20ce 100644
--- a/arch/x86/include/asm/virt.h
+++ b/arch/x86/include/asm/virt.h
@@ -15,6 +15,12 @@ int x86_vmx_disable_virtualization_cpu(void);
 void x86_vmx_emergency_disable_virtualization_cpu(void);
 #endif
 
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void);
+int x86_svm_disable_virtualization_cpu(void);
+void x86_svm_emergency_disable_virtualization_cpu(void);
+#endif
+
 #else
 static __always_inline void x86_virt_init(void) {}
 #endif
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0ae66c770ebc6..fc08450cb4b78 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -478,27 +478,9 @@ static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm
 	return &sd->save_area->host_sev_es_save;
 }
 
-static inline void kvm_cpu_svm_disable(void)
-{
-	uint64_t efer;
-
-	wrmsrq(MSR_VM_HSAVE_PA, 0);
-	rdmsrq(MSR_EFER, efer);
-	if (efer & EFER_SVME) {
-		/*
-		 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
-		 * NMI aren't blocked.
-		 */
-		stgi();
-		wrmsrq(MSR_EFER, efer & ~EFER_SVME);
-	}
-}
-
 static void svm_emergency_disable_virtualization_cpu(void)
 {
-	virt_rebooting = true;
-
-	kvm_cpu_svm_disable();
+	wrmsrq(MSR_VM_HSAVE_PA, 0);
 }
 
 static void svm_disable_virtualization_cpu(void)
@@ -507,7 +489,8 @@ static void svm_disable_virtualization_cpu(void)
 	if (tsc_scaling)
 		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 
-	kvm_cpu_svm_disable();
+	x86_svm_disable_virtualization_cpu();
+	wrmsrq(MSR_VM_HSAVE_PA, 0);
 
 	amd_pmu_disable_virt();
 }
@@ -516,12 +499,12 @@ static int svm_enable_virtualization_cpu(void)
 {
 	struct svm_cpu_data *sd;
-	uint64_t efer;
 	int me = raw_smp_processor_id();
+	int r;
 
-	rdmsrq(MSR_EFER, efer);
-	if (efer & EFER_SVME)
-		return -EBUSY;
+	r = x86_svm_enable_virtualization_cpu();
+	if (r)
+		return r;
 
 	sd = per_cpu_ptr(&svm_data, me);
 	sd->asid_generation = 1;
@@ -529,8 +512,6 @@ static int svm_enable_virtualization_cpu(void)
 	sd->next_asid = sd->max_asid + 1;
 	sd->min_asid = max_sev_asid + 1;
 
-	wrmsrq(MSR_EFER, efer | EFER_SVME);
-
 	wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
 
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
@@ -541,7 +522,6 @@ static int svm_enable_virtualization_cpu(void)
 		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 	}
 
-
 	/*
 	 * Get OSVW bits.
 	 *
diff --git a/arch/x86/virt/hw.c b/arch/x86/virt/hw.c
index dc426c2bc24ad..014e9dfab805b 100644
--- a/arch/x86/virt/hw.c
+++ b/arch/x86/virt/hw.c
@@ -163,6 +163,59 @@ static __init int x86_vmx_init(void)
 static __init int x86_vmx_init(void) { return -EOPNOTSUPP; }
 #endif
 
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void)
+{
+	u64 efer;
+
+	if (!cpu_feature_enabled(X86_FEATURE_SVM))
+		return -EOPNOTSUPP;
+
+	rdmsrq(MSR_EFER, efer);
+	if (efer & EFER_SVME)
+		return -EBUSY;
+
+	wrmsrq(MSR_EFER, efer | EFER_SVME);
+	return 0;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_enable_virtualization_cpu);
+
+int x86_svm_disable_virtualization_cpu(void)
+{
+	int r = -EIO;
+	u64 efer;
+
+	/*
+	 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+	 * NMI aren't blocked.
+	 */
+	asm goto("1: stgi\n\t"
+		 _ASM_EXTABLE(1b, %l[fault])
+		 ::: "memory" : fault);
+	r = 0;
+
+fault:
+	rdmsrq(MSR_EFER, efer);
+	wrmsrq(MSR_EFER, efer & ~EFER_SVME);
+	return r;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_disable_virtualization_cpu);
+
+void x86_svm_emergency_disable_virtualization_cpu(void)
+{
+	u64 efer;
+
+	virt_rebooting = true;
+
+	rdmsrq(MSR_EFER, efer);
+	if (!(efer & EFER_SVME))
+		return;
+
+	x86_svm_disable_virtualization_cpu();
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_emergency_disable_virtualization_cpu);
+#endif
+
 void __init x86_virt_init(void)
 {
 	x86_vmx_init();
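For illustration only, a minimal sketch of the "more unified API" the
changelog alludes to: a vendor-neutral per-CPU enable helper that KVM
could call without dispatching on VMX vs. SVM itself.  Everything below
is hypothetical and not part of this patch; in particular,
x86_virt_enable_virtualization_cpu() is an invented name, and
x86_vmx_enable_virtualization_cpu() is assumed to exist alongside the
*_disable variants declared in virt.h above.

/*
 * Hypothetical sketch, not part of this patch: dispatch to the
 * per-vendor core-x86 enable helper based on CPU features, mirroring
 * the CONFIG_KVM_INTEL/CONFIG_KVM_AMD guards used in asm/virt.h.
 */
#include <linux/errno.h>
#include <linux/kconfig.h>

#include <asm/cpufeature.h>
#include <asm/virt.h>

int x86_virt_enable_virtualization_cpu(void)
{
#if IS_ENABLED(CONFIG_KVM_INTEL)
	if (cpu_feature_enabled(X86_FEATURE_VMX))
		return x86_vmx_enable_virtualization_cpu();
#endif
#if IS_ENABLED(CONFIG_KVM_AMD)
	if (cpu_feature_enabled(X86_FEATURE_SVM))
		return x86_svm_enable_virtualization_cpu();
#endif
	return -EOPNOTSUPP;
}

Note that x86_svm_enable_virtualization_cpu() already fails with
-EOPNOTSUPP when X86_FEATURE_SVM is absent, so the feature checks here
only select the vendor path; error handling stays in the per-vendor
helpers.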