KVM: SVM: Move core EFER.SVME enablement to kernel
author Sean Christopherson <seanjc@google.com>
Sat, 14 Feb 2026 01:26:53 +0000 (17:26 -0800)
committer Sean Christopherson <seanjc@google.com>
Wed, 4 Mar 2026 16:52:45 +0000 (08:52 -0800)
Move the innermost EFER.SVME logic out of KVM and into core x86 to land
the SVM support alongside VMX support.  This will allow providing a more
unified API from the kernel to KVM, and will allow moving the bulk of the
emergency disabling insanity out of KVM without having a weird split
between kernel and KVM for SVM vs. VMX.

No functional change intended.
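
For illustration only, a minimal sketch of the post-patch split (a
hypothetical caller, not part of this patch; only the x86_svm_*() helper
names come from the patch itself):

    /*
     * Hypothetical KVM-side enable path: core x86 now owns the
     * EFER.SVME toggle, while KVM keeps only SVM-specific per-CPU
     * setup.
     */
    static int example_enable_svm_cpu(void)
    {
            int r;

            /* Core helper sets EFER.SVME; fails with -EBUSY if already set. */
            r = x86_svm_enable_virtualization_cpu();
            if (r)
                    return r;

            /* SVM-specific state, e.g. MSR_VM_HSAVE_PA, stays in KVM. */
            return 0;
    }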

Tested-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Sagi Shahar <sagis@google.com>
Link: https://patch.msgid.link/20260214012702.2368778-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/virt.h
arch/x86/kvm/svm/svm.c
arch/x86/virt/hw.c

diff --git a/arch/x86/include/asm/virt.h b/arch/x86/include/asm/virt.h
index cca0210a5c16412aabf933e912078bd3c79bc488..9a0753eaa20ce3179c4c3001a3152e14d14d6c02 100644
--- a/arch/x86/include/asm/virt.h
+++ b/arch/x86/include/asm/virt.h
@@ -15,6 +15,12 @@ int x86_vmx_disable_virtualization_cpu(void);
 void x86_vmx_emergency_disable_virtualization_cpu(void);
 #endif
 
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void);
+int x86_svm_disable_virtualization_cpu(void);
+void x86_svm_emergency_disable_virtualization_cpu(void);
+#endif
+
 #else
 static __always_inline void x86_virt_init(void) {}
 #endif
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0ae66c770ebc681e8f466c25bf276aa60cfefd43..fc08450cb4b7803917232ec9c59139e195d177aa 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -478,27 +478,9 @@ static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm
        return &sd->save_area->host_sev_es_save;
 }
 
-static inline void kvm_cpu_svm_disable(void)
-{
-       uint64_t efer;
-
-       wrmsrq(MSR_VM_HSAVE_PA, 0);
-       rdmsrq(MSR_EFER, efer);
-       if (efer & EFER_SVME) {
-               /*
-                * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
-                * NMI aren't blocked.
-                */
-               stgi();
-               wrmsrq(MSR_EFER, efer & ~EFER_SVME);
-       }
-}
-
 static void svm_emergency_disable_virtualization_cpu(void)
 {
-       virt_rebooting = true;
-
-       kvm_cpu_svm_disable();
+       wrmsrq(MSR_VM_HSAVE_PA, 0);
 }
 
 static void svm_disable_virtualization_cpu(void)
@@ -507,7 +489,8 @@ static void svm_disable_virtualization_cpu(void)
        if (tsc_scaling)
                __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 
-       kvm_cpu_svm_disable();
+       x86_svm_disable_virtualization_cpu();
+       wrmsrq(MSR_VM_HSAVE_PA, 0);
 
        amd_pmu_disable_virt();
 }
@@ -516,12 +499,12 @@ static int svm_enable_virtualization_cpu(void)
 {
 
        struct svm_cpu_data *sd;
-       uint64_t efer;
        int me = raw_smp_processor_id();
+       int r;
 
-       rdmsrq(MSR_EFER, efer);
-       if (efer & EFER_SVME)
-               return -EBUSY;
+       r = x86_svm_enable_virtualization_cpu();
+       if (r)
+               return r;
 
        sd = per_cpu_ptr(&svm_data, me);
        sd->asid_generation = 1;
@@ -529,8 +512,6 @@ static int svm_enable_virtualization_cpu(void)
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;
 
-       wrmsrq(MSR_EFER, efer | EFER_SVME);
-
        wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
 
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
@@ -541,7 +522,6 @@ static int svm_enable_virtualization_cpu(void)
                __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
        }
 
-
        /*
         * Get OSVW bits.
         *
diff --git a/arch/x86/virt/hw.c b/arch/x86/virt/hw.c
index dc426c2bc24adfd3b238e0b78687cad004e796db..014e9dfab805bb434705db4ef496c486863fec49 100644
--- a/arch/x86/virt/hw.c
+++ b/arch/x86/virt/hw.c
@@ -163,6 +163,59 @@ static __init int x86_vmx_init(void)
 static __init int x86_vmx_init(void) { return -EOPNOTSUPP; }
 #endif
 
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void)
+{
+       u64 efer;
+
+       if (!cpu_feature_enabled(X86_FEATURE_SVM))
+               return -EOPNOTSUPP;
+
+       rdmsrq(MSR_EFER, efer);
+       if (efer & EFER_SVME)
+               return -EBUSY;
+
+       wrmsrq(MSR_EFER, efer | EFER_SVME);
+       return 0;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_enable_virtualization_cpu);
+
+int x86_svm_disable_virtualization_cpu(void)
+{
+       int r = -EIO;
+       u64 efer;
+
+       /*
+        * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+        * NMI aren't blocked.
+        */
+       asm goto("1: stgi\n\t"
+                _ASM_EXTABLE(1b, %l[fault])
+                ::: "memory" : fault);
+       r = 0;
+
+fault:
+       rdmsrq(MSR_EFER, efer);
+       wrmsrq(MSR_EFER, efer & ~EFER_SVME);
+       return r;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_disable_virtualization_cpu);
+
+void x86_svm_emergency_disable_virtualization_cpu(void)
+{
+       u64 efer;
+
+       virt_rebooting = true;
+
+       rdmsrq(MSR_EFER, efer);
+       if (!(efer & EFER_SVME))
+               return;
+
+       x86_svm_disable_virtualization_cpu();
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_emergency_disable_virtualization_cpu);
+#endif
+
 void __init x86_virt_init(void)
 {
        x86_vmx_init();
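
For reference, a minimal usage sketch of the new enable/disable pairing
(a hypothetical harness, not part of this patch; the error values follow
from the helpers above):

    /* Hypothetical enable/disable round-trip on the current CPU. */
    static int example_svm_cpu_roundtrip(void)
    {
            int r = x86_svm_enable_virtualization_cpu();

            if (r)  /* -EOPNOTSUPP without SVM, -EBUSY if SVME is set */
                    return r;

            /* ... guest execution would happen here ... */

            /* STGI, then clear EFER.SVME; returns -EIO if STGI faults. */
            return x86_svm_disable_virtualization_cpu();
    }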