KVM: x86: Add a struct to consolidate host values, e.g. EFER, XCR0, etc...
author    Sean Christopherson <seanjc@google.com>
          Tue, 23 Apr 2024 22:15:18 +0000 (15:15 -0700)
committer Sean Christopherson <seanjc@google.com>
          Mon, 3 Jun 2024 15:58:53 +0000 (08:58 -0700)
Add "struct kvm_host_values kvm_host" to hold the various host values
that KVM snapshots during initialization.  Bundling the host values into
a single struct simplifies adding new MSRs and other features with host
state/values that KVM cares about, and provides a one-stop shop.  E.g.
adding a new value requires one line, whereas tracking each value
individually often requires three: declaration, definition, and export.

No functional change intended.

Link: https://lore.kernel.org/r/20240423221521.2923759-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/sev.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
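
For orientation before the diff, here is a minimal standalone sketch of the consolidation pattern this commit applies; the field names and the definition/export mirror the hunks below, but the snippet is a simplified approximation rather than the literal kernel code.

```c
#include <linux/types.h>   /* u64 */
#include <linux/export.h>  /* EXPORT_SYMBOL_GPL */

/* arch/x86/kvm/x86.h: one struct gathers every host value KVM snapshots
 * at init, so adding a new value is a single field here.
 */
struct kvm_host_values {
	u64 efer;               /* MSR_EFER */
	u64 xcr0;               /* XCR0 via xgetbv() */
	u64 xss;                /* MSR_IA32_XSS */
	u64 arch_capabilities;  /* MSR_IA32_ARCH_CAPABILITIES */
};
extern struct kvm_host_values kvm_host;

/* arch/x86/kvm/x86.c: a single definition + export replaces one
 * declaration/definition/export triple per standalone global.
 */
struct kvm_host_values kvm_host __read_mostly;
EXPORT_SYMBOL_GPL(kvm_host);

/* Call sites then read kvm_host.efer, kvm_host.xss, etc. in place of the
 * old host_efer/host_xss/host_xcr0/host_arch_capabilities globals.
 */
```
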

index ece45b3f6f2073ea81ad65b139173411c77b3d73..64e84e9190e645d49b50cd22d85ee56da69c41bc 100644 (file)
@@ -1853,7 +1853,6 @@ struct kvm_arch_async_pf {
 };
 
 extern u32 __read_mostly kvm_nr_uret_msrs;
-extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
 extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;
index 0623cfaa7bb0ee9f9ca3ec142e99feb41254533b..0435fab4f5366053ff3447bafb9c7adae4d61a63 100644 (file)
@@ -3324,7 +3324,7 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
         */
        hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
        hostsa->pkru = read_pkru();
-       hostsa->xss = host_xss;
+       hostsa->xss = kvm_host.xss;
 
        /*
         * If DebugSwap is enabled, debug registers are loaded but NOT saved by
index d5b832126e34580088a36091b62f0b5a66b011b7..a896df59eaada52d62e28909f49cb22b77a649d5 100644 (file)
@@ -2422,7 +2422,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
        if (cpu_has_load_ia32_efer()) {
                if (guest_efer & EFER_LMA)
                        exec_control |= VM_ENTRY_IA32E_MODE;
-               if (guest_efer != host_efer)
+               if (guest_efer != kvm_host.efer)
                        exec_control |= VM_ENTRY_LOAD_IA32_EFER;
        }
        vm_entry_controls_set(vmx, exec_control);
@@ -2435,7 +2435,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
         * bits may be modified by vmx_set_efer() in prepare_vmcs02().
         */
        exec_control = __vm_exit_controls_get(vmcs01);
-       if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
+       if (cpu_has_load_ia32_efer() && guest_efer != kvm_host.efer)
                exec_control |= VM_EXIT_LOAD_IA32_EFER;
        else
                exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
@@ -4662,7 +4662,7 @@ static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
                return vmcs_read64(GUEST_IA32_EFER);
 
        if (cpu_has_load_ia32_efer())
-               return host_efer;
+               return kvm_host.efer;
 
        for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
                if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
@@ -4673,7 +4673,7 @@ static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
        if (efer_msr)
                return efer_msr->data;
 
-       return host_efer;
+       return kvm_host.efer;
 }
 
 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
index 6051fad5945fa08f9a348fc380799ead2d415991..db4bc6f6c5a087dad807b6558d027927ebd7d11f 100644 (file)
@@ -259,7 +259,7 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
                return 0;
        }
 
-       if (host_arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+       if (kvm_host.arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
                l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
                return 0;
        }
@@ -404,7 +404,7 @@ static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
         * and VM-Exit.
         */
        vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
-                               (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
+                               (kvm_host.arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
                                !boot_cpu_has_bug(X86_BUG_MDS) &&
                                !boot_cpu_has_bug(X86_BUG_TAA);
 
@@ -1123,12 +1123,12 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
         * atomically, since it's faster than switching it manually.
         */
        if (cpu_has_load_ia32_efer() ||
-           (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
+           (enable_ept && ((vmx->vcpu.arch.efer ^ kvm_host.efer) & EFER_NX))) {
                if (!(guest_efer & EFER_LMA))
                        guest_efer &= ~EFER_LME;
-               if (guest_efer != host_efer)
+               if (guest_efer != kvm_host.efer)
                        add_atomic_switch_msr(vmx, MSR_EFER,
-                                             guest_efer, host_efer, false);
+                                             guest_efer, kvm_host.efer, false);
                else
                        clear_atomic_switch_msr(vmx, MSR_EFER);
                return false;
@@ -1141,7 +1141,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
        clear_atomic_switch_msr(vmx, MSR_EFER);
 
        guest_efer &= ~ignore_bits;
-       guest_efer |= host_efer & ignore_bits;
+       guest_efer |= kvm_host.efer & ignore_bits;
 
        vmx->guest_uret_msrs[i].data = guest_efer;
        vmx->guest_uret_msrs[i].mask = ~ignore_bits;
@@ -4357,7 +4357,7 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
        }
 
        if (cpu_has_load_ia32_efer())
-               vmcs_write64(HOST_IA32_EFER, host_efer);
+               vmcs_write64(HOST_IA32_EFER, kvm_host.efer);
 }
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
index 082ac6d95a3a08160d54ec832d0c075c0bd71489..86a0c2d04eaec00d6ab00ef7644f1866b102258f 100644 (file)
 struct kvm_caps kvm_caps __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_caps);
 
+struct kvm_host_values kvm_host __read_mostly;
+EXPORT_SYMBOL_GPL(kvm_host);
+
 #define  ERR_PTR_USR(e)  ((void __user *)ERR_PTR(e))
 
 #define emul_to_vcpu(ctxt) \
@@ -229,21 +232,12 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
                                | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
                                | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
 
-u64 __read_mostly host_efer;
-EXPORT_SYMBOL_GPL(host_efer);
-
 bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
 
 bool __read_mostly enable_apicv = true;
 EXPORT_SYMBOL_GPL(enable_apicv);
 
-u64 __read_mostly host_xss;
-EXPORT_SYMBOL_GPL(host_xss);
-
-u64 __read_mostly host_arch_capabilities;
-EXPORT_SYMBOL_GPL(host_arch_capabilities);
-
 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
        KVM_GENERIC_VM_STATS(),
        STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
@@ -317,8 +311,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
                       sizeof(kvm_vcpu_stats_desc),
 };
 
-u64 __read_mostly host_xcr0;
-
 static struct kmem_cache *x86_emulator_cache;
 
 /*
@@ -1025,11 +1017,11 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 
        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
-               if (vcpu->arch.xcr0 != host_xcr0)
+               if (vcpu->arch.xcr0 != kvm_host.xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
 
                if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
-                   vcpu->arch.ia32_xss != host_xss)
+                   vcpu->arch.ia32_xss != kvm_host.xss)
                        wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
        }
 
@@ -1056,12 +1048,12 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 
        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
-               if (vcpu->arch.xcr0 != host_xcr0)
-                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+               if (vcpu->arch.xcr0 != kvm_host.xcr0)
+                       xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
 
                if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
-                   vcpu->arch.ia32_xss != host_xss)
-                       wrmsrl(MSR_IA32_XSS, host_xss);
+                   vcpu->arch.ia32_xss != kvm_host.xss)
+                       wrmsrl(MSR_IA32_XSS, kvm_host.xss);
        }
 
 }
@@ -1628,7 +1620,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
 
 static u64 kvm_get_arch_capabilities(void)
 {
-       u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
+       u64 data = kvm_host.arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
 
        /*
         * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
@@ -9781,19 +9773,19 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
        kvm_caps.supported_mce_cap = MCG_CTL_P | MCG_SER_P;
 
        if (boot_cpu_has(X86_FEATURE_XSAVE)) {
-               host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-               kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
+               kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+               kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
        }
 
-       rdmsrl_safe(MSR_EFER, &host_efer);
+       rdmsrl_safe(MSR_EFER, &kvm_host.efer);
 
        if (boot_cpu_has(X86_FEATURE_XSAVES))
-               rdmsrl(MSR_IA32_XSS, host_xss);
+               rdmsrl(MSR_IA32_XSS, kvm_host.xss);
 
        kvm_init_pmu_capability(ops->pmu_ops);
 
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, host_arch_capabilities);
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
 
        r = ops->hardware_setup();
        if (r != 0)
index d80a4c6b5a3882a26a87ef8249c56a7ece4d179b..e69fff7d1f210c1441d69d36f594dac2b1caac69 100644 (file)
@@ -33,6 +33,13 @@ struct kvm_caps {
        u64 supported_perf_cap;
 };
 
+struct kvm_host_values {
+       u64 efer;
+       u64 xcr0;
+       u64 xss;
+       u64 arch_capabilities;
+};
+
 void kvm_spurious_fault(void);
 
 #define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)                \
@@ -325,11 +332,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len);
 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
-extern u64 host_xcr0;
-extern u64 host_xss;
-extern u64 host_arch_capabilities;
-
 extern struct kvm_caps kvm_caps;
+extern struct kvm_host_values kvm_host;
 
 extern bool enable_pmu;