git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86: Rename "governed features" helpers to use "guest_cpu_cap"
Author: Sean Christopherson <seanjc@google.com>
Thu, 28 Nov 2024 01:34:06 +0000 (17:34 -0800)
Committer: Sean Christopherson <seanjc@google.com>
Wed, 18 Dec 2024 22:20:03 +0000 (14:20 -0800)
As the first step toward replacing KVM's so-called "governed features"
framework with a more comprehensive, less poorly named implementation,
replace the "kvm_governed_feature" function prefix with "guest_cpu_cap"
and rename guest_can_use() to guest_cpu_cap_has().

The "guest_cpu_cap" naming scheme mirrors that of "kvm_cpu_cap", and
provides a clearer distinction between guest capabilities, which are
KVM controlled (heh, or one might say "governed"), and guest CPUID, which
with few exceptions is fully userspace controlled.

Opportunistically rewrite the comment about XSS passthrough for SEV-ES
guests to avoid referencing so many functions, as such comments are prone
to becoming stale (case in point...).

No functional change intended.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Link: https://lore.kernel.org/r/20241128013424.4096668-40-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 202ab3f55f53e38d110ebd8d736f3c0fa7ec76cc..42d05bb73e5b5e2632cf1d581ed0ac70236c93b2 100644 (file)
@@ -380,7 +380,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
                                      guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
        if (allow_gbpages)
-               kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);
+               guest_cpu_cap_set(vcpu, X86_FEATURE_GBPAGES);
 
        best = kvm_find_cpuid_entry(vcpu, 1);
        if (best && apic) {
index 99d4f624561082a86442a7ac09a20eede742e114..8fe0c1772f19cbd1054bd6a3f29df3aa90064406 100644 (file)
@@ -238,8 +238,8 @@ static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
        return kvm_governed_feature_index(x86_feature) >= 0;
 }
 
-static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
-                                                    unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_set(struct kvm_vcpu *vcpu,
+                                             unsigned int x86_feature)
 {
        BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
 
@@ -247,15 +247,15 @@ static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
                  vcpu->arch.governed_features.enabled);
 }
 
-static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
-                                                              unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_check_and_set(struct kvm_vcpu *vcpu,
+                                                       unsigned int x86_feature)
 {
        if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
-               kvm_governed_feature_set(vcpu, x86_feature);
+               guest_cpu_cap_set(vcpu, x86_feature);
 }
 
-static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
-                                         unsigned int x86_feature)
+static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
+                                             unsigned int x86_feature)
 {
        BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
 
@@ -265,7 +265,7 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
 
 static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-       if (guest_can_use(vcpu, X86_FEATURE_LAM))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
                cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
 
        return kvm_vcpu_is_legal_gpa(vcpu, cr3);
index e9322358678b672944497c7b7a8f1e7e9121a31c..caec3d11638d3b1849df7fff9cc7dcd4cb549371 100644 (file)
@@ -126,7 +126,7 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
 
 static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
 {
-       if (!guest_can_use(vcpu, X86_FEATURE_LAM))
+       if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
                return 0;
 
        return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
index 22e7ad235123136faf054138d4df3df40de844da..d138560a9320a0e1511206424d9bdee643a46819 100644 (file)
@@ -5034,7 +5034,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
        __reset_rsvds_bits_mask(&context->guest_rsvd_check,
                                vcpu->arch.reserved_gpa_bits,
                                context->cpu_role.base.level, is_efer_nx(context),
-                               guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+                               guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
                                is_cr4_pse(context),
                                guest_cpuid_is_amd_compatible(vcpu));
 }
@@ -5111,7 +5111,7 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
        __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
                                context->root_role.level,
                                context->root_role.efer_nx,
-                               guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+                               guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
                                is_pse, is_amd);
 
        if (!shadow_me_mask)
index b708bdf7eaffd233af174c4f3740716306919c02..d77b094d9a4d60985bde7c0d2b7b93264edc4d59 100644 (file)
@@ -111,7 +111,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 
 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
 {
-       if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
+       if (!guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
                return true;
 
        if (!nested_npt_enabled(svm))
@@ -594,7 +594,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
                vmcb_mark_dirty(vmcb02, VMCB_DR);
        }
 
-       if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+       if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
                     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                /*
                 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
@@ -651,7 +651,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
         * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
         */
 
-       if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_VGIF) &&
            (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
                int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
        else
@@ -689,7 +689,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 
        vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
 
-       if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
            svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
                nested_svm_update_tsc_ratio_msr(vcpu);
 
@@ -710,7 +710,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
         * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
         * prior to injecting the event).
         */
-       if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
                vmcb02->control.next_rip    = svm->nested.ctl.next_rip;
        else if (boot_cpu_has(X86_FEATURE_NRIPS))
                vmcb02->control.next_rip    = vmcb12_rip;
@@ -720,7 +720,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
                svm->soft_int_injected = true;
                svm->soft_int_csbase = vmcb12_csbase;
                svm->soft_int_old_rip = vmcb12_rip;
-               if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+               if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
                        svm->soft_int_next_rip = svm->nested.ctl.next_rip;
                else
                        svm->soft_int_next_rip = vmcb12_rip;
@@ -728,18 +728,18 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 
        vmcb02->control.virt_ext            = vmcb01->control.virt_ext &
                                              LBR_CTL_ENABLE_MASK;
-       if (guest_can_use(vcpu, X86_FEATURE_LBRV))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV))
                vmcb02->control.virt_ext  |=
                        (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
 
        if (!nested_vmcb_needs_vls_intercept(svm))
                vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
 
-       if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_PAUSEFILTER))
                pause_count12 = svm->nested.ctl.pause_filter_count;
        else
                pause_count12 = 0;
-       if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_PFTHRESHOLD))
                pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
        else
                pause_thresh12 = 0;
@@ -1026,7 +1026,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        if (vmcb12->control.exit_code != SVM_EXIT_ERR)
                nested_save_pending_event_to_vmcb12(svm, vmcb12);
 
-       if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
                vmcb12->control.next_rip  = vmcb02->control.next_rip;
 
        vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
@@ -1065,7 +1065,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        if (!nested_exit_on_intr(svm))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
-       if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+       if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
                     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                svm_copy_lbrs(vmcb12, vmcb02);
                svm_update_lbrv(vcpu);
index 943bd074a5d37212a1fdf1f01e42f1c8f0e416c6..1dad4b50b31cbba779a446c587ce43b2f591dde6 100644 (file)
@@ -4445,16 +4445,15 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
         * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
         * the host/guest supports its use.
         *
-        * guest_can_use() checks a number of requirements on the host/guest to
-        * ensure that MSR_IA32_XSS is available, but it might report true even
-        * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
-        * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
-        * to further check that the guest CPUID actually supports
-        * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
-        * guests will still get intercepted and caught in the normal
-        * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
+        * KVM treats the guest as being capable of using XSAVES even if XSAVES
+        * isn't enabled in guest CPUID as there is no intercept for XSAVES,
+        * i.e. the guest can use XSAVES/XRSTOR to read/write XSS if XSAVE is
+        * exposed to the guest and XSAVES is supported in hardware.  Condition
+        * full XSS passthrough on the guest being able to use XSAVES *and*
+        * XSAVES being exposed to the guest so that KVM can at least honor
+        * guest CPUID for RDMSR and WRMSR.
         */
-       if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
            guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
                set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
        else
index dd15cc635655347576097359c1a8fde1a6f740a4..f96c62a9d2c2885ea693a0f0ce30a6a236d77a18 100644 (file)
@@ -1049,7 +1049,7 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
        bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
-                           (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+                           (is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
                            (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
 
        if (enable_lbrv == current_enable_lbrv)
@@ -2864,7 +2864,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        switch (msr_info->index) {
        case MSR_AMD64_TSC_RATIO:
                if (!msr_info->host_initiated &&
-                   !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR))
+                   !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR))
                        return 1;
                msr_info->data = svm->tsc_ratio_msr;
                break;
@@ -3024,7 +3024,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        switch (ecx) {
        case MSR_AMD64_TSC_RATIO:
 
-               if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) {
+               if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) {
 
                        if (!msr->host_initiated)
                                return 1;
@@ -3046,7 +3046,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
                svm->tsc_ratio_msr = data;
 
-               if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+               if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
                    is_guest_mode(vcpu))
                        nested_svm_update_tsc_ratio_msr(vcpu);
 
@@ -4404,11 +4404,11 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        if (boot_cpu_has(X86_FEATURE_XSAVE) &&
            boot_cpu_has(X86_FEATURE_XSAVES) &&
            guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
-               kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);
+               guest_cpu_cap_set(vcpu, X86_FEATURE_XSAVES);
 
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_NRIPS);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_LBRV);
 
        /*
         * Intercept VMLOAD if the vCPU model is Intel in order to emulate that
@@ -4416,12 +4416,12 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
         * SVM on Intel is bonkers and extremely unlikely to work).
         */
        if (!guest_cpuid_is_intel_compatible(vcpu))
-               kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
+               guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
 
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VGIF);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VNMI);
 
        svm_recalc_instruction_intercepts(vcpu, svm);
 
index 43fa6a16eb1917821945ca8fdebf1caff1ddcb54..6eff8c60d5eb9e2b6582c3c42ecac4e894f0a2b5 100644 (file)
@@ -502,7 +502,7 @@ static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
 
 static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
 {
-       return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
+       return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
               (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
 }
 
@@ -554,7 +554,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
 
 static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
 {
-       return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
+       return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
               (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
 }
 
index aa78b6f38dfefd5bc0a56043f7d1b6a94a51f221..9aaa703f5f9813167905b05ea52d2baa1894f975 100644 (file)
@@ -6617,7 +6617,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
        vmx = to_vmx(vcpu);
        vmcs12 = get_vmcs12(vcpu);
 
-       if (guest_can_use(vcpu, X86_FEATURE_VMX) &&
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) &&
            (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
                kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
                kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
@@ -6758,7 +6758,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
                        return -EINVAL;
        } else {
-               if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+               if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
                        return -EINVAL;
 
                if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
@@ -6792,7 +6792,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                return -EINVAL;
 
        if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
-           (!guest_can_use(vcpu, X86_FEATURE_VMX) ||
+           (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) ||
             !vmx->nested.enlightened_vmcs_enabled))
                        return -EINVAL;
 
index 893366e5373224bfdff0125cf4e70abe5feafd30..ccba522246c37604ada47cec140ff8afd81293e3 100644 (file)
@@ -2084,7 +2084,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
                break;
        case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
-               if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+               if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
                        return 1;
                if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
                                    &msr_info->data))
@@ -2394,7 +2394,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
                if (!msr_info->host_initiated)
                        return 1; /* they are read-only */
-               if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+               if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
                        return 1;
                return vmx_set_vmx_msr(vcpu, msr_index, data);
        case MSR_IA32_RTIT_CTL:
@@ -4591,7 +4591,7 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
                                                                                                \
        if (cpu_has_vmx_##name()) {                                                             \
                if (kvm_is_governed_feature(X86_FEATURE_##feat_name))                           \
-                       __enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name);             \
+                       __enabled = guest_cpu_cap_has(__vcpu, X86_FEATURE_##feat_name);         \
                else                                                                            \
                        __enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name);           \
                vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
@@ -7830,10 +7830,10 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
         */
        if (boot_cpu_has(X86_FEATURE_XSAVE) &&
            guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
-               kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
+               guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_XSAVES);
 
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
-       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VMX);
+       guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_LAM);
 
        vmx_setup_uret_msrs(vmx);
 
@@ -7841,7 +7841,7 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                vmcs_set_secondary_exec_control(vmx,
                                                vmx_secondary_exec_control(vmx));
 
-       if (guest_can_use(vcpu, X86_FEATURE_VMX))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
                vmx->msr_ia32_feature_control_valid_bits |=
                        FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
                        FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
@@ -7850,7 +7850,7 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                        ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
                          FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
 
-       if (guest_can_use(vcpu, X86_FEATURE_VMX))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
                nested_vmx_cr_fixed1_bits_update(vcpu);
 
        if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
index adcaa033c5d37d7f3792d575e4ea9739cf2732dd..743278e36ba40f2da8e5a8337c8fc132a09686bc 100644 (file)
@@ -1177,7 +1177,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
                if (vcpu->arch.xcr0 != kvm_host.xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
 
-               if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+               if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
                    vcpu->arch.ia32_xss != kvm_host.xss)
                        wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
        }
@@ -1208,7 +1208,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
                if (vcpu->arch.xcr0 != kvm_host.xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
 
-               if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+               if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
                    vcpu->arch.ia32_xss != kvm_host.xss)
                        wrmsrl(MSR_IA32_XSS, kvm_host.xss);
        }