KVM: VMX: Adjust number of LBR records for PERF_CAPABILITIES at refresh
author    Sean Christopherson <seanjc@google.com>
          Wed, 27 Jul 2022 23:34:24 +0000 (23:34 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 10 Aug 2022 19:08:30 +0000 (15:08 -0400)
Now that the PMU is refreshed when MSR_IA32_PERF_CAPABILITIES is written
by host userspace, zero out the number of LBR records for a vCPU during
PMU refresh if PMU_CAP_LBR_FMT is not set in PERF_CAPABILITIES instead of
handling the check at run-time.

guest_cpuid_has() is expensive due to the linear search of guest CPUID
entries, intel_pmu_lbr_is_enabled() is checked on every VM-Enter, _and_
simply enumerating the same "Model" as the host causes KVM to set the
number of LBR records to a non-zero value.

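The guest_cpuid_has() cost stems from KVM's CPUID lookup walking the
vCPU's entry array on every query. A simplified sketch of that lookup
(not the exact kernel code; the helper name and the omitted index-flag
handling are abbreviations for illustration):

        /*
         * Simplified sketch of the linear lookup behind
         * guest_cpuid_has(): each query scans the vCPU's CPUID entries
         * until it finds a matching function/index pair, which is why
         * callers avoid it on hot paths such as VM-Enter.
         */
        static struct kvm_cpuid_entry2 *find_cpuid_entry(struct kvm_vcpu *vcpu,
                                                         u32 function, u32 index)
        {
                int i;

                for (i = 0; i < vcpu->arch.cpuid_nent; i++) {
                        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];

                        if (e->function == function && e->index == index)
                                return e;
                }

                return NULL;
        }
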
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220727233424.2968356-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 862c1a4d971b21761947e68d375007e1740ad1c4..c399637a3a79bf399529b9a4069a9117a9b817fb 100644
@@ -171,13 +171,6 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
        return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
 }
 
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
-{
-       struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
-
-       return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
-}
-
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
        struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
@@ -592,7 +585,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
-       if (cpuid_model_is_consistent(vcpu))
+       perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+       if (cpuid_model_is_consistent(vcpu) &&
+           (perf_capabilities & PMU_CAP_LBR_FMT))
                x86_perf_get_lbr(&lbr_desc->records);
        else
                lbr_desc->records.nr = 0;
@@ -600,7 +595,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        if (lbr_desc->records.nr)
                bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
 
-       perf_capabilities = vcpu_get_perf_capabilities(vcpu);
        if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
                if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
                        pmu->pebs_enable_mask = counter_mask;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index f57a527cc5894e8b10b2532cca9c072d98a97418..24d58c2ffaa3d9dd4f4a6abcb12791abbe1b2a36 100644
@@ -544,9 +544,12 @@ static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
        return &vcpu_to_lbr_desc(vcpu)->records;
 }
 
-void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
+static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+       return !!vcpu_to_lbr_records(vcpu)->nr;
+}
 
+void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
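With the helper inlined, a caller on the VM-Enter path pays only a load
and test of the cached record count. A hypothetical caller sketch (the
body of vmx_passthrough_lbr_msrs is not part of this diff and is
simplified here):

        /*
         * Hypothetical caller sketch: because PMU refresh now zeroes
         * records.nr whenever PMU_CAP_LBR_FMT is clear, the hot-path
         * gate is a single inlined field test.
         */
        void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
        {
                if (!intel_pmu_lbr_is_enabled(vcpu))
                        return;

                /* ... pass the LBR MSRs through to the guest ... */
        }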