git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: x86/pmu: Zero out PMU metadata on AMD if PMU is disabled
Author: Sean Christopherson <seanjc@google.com>
Fri, 10 Nov 2023 02:28:48 +0000 (18:28 -0800)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 2 May 2024 14:35:23 +0000 (16:35 +0200)
[ Upstream commit f933b88e20150f15787390e2a1754a7e412754ed ]

Move the purging of common PMU metadata from intel_pmu_refresh() to
kvm_pmu_refresh(), and invoke the vendor refresh() hook if and only if
the VM is supposed to have a vPMU.

KVM already denies access to the PMU based on kvm->arch.enable_pmu, as
get_gp_pmc_amd() returns NULL for all PMCs in that case, i.e. KVM already
violates AMD's architecture by not virtualizing a PMU (kernels have long
since learned to not panic when the PMU is unavailable).  But configuring
the PMU as if it were enabled causes unwanted side effects, e.g. calls to
kvm_pmu_trigger_event() waste an absurd number of cycles due to the
all_valid_pmc_idx bitmap being non-zero.

Fixes: b1d66dad65dc ("KVM: x86/svm: Add module param to control PMU virtualization")
Reported-by: Konstantin Khorenko <khorenko@virtuozzo.com>
Closes: https://lore.kernel.org/all/20231109180646.2963718-2-khorenko@virtuozzo.com
Link: https://lore.kernel.org/r/20231110022857.1273836-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Stable-dep-of: de120e1d692d ("KVM: x86/pmu: Set enable bits for GP counters in PERF_GLOBAL_CTRL at "RESET"")
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/kvm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

index 87cc6c8809ad88898894bd0ea6199ab70e2a91ac..38512954ec26749e1d0f2a1ba62d0bd13a5c4fe9 100644 (file)
@@ -741,6 +741,8 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
        if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
                return;
 
@@ -750,8 +752,22 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
         */
        kvm_pmu_reset(vcpu);
 
-       bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
-       static_call(kvm_x86_pmu_refresh)(vcpu);
+       pmu->version = 0;
+       pmu->nr_arch_gp_counters = 0;
+       pmu->nr_arch_fixed_counters = 0;
+       pmu->counter_bitmask[KVM_PMC_GP] = 0;
+       pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+       pmu->reserved_bits = 0xffffffff00200000ull;
+       pmu->raw_event_mask = X86_RAW_EVENT_MASK;
+       pmu->global_ctrl_mask = ~0ull;
+       pmu->global_status_mask = ~0ull;
+       pmu->fixed_ctr_ctrl_mask = ~0ull;
+       pmu->pebs_enable_mask = ~0ull;
+       pmu->pebs_data_cfg_mask = ~0ull;
+       bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
+
+       if (vcpu->kvm->arch.enable_pmu)
+               static_call(kvm_x86_pmu_refresh)(vcpu);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
index 315c7c2ba89b13437fe4c3cbb93d92f75bd8f3f1..600a021ae958b8b6b92588209e4f575308b00963 100644 (file)
@@ -491,19 +491,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        u64 counter_mask;
        int i;
 
-       pmu->nr_arch_gp_counters = 0;
-       pmu->nr_arch_fixed_counters = 0;
-       pmu->counter_bitmask[KVM_PMC_GP] = 0;
-       pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
-       pmu->version = 0;
-       pmu->reserved_bits = 0xffffffff00200000ull;
-       pmu->raw_event_mask = X86_RAW_EVENT_MASK;
-       pmu->global_ctrl_mask = ~0ull;
-       pmu->global_status_mask = ~0ull;
-       pmu->fixed_ctr_ctrl_mask = ~0ull;
-       pmu->pebs_enable_mask = ~0ull;
-       pmu->pebs_data_cfg_mask = ~0ull;
-
        memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
 
        /*
@@ -515,8 +502,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                return;
 
        entry = kvm_find_cpuid_entry(vcpu, 0xa);
-       if (!entry || !vcpu->kvm->arch.enable_pmu)
+       if (!entry)
                return;
+
        eax.full = entry->eax;
        edx.full = entry->edx;