KVM: x86/pmu: Accept 0 for absent PMU MSRs when host-initiated if !enable_pmu
author     Like Xu <likexu@tencent.com>
           Wed, 1 Jun 2022 03:19:23 +0000 (11:19 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 17 Aug 2022 13:16:14 +0000 (15:16 +0200)
[ Upstream commit 8e6a58e28b34e8d247e772159b8fa8f6bae39192 ]

Whenever an MSR is part of KVM_GET_MSR_INDEX_LIST, as is the case for
MSR_K7_EVNTSEL0 or MSR_F15H_PERF_CTL0, it must always be retrievable and
settable with KVM_GET_MSR and KVM_SET_MSR.

Accept a zero value for these MSRs to obey the contract.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220601031925.59693-1-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/kvm/pmu.c
arch/x86/kvm/svm/pmu.c
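
For reference, the contract described in the commit message can be exercised
from userspace with the documented KVM ioctls. The sketch below is not part of
the patch; it is a minimal illustration that assumes an x86 host with /dev/kvm
and uses only KVM_GET_MSR_INDEX_LIST, KVM_CREATE_VM, KVM_CREATE_VCPU and
KVM_GET_MSRS:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}

	/* First call with nmsrs == 0: KVM fails with E2BIG but fills in
	 * the number of indices, so we know how much to allocate. */
	struct kvm_msr_list probe = { .nmsrs = 0 };
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	struct kvm_msr_list *list = calloc(1, sizeof(*list) +
					   probe.nmsrs * sizeof(list->indices[0]));
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0) {
		perror("KVM_GET_MSR_INDEX_LIST");
		return 1;
	}

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* Every MSR reported by KVM_GET_MSR_INDEX_LIST is supposed to be
	 * readable (and writable) host-initiated, i.e. via KVM_GET_MSRS
	 * and KVM_SET_MSRS on the vCPU fd. */
	for (unsigned int i = 0; i < list->nmsrs; i++) {
		struct kvm_msrs *req = calloc(1, sizeof(*req) +
					      sizeof(struct kvm_msr_entry));
		req->nmsrs = 1;
		req->entries[0].index = list->indices[i];

		if (ioctl(vcpu, KVM_GET_MSRS, req) != 1)
			printf("MSR 0x%x is not retrievable\n", list->indices[i]);
		free(req);
	}
	return 0;
}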

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 3f868fed91145308d8b36d47d58069407908582e..2334ddfbbab21f59e9be12e976ca6e7dce0931fe 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -433,11 +433,19 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
+       if (msr_info->host_initiated && !vcpu->kvm->arch.enable_pmu) {
+               msr_info->data = 0;
+               return 0;
+       }
+
        return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
+       if (msr_info->host_initiated && !vcpu->kvm->arch.enable_pmu)
+               return !!msr_info->data;
+
        kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
        return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
 }
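
With the pmu.c change above, a VM whose PMU is absent (vcpu->kvm->arch.enable_pmu
== false, e.g. because the kvm.enable_pmu module parameter is off) answers
host-initiated reads of these MSRs with 0 and accepts host-initiated writes only
of the value 0: kvm_pmu_set_msr() returns !!data, and a non-zero return is an
error. A rough userspace illustration, continuing the sketch above; the helper
name is ours and MSR_K7_EVNTSEL0 (0xc0010000) is hard-coded purely for the
example:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Write a single MSR via KVM_SET_MSRS; the ioctl returns how many of the
 * requested MSRs were actually set, so 1 means success and 0 means the
 * write was refused. */
static int set_one_msr(int vcpu, uint32_t index, uint64_t data)
{
	struct kvm_msrs *req = calloc(1, sizeof(*req) +
				      sizeof(struct kvm_msr_entry));
	int ret;

	req->nmsrs = 1;
	req->entries[0].index = index;
	req->entries[0].data = data;
	ret = ioctl(vcpu, KVM_SET_MSRS, req);
	free(req);
	return ret;
}

/* Expected behaviour with the vPMU absent (kvm->arch.enable_pmu == false):
 *
 *   set_one_msr(vcpu, 0xc0010000, 0);   returns 1: zero is accepted
 *   set_one_msr(vcpu, 0xc0010000, 1);   returns 0: non-zero is refused
 *
 * and the corresponding KVM_GET_MSRS read reports a value of 0. */
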
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 136039fc6d0101c9f69150056f0b885c11ddb030..d93ecb25fe1721100bfbe15fbb93f99161b1fa63 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -232,7 +232,16 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
 {
        /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough.  */
-       return false;
+       if (!host_initiated)
+               return false;
+
+       switch (msr) {
+       case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
+       case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+               return true;
+       default:
+               return false;
+       }
 }
 
 static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
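
The two case ranges above cover the legacy K7 event-select/counter pairs and the
six F15H (PerfCtrExt) control/counter pairs, so only host-initiated accesses to
those twenty MSR indices are claimed; everything else still returns false. A
standalone sketch of the same range check with the numeric indices written out
(the helper name is ours; the values are those defined in
arch/x86/include/asm/msr-index.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MSR index values from arch/x86/include/asm/msr-index.h:
 * MSR_K7_EVNTSEL0..3 are 0xc0010000..0xc0010003, MSR_K7_PERFCTR0..3 are
 * 0xc0010004..0xc0010007, and the F15H CTL/CTR pairs interleave from
 * 0xc0010200 (PERF_CTL0) up to 0xc001020b (PERF_CTR5). */
#define MSR_K7_EVNTSEL0		0xc0010000
#define MSR_K7_PERFCTR3		0xc0010007
#define MSR_F15H_PERF_CTL0	0xc0010200
#define MSR_F15H_PERF_CTR5	0xc001020b

/* Plain-comparison equivalent of the GCC case-range switch added above. */
static bool amd_vpmu_msr_in_range(uint32_t msr)
{
	return (msr >= MSR_K7_EVNTSEL0 && msr <= MSR_K7_PERFCTR3) ||
	       (msr >= MSR_F15H_PERF_CTL0 && msr <= MSR_F15H_PERF_CTR5);
}

int main(void)
{
	printf("%d %d %d\n",
	       amd_vpmu_msr_in_range(0xc0010000),	/* MSR_K7_EVNTSEL0    -> 1 */
	       amd_vpmu_msr_in_range(0xc001020b),	/* MSR_F15H_PERF_CTR5 -> 1 */
	       amd_vpmu_msr_in_range(0xc0010015));	/* MSR_K7_HWCR        -> 0 */
	return 0;
}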