return 0;
}
-bool kvm_need_rdpmc_intercept(struct kvm_vcpu *vcpu)
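+/*
+ * Check if at least one PMC must be intercepted, i.e. if the vCPU doesn't
+ * have a mediated PMU or the guest's GP/fixed counter topology doesn't
+ * exactly match hardware.
+ */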
+static bool kvm_need_any_pmc_intercept(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
if (!kvm_vcpu_has_mediated_pmu(vcpu))
return true;
+ /*
+ * Note! Check *host* PMU capabilities, not KVM's PMU capabilities, as
+ * KVM's capabilities are further constrained by KVM support, i.e. KVM's
+ * capabilities themselves may be a subset of hardware capabilities.
+ */
+ return pmu->nr_arch_gp_counters != kvm_host_pmu.num_counters_gp ||
+ pmu->nr_arch_fixed_counters != kvm_host_pmu.num_counters_fixed;
+}
+
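+/*
+ * Intercept the PERF_GLOBAL_* MSRs if any PMC is intercepted, or if the
+ * guest's PMU doesn't support PERF_GLOBAL_CTRL in the first place.
+ */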
+bool kvm_need_perf_global_ctrl_intercept(struct kvm_vcpu *vcpu)
+{
+ return kvm_need_any_pmc_intercept(vcpu) ||
+ !kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_need_perf_global_ctrl_intercept);
+
+bool kvm_need_rdpmc_intercept(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
/*
* VMware allows access to these Pseudo-PMCs even when read via RDPMC
* in Ring3 when CR4.PCE=0.
*/
if (enable_vmware_backdoor)
return true;
- /*
- * Note! Check *host* PMU capabilities, not KVM's PMU capabilities, as
- * KVM's capabilities are constrained based on KVM support, i.e. KVM's
- * capabilities themselves may be a subset of hardware capabilities.
- */
- return pmu->nr_arch_gp_counters != kvm_host_pmu.num_counters_gp ||
- pmu->nr_arch_fixed_counters != kvm_host_pmu.num_counters_fixed ||
+ return kvm_need_any_pmc_intercept(vcpu) ||
pmu->counter_bitmask[KVM_PMC_GP] != (BIT_ULL(kvm_host_pmu.bit_width_gp) - 1) ||
pmu->counter_bitmask[KVM_PMC_FIXED] != (BIT_ULL(kvm_host_pmu.bit_width_fixed) - 1);
}
* in the global controls). Emulate that behavior when refreshing the
* PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
*/
- if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters) {
+ if (pmu->nr_arch_gp_counters &&
+ (kvm_pmu_has_perf_global_ctrl(pmu) || kvm_vcpu_has_mediated_pmu(vcpu)))
pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
- if (kvm_vcpu_has_mediated_pmu(vcpu))
- kvm_pmu_call(write_global_ctrl)(pmu->global_ctrl);
- }
+
+ if (kvm_vcpu_has_mediated_pmu(vcpu))
+ kvm_pmu_call(write_global_ctrl)(pmu->global_ctrl);
bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
bitmap_set(pmu->all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX,
void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
+bool kvm_need_perf_global_ctrl_intercept(struct kvm_vcpu *vcpu);
bool kvm_need_rdpmc_intercept(struct kvm_vcpu *vcpu);
extern struct kvm_pmu_ops intel_pmu_ops;
__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}
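+/*
+ * Recalculate PMU MSR intercepts when the mediated PMU is enabled: pass
+ * through the counter MSRs that are exposed to a vCPU with a mediated PMU,
+ * and intercept everything else.
+ */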
+static void svm_recalc_pmu_msr_intercepts(struct kvm_vcpu *vcpu)
+{
+ bool intercept = !kvm_vcpu_has_mediated_pmu(vcpu);
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ int i;
+
+ if (!enable_mediated_pmu)
+ return;
+
+ /* Legacy counters are always available for AMD CPUs with a PMU. */
+ for (i = 0; i < min(pmu->nr_arch_gp_counters, AMD64_NUM_COUNTERS); i++)
+ svm_set_intercept_for_msr(vcpu, MSR_K7_PERFCTR0 + i,
+ MSR_TYPE_RW, intercept);
+
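+ /*
+ * The extended counter MSRs (MSR_F15H_PERF_CTR*) can only be passed
+ * through if the guest also has PERFCTR_CORE.
+ */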
+ intercept |= !guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE);
+ for (i = 0; i < pmu->nr_arch_gp_counters; i++)
+ svm_set_intercept_for_msr(vcpu, MSR_F15H_PERF_CTR + 2 * i,
+ MSR_TYPE_RW, intercept);
+
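+ /*
+ * Unconditionally intercept any hardware counters that aren't exposed
+ * to the guest.
+ */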
+ for ( ; i < kvm_pmu_cap.num_counters_gp; i++)
+ svm_enable_intercept_for_msr(vcpu, MSR_F15H_PERF_CTR + 2 * i,
+ MSR_TYPE_RW);
+
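+ /*
+ * Pass through the PERF_CNTR_GLOBAL_* MSRs only if no PMC is intercepted
+ * and the guest PMU supports PERF_GLOBAL_CTRL.
+ */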
+ intercept = kvm_need_perf_global_ctrl_intercept(vcpu);
+ svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+ MSR_TYPE_RW, intercept);
+ svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+ MSR_TYPE_RW, intercept);
+ svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+ MSR_TYPE_RW, intercept);
+ svm_set_intercept_for_msr(vcpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
+ MSR_TYPE_RW, intercept);
+}
+
static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (sev_es_guest(vcpu->kvm))
sev_es_recalc_msr_intercepts(vcpu);
+ svm_recalc_pmu_msr_intercepts(vcpu);
+
/*
* x2APIC intercepts are modified on-demand and cannot be filtered by
* userspace.
return &counters[array_index_nospec(idx, num_counters)];
}
-static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
-{
- if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
- return 0;
-
- return vcpu->arch.perf_capabilities;
-}
-
-static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
-{
- return (vcpu_get_perf_capabilities(vcpu) & PERF_CAP_FW_WRITES) != 0;
-}
-
static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
#include <linux/kvm_host.h>
+#include "cpuid.h"
+
+static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
+{
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
+ return 0;
+
+ return vcpu->arch.perf_capabilities;
+}
+
+static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
+{
+ return (vcpu_get_perf_capabilities(vcpu) & PERF_CAP_FW_WRITES) != 0;
+}
+
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
}
}
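+/*
+ * Recalculate PMU MSR intercepts and the VMCS PERF_GLOBAL_CTRL load/save
+ * controls when the mediated PMU is enabled.
+ */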
+static void vmx_recalc_pmu_msr_intercepts(struct kvm_vcpu *vcpu)
+{
+ bool has_mediated_pmu = kvm_vcpu_has_mediated_pmu(vcpu);
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool intercept = !has_mediated_pmu;
+ int i;
+
+ if (!enable_mediated_pmu)
+ return;
+
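+ /*
+ * Load the guest's PERF_GLOBAL_CTRL on VM-Entry, and save the guest value
+ * and load the host value on VM-Exit, if and only if the vCPU has a
+ * mediated PMU.
+ */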
+ vm_entry_controls_changebit(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+ has_mediated_pmu);
+
+ vm_exit_controls_changebit(vmx, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL,
+ has_mediated_pmu);
+
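+ /*
+ * Pass through the GP and fixed counters that are exposed to the guest,
+ * and intercept all other hardware counters.  The full-width aliases
+ * (MSR_IA32_PMC0) additionally require FW_WRITES.
+ */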
+ for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PERFCTR0 + i,
+ MSR_TYPE_RW, intercept);
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PMC0 + i, MSR_TYPE_RW,
+ intercept || !fw_writes_is_enabled(vcpu));
+ }
+ for ( ; i < kvm_pmu_cap.num_counters_gp; i++) {
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PERFCTR0 + i,
+ MSR_TYPE_RW, true);
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PMC0 + i,
+ MSR_TYPE_RW, true);
+ }
+
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+ vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_FIXED_CTR0 + i,
+ MSR_TYPE_RW, intercept);
+ for ( ; i < kvm_pmu_cap.num_counters_fixed; i++)
+ vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_FIXED_CTR0 + i,
+ MSR_TYPE_RW, true);
+
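+ /*
+ * Pass through the PERF_GLOBAL_{STATUS,CTRL,OVF_CTRL} MSRs only if all
+ * counters are passed through and the guest PMU has PERF_GLOBAL_CTRL.
+ */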
+ intercept = kvm_need_perf_global_ctrl_intercept(vcpu);
+ vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_GLOBAL_STATUS,
+ MSR_TYPE_RW, intercept);
+ vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ MSR_TYPE_RW, intercept);
+ vmx_set_intercept_for_msr(vcpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+ MSR_TYPE_RW, intercept);
+}
+
static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
bool intercept;
vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, intercept);
}
- if (enable_mediated_pmu) {
- bool is_mediated_pmu = kvm_vcpu_has_mediated_pmu(vcpu);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- vm_entry_controls_changebit(vmx,
- VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, is_mediated_pmu);
-
- vm_exit_controls_changebit(vmx,
- VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
- VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL, is_mediated_pmu);
- }
+ vmx_recalc_pmu_msr_intercepts(vcpu);
/*
* x2APIC and LBR MSR intercepts are modified on-demand and cannot be