git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
author: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tue, 30 Apr 2024 00:52:38 +0000 (08:52 +0800)
committer: Sean Christopherson <seanjc@google.com>
Mon, 3 Jun 2024 21:23:14 +0000 (14:23 -0700)
Several '_mask'-suffixed variables, such as global_ctrl_mask, are
defined in the kvm_pmu structure. However, the _mask suffix is ambiguous
and misleading since it's not a real mask with positive logic. On the
contrary, it represents the reserved bits of the corresponding MSRs, and
these bits should not be accessed.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20240430005239.13527-2-dapeng1.mi@linux.intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

index ece45b3f6f2073ea81ad65b139173411c77b3d73..24d4ac4e3c3310e7c9355de5ca582a773d439310 100644 (file)
@@ -546,12 +546,12 @@ struct kvm_pmu {
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
-       u64 fixed_ctr_ctrl_mask;
+       u64 fixed_ctr_ctrl_rsvd;
        u64 global_ctrl;
        u64 global_status;
        u64 counter_bitmask[2];
-       u64 global_ctrl_mask;
-       u64 global_status_mask;
+       u64 global_ctrl_rsvd;
+       u64 global_status_rsvd;
        u64 reserved_bits;
        u64 raw_event_mask;
        struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
@@ -571,9 +571,9 @@ struct kvm_pmu {
 
        u64 ds_area;
        u64 pebs_enable;
-       u64 pebs_enable_mask;
+       u64 pebs_enable_rsvd;
        u64 pebs_data_cfg;
-       u64 pebs_data_cfg_mask;
+       u64 pebs_data_cfg_rsvd;
 
        /*
         * If a guest counter is cross-mapped to host counter with different
index 938d01bede8074934ec1ec54fdcbee6961a2d871..cc909a28e92f7dbb66801cebda453eb475885e68 100644 (file)
@@ -681,13 +681,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!msr_info->host_initiated)
                        break;
 
-               if (data & pmu->global_status_mask)
+               if (data & pmu->global_status_rsvd)
                        return 1;
 
                pmu->global_status = data;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
-               data &= ~pmu->global_ctrl_mask;
+               data &= ~pmu->global_ctrl_rsvd;
                fallthrough;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (!kvm_valid_perf_global_ctrl(pmu, data))
@@ -704,7 +704,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
                 * GLOBAL_STATUS, and so the set of reserved bits is the same.
                 */
-               if (data & pmu->global_status_mask)
+               if (data & pmu->global_status_rsvd)
                        return 1;
                fallthrough;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
@@ -768,11 +768,11 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->raw_event_mask = X86_RAW_EVENT_MASK;
-       pmu->global_ctrl_mask = ~0ull;
-       pmu->global_status_mask = ~0ull;
-       pmu->fixed_ctr_ctrl_mask = ~0ull;
-       pmu->pebs_enable_mask = ~0ull;
-       pmu->pebs_data_cfg_mask = ~0ull;
+       pmu->global_ctrl_rsvd = ~0ull;
+       pmu->global_status_rsvd = ~0ull;
+       pmu->fixed_ctr_ctrl_rsvd = ~0ull;
+       pmu->pebs_enable_rsvd = ~0ull;
+       pmu->pebs_data_cfg_rsvd = ~0ull;
        bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
 
        if (!vcpu->kvm->arch.enable_pmu)
index 4d52b0b539bacf70821febdcb7754996eb7e389b..2eab8ea610db28a2065d58b78eadd1df8ad0944a 100644 (file)
@@ -129,7 +129,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
 static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
                                                 u64 data)
 {
-       return !(pmu->global_ctrl_mask & data);
+       return !(pmu->global_ctrl_rsvd & data);
 }
 
 /* returns general purpose PMC with the specified MSR. Note that it can be
index dfcc38bd97d34f4c618bc88f202cc4dd627f00ea..6e908bdc33104ab52da7c80d5219f5535f613fb2 100644 (file)
@@ -199,8 +199,8 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
                                         kvm_pmu_cap.num_counters_gp);
 
        if (pmu->version > 1) {
-               pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
-               pmu->global_status_mask = pmu->global_ctrl_mask;
+               pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+               pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
        }
 
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
index be40474de6e4dbd28379780c075385a79ed776d2..eaee9a08952e07cf10af0cadba3ccfeec8e0e8a1 100644 (file)
@@ -348,14 +348,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
-               if (data & pmu->fixed_ctr_ctrl_mask)
+               if (data & pmu->fixed_ctr_ctrl_rsvd)
                        return 1;
 
                if (pmu->fixed_ctr_ctrl != data)
                        reprogram_fixed_counters(pmu, data);
                break;
        case MSR_IA32_PEBS_ENABLE:
-               if (data & pmu->pebs_enable_mask)
+               if (data & pmu->pebs_enable_rsvd)
                        return 1;
 
                if (pmu->pebs_enable != data) {
@@ -371,7 +371,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                pmu->ds_area = data;
                break;
        case MSR_PEBS_DATA_CFG:
-               if (data & pmu->pebs_data_cfg_mask)
+               if (data & pmu->pebs_data_cfg_rsvd)
                        return 1;
 
                pmu->pebs_data_cfg = data;
@@ -456,7 +456,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        union cpuid10_eax eax;
        union cpuid10_edx edx;
        u64 perf_capabilities;
-       u64 counter_mask;
+       u64 counter_rsvd;
        int i;
 
        memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
@@ -502,21 +502,21 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        }
 
        for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-               pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
-       counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
+               pmu->fixed_ctr_ctrl_rsvd &= ~(0xbull << (i * 4));
+       counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
-       pmu->global_ctrl_mask = counter_mask;
+       pmu->global_ctrl_rsvd = counter_rsvd;
 
        /*
         * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
         * share reserved bit definitions.  The kernel just happens to use
         * OVF_CTRL for the names.
         */
-       pmu->global_status_mask = pmu->global_ctrl_mask
+       pmu->global_status_rsvd = pmu->global_ctrl_rsvd
                        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
                            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
        if (vmx_pt_mode_is_host_guest())
-               pmu->global_status_mask &=
+               pmu->global_status_rsvd &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
        entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
@@ -544,15 +544,15 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
        if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
                if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
-                       pmu->pebs_enable_mask = counter_mask;
+                       pmu->pebs_enable_rsvd = counter_rsvd;
                        pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
                        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-                               pmu->fixed_ctr_ctrl_mask &=
+                               pmu->fixed_ctr_ctrl_rsvd &=
                                        ~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
                        }
-                       pmu->pebs_data_cfg_mask = ~0xff00000full;
+                       pmu->pebs_data_cfg_rsvd = ~0xff00000full;
                } else {
-                       pmu->pebs_enable_mask =
+                       pmu->pebs_enable_rsvd =
                                ~((1ull << pmu->nr_arch_gp_counters) - 1);
                }
        }