KVM: x86/pmu: Move kvm_init_pmu_capability() to pmu.c
author Sean Christopherson <seanjc@google.com>
Tue, 5 Aug 2025 19:05:17 +0000 (12:05 -0700)
committer Sean Christopherson <seanjc@google.com>
Tue, 19 Aug 2025 18:59:37 +0000 (11:59 -0700)
Move kvm_init_pmu_capability() to pmu.c so that future changes can access
variables that have no business being visible outside of pmu.c.
kvm_init_pmu_capability() is called once per module load; there is zero
reason it needs to be inlined.
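
As a minimal sketch of the linkage issue (the variable below is
hypothetical, not part of this commit): a static inline in pmu.h is
compiled into every file that includes the header, so it can only
reference symbols that are visible outside of pmu.c.  Once defined in
pmu.c, the function can use file-scoped state directly:

    /* arch/x86/kvm/pmu.c */
    static bool pmu_has_quirk;      /* hypothetical: local to pmu.c */

    void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
    {
            /* Legal: same translation unit as pmu_has_quirk. */
            if (pmu_has_quirk)
                    enable_pmu = false;
    }

The header-inline variant could not compile against pmu_has_quirk
without making the variable non-static, i.e. without exposing it to
every includer of pmu.h.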

No functional change intended.

Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250805190526.1453366-10-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a84fb3d28885b171f0afb0f0dce3a3b2ebf7b930..adb0fb8f6bb7e31e2021fb1e61c34d3866abdafd 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -96,6 +96,53 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 #undef __KVM_X86_PMU_OP
 }
 
+void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
+{
+       bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+       int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
+
+       /*
+        * Hybrid PMUs don't play nice with virtualization without careful
+        * configuration by userspace, and KVM's APIs for reporting supported
+        * vPMU features do not account for hybrid PMUs.  Disable vPMU support
+        * for hybrid PMUs until KVM gains a way to let userspace opt-in.
+        */
+       if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+               enable_pmu = false;
+
+       if (enable_pmu) {
+               perf_get_x86_pmu_capability(&kvm_pmu_cap);
+
+               /*
+                * WARN if perf did NOT disable hardware PMU if the number of
+                * architecturally required GP counters aren't present, i.e. if
+                * there are a non-zero number of counters, but fewer than what
+                * is architecturally required.
+                */
+               if (!kvm_pmu_cap.num_counters_gp ||
+                   WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
+                       enable_pmu = false;
+               else if (is_intel && !kvm_pmu_cap.version)
+                       enable_pmu = false;
+       }
+
+       if (!enable_pmu) {
+               memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+               return;
+       }
+
+       kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
+       kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
+                                         pmu_ops->MAX_NR_GP_COUNTERS);
+       kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
+                                            KVM_MAX_NR_FIXED_COUNTERS);
+
+       kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
+               perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
+       kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
+               perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+}
+
 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
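
The call site is untouched by this change; the function is invoked once
during vendor module initialization, which is why out-of-lining it costs
nothing.  Roughly (a sketch of the existing caller, not part of this
diff):

    /* arch/x86/kvm/x86.c: during vendor init, roughly */
    kvm_init_pmu_capability(ops->pmu_ops);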
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index ad89d0bd6005815c5a52b7d25a3b97b00c1c0071..13477066eb4028f79aad4607be04b57a71d1f86a 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -180,52 +180,7 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 extern struct x86_pmu_capability kvm_pmu_cap;
 extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
 
-static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
-{
-       bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
-       int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
-
-       /*
-        * Hybrid PMUs don't play nice with virtualization without careful
-        * configuration by userspace, and KVM's APIs for reporting supported
-        * vPMU features do not account for hybrid PMUs.  Disable vPMU support
-        * for hybrid PMUs until KVM gains a way to let userspace opt-in.
-        */
-       if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
-               enable_pmu = false;
-
-       if (enable_pmu) {
-               perf_get_x86_pmu_capability(&kvm_pmu_cap);
-
-               /*
-                * WARN if perf did NOT disable hardware PMU if the number of
-                * architecturally required GP counters aren't present, i.e. if
-                * there are a non-zero number of counters, but fewer than what
-                * is architecturally required.
-                */
-               if (!kvm_pmu_cap.num_counters_gp ||
-                   WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
-                       enable_pmu = false;
-               else if (is_intel && !kvm_pmu_cap.version)
-                       enable_pmu = false;
-       }
-
-       if (!enable_pmu) {
-               memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
-               return;
-       }
-
-       kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
-       kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
-                                         pmu_ops->MAX_NR_GP_COUNTERS);
-       kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
-                                            KVM_MAX_NR_FIXED_COUNTERS);
-
-       kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
-               perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
-       kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
-               perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-}
+void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);
 
 static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
 {