git.ipfire.org Git - thirdparty/linux.git/commitdiff
perf/x86: Add dynamic constraint
author: Kan Liang <kan.liang@linux.intel.com>
Thu, 27 Mar 2025 19:52:13 +0000 (12:52 -0700)
committer: Peter Zijlstra <peterz@infradead.org>
Tue, 8 Apr 2025 18:55:48 +0000 (20:55 +0200)
More and more features require a dynamic event constraint, e.g., branch
counter logging, auto counter reload, Arch PEBS, etc.

Add a generic flag, PMU_FL_DYN_CONSTRAINT, to indicate the case. It
avoids having to keep adding individual feature flags in intel_cpuc_prepare().

Add a variable dyn_constraint in the struct hw_perf_event to track the
dynamic constraint of the event. Apply it if it's updated.

Apply the generic dynamic constraint for branch counter logging.
Many features on and after V6 require dynamic constraint. So
unconditionally set the flag for V6+.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Thomas Falcon <thomas.falcon@intel.com>
Link: https://lkml.kernel.org/r/20250327195217.2683619-2-kan.liang@linux.intel.com
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/lbr.c
arch/x86/events/perf_event.h
include/linux/perf_event.h

index 6866cc5acb0b57f2995b059382440fc36f6eba57..a0fe51e0c00f368ec078fa97260f01f90b578d08 100644 (file)
@@ -674,6 +674,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
        event->hw.idx = -1;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;
+       event->hw.dyn_constraint = ~0ULL;
 
        /* mark unused */
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
index 09d2d66c9f21fafdb704c29696c7e0bcf4e21822..972492832d7f10aa45e59afeb35e7e313bc26c44 100644 (file)
@@ -3730,10 +3730,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
        if (cpuc->excl_cntrs)
                return intel_get_excl_constraints(cpuc, event, idx, c2);
 
-       /* Not all counters support the branch counter feature. */
-       if (branch_sample_counters(event)) {
+       if (event->hw.dyn_constraint != ~0ULL) {
                c2 = dyn_constraint(cpuc, c2, idx);
-               c2->idxmsk64 &= x86_pmu.lbr_counters;
+               c2->idxmsk64 &= event->hw.dyn_constraint;
                c2->weight = hweight64(c2->idxmsk64);
        }
 
@@ -4135,15 +4134,19 @@ static int intel_pmu_hw_config(struct perf_event *event)
                leader = event->group_leader;
                if (branch_sample_call_stack(leader))
                        return -EINVAL;
-               if (branch_sample_counters(leader))
+               if (branch_sample_counters(leader)) {
                        num++;
+                       leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
+               }
                leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
 
                for_each_sibling_event(sibling, leader) {
                        if (branch_sample_call_stack(sibling))
                                return -EINVAL;
-                       if (branch_sample_counters(sibling))
+                       if (branch_sample_counters(sibling)) {
                                num++;
+                               sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
+                       }
                }
 
                if (num > fls(x86_pmu.lbr_counters))
@@ -4943,7 +4946,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
                        goto err;
        }
 
-       if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
+       if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) {
                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
                cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
@@ -6664,6 +6667,12 @@ __init int intel_pmu_init(void)
                        pr_cont(" AnyThread deprecated, ");
        }
 
+       /*
+        * Many features on and after V6 require dynamic constraint,
+        * e.g., Arch PEBS, ACR.
+        */
+       if (version >= 6)
+               x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT;
        /*
         * Install the hw-cache-events table:
         */
index f44c3d866f248ca7a91d19e12babbc4bcc0024ce..05acd6449cebd0e25d9aba876f38184c7e9eae99 100644 (file)
@@ -1618,7 +1618,7 @@ void __init intel_pmu_arch_lbr_init(void)
        x86_pmu.lbr_nr = lbr_nr;
 
        if (!!x86_pmu.lbr_counters)
-               x86_pmu.flags |= PMU_FL_BR_CNTR;
+               x86_pmu.flags |= PMU_FL_BR_CNTR | PMU_FL_DYN_CONSTRAINT;
 
        if (x86_pmu.lbr_mispred)
                static_branch_enable(&x86_lbr_mispred);
index 2c0ce0e9545e50ddfa217cf9e9a2dac48d30ae6e..f5ba1658a8899165aea927153a28173807328059 100644 (file)
@@ -1042,6 +1042,7 @@ do {                                                                      \
 #define PMU_FL_MEM_LOADS_AUX   0x100 /* Require an auxiliary event for the complete memory info */
 #define PMU_FL_RETIRE_LATENCY  0x200 /* Support Retire Latency in PEBS */
 #define PMU_FL_BR_CNTR         0x400 /* Support branch counter logging */
+#define PMU_FL_DYN_CONSTRAINT  0x800 /* Needs dynamic constraint */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
index 7f49a58b271dd94fad189d814d9a51c20944f1e2..54dad174ed7a0213959328927cc7c240f6fcdc0f 100644 (file)
@@ -158,6 +158,7 @@ struct hw_perf_event {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
+                       u64             dyn_constraint;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             event_base_rdpmc;