]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
perf/x86/intel: Add missing branch counters constraint apply
authorDapeng Mi <dapeng1.mi@linux.intel.com>
Mon, 23 Mar 2026 13:19:20 +0000 (09:19 -0400)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 25 Mar 2026 10:08:57 +0000 (11:08 +0100)
[ Upstream commit 1d07bbd7ea36ea0b8dfa8068dbe67eb3a32d9590 ]

When running the command:
'perf record -e "{instructions,instructions:p}" -j any,counter sleep 1',
a "shift-out-of-bounds" warning is reported on CWF.

  UBSAN: shift-out-of-bounds in /kbuild/src/consumer/arch/x86/events/intel/lbr.c:970:15
  shift exponent 64 is too large for 64-bit type 'long long unsigned int'
  ......
  intel_pmu_lbr_counters_reorder.isra.0.cold+0x2a/0xa7
  intel_pmu_lbr_save_brstack+0xc0/0x4c0
  setup_arch_pebs_sample_data+0x114b/0x2400

The warning occurs because the second "instructions:p" event, which
involves branch counters sampling, is incorrectly programmed to fixed
counter 0 instead of the general-purpose (GP) counters 0-3 that support
branch counters sampling. Currently only GP counters 0-3 support branch
counters sampling on CWF, any event involving branch counters sampling
should be programmed on GP counters 0-3. Since the counter index of fixed
counter 0 is 32, it leads to the "src" value in below code is right
shifted 64 bits and trigger the "shift-out-of-bounds" warning.

cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK;

The root cause is the loss of the branch counters constraint for the
new event in the branch counters sampling event group, since it isn't
yet part of the sibling list. This results in the second
"instructions:p" event being programmed on fixed counter 0 incorrectly
instead of the appropriate GP counters 0-3.

To address this, we apply the missing branch counters constraint for
the last event in the group. Additionally, we introduce a new function,
`intel_set_branch_counter_constr()`, to apply the branch counters
constraint and avoid code duplication.

Fixes: 33744916196b ("perf/x86/intel: Support branch counters logging")
Reported-by: Xudong Hao <xudong.hao@intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260228053320.140406-2-dapeng1.mi@linux.intel.com
Cc: stable@vger.kernel.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/events/intel/core.c

index 4a57a9948c745db90091225319f3b68d0c0a62c3..ddde2f1d0bd29363ecc629bf1d71e059078f6459 100644 (file)
@@ -4031,6 +4031,17 @@ end:
        return start;
 }
 
+static inline int intel_set_branch_counter_constr(struct perf_event *event,
+                                                 int *num)
+{
+       if (branch_sample_call_stack(event))
+               return -EINVAL;
+       if (branch_sample_counters(event))
+               (*num)++;
+
+       return 0;
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
        int ret = x86_pmu_hw_config(event);
@@ -4090,17 +4101,19 @@ static int intel_pmu_hw_config(struct perf_event *event)
                 * group, which requires the extra space to store the counters.
                 */
                leader = event->group_leader;
-               if (branch_sample_call_stack(leader))
+               if (intel_set_branch_counter_constr(leader, &num))
                        return -EINVAL;
-               if (branch_sample_counters(leader))
-                       num++;
                leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
 
                for_each_sibling_event(sibling, leader) {
-                       if (branch_sample_call_stack(sibling))
+                       if (intel_set_branch_counter_constr(sibling, &num))
+                               return -EINVAL;
+               }
+
+               /* event isn't installed as a sibling yet. */
+               if (event != leader) {
+                       if (intel_set_branch_counter_constr(event, &num))
                                return -EINVAL;
-                       if (branch_sample_counters(sibling))
-                               num++;
                }
 
                if (num > fls(x86_pmu.lbr_counters))