git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
perf/x86/intel: Support the 4 new OMR MSRs introduced in DMR and NVL
Author: Dapeng Mi <dapeng1.mi@linux.intel.com>
Wed, 14 Jan 2026 01:17:44 +0000 (09:17 +0800)
Committer: Peter Zijlstra <peterz@infradead.org>
Thu, 15 Jan 2026 09:04:26 +0000 (10:04 +0100)
Diamond Rapids (DMR) and Nova Lake (NVL) introduce an enhanced
Off-Module Response (OMR) facility, replacing the Off-Core Response (OCR)
Performance Monitoring of previous processors.

Legacy microarchitectures used the OCR facility to evaluate off-core and
multi-core off-module transactions. The newly named OMR facility improves
OCR capabilities for scalable coverage of new memory systems in
multi-core module systems.

Similar to OCR, 4 additional off-module configuration MSRs
(OFFMODULE_RSP_0 to OFFMODULE_RSP_3) are introduced to specify attributes
of off-module transactions. When multiple identical OMR events are
created, they need to occupy the same OFFMODULE_RSP_x MSR. To ensure
these multiple identical OMR events can work simultaneously, the
intel_alt_er() and intel_fixup_er() helpers are enhanced to rotate these
OMR events across different OFFMODULE_RSP_* MSRs, similar to previous OCR
events.

For more details about OMR, please refer to section 16.1 "OFF-MODULE
RESPONSE (OMR) FACILITY" in the ISE documentation.

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260114011750.350569-2-dapeng1.mi@linux.intel.com
arch/x86/events/intel/core.c
arch/x86/events/perf_event.h
arch/x86/include/asm/msr-index.h

index 1840ca1918d1c17ac6de81765383cb249e1e1b95..3578c660a90432e73bc4de46bf36f59b790268a3 100644 (file)
@@ -3532,17 +3532,32 @@ static int intel_alt_er(struct cpu_hw_events *cpuc,
        struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
        int alt_idx = idx;
 
-       if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
-               return idx;
-
-       if (idx == EXTRA_REG_RSP_0)
-               alt_idx = EXTRA_REG_RSP_1;
+       switch (idx) {
+       case EXTRA_REG_RSP_0 ... EXTRA_REG_RSP_1:
+               if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
+                       return idx;
+               if (++alt_idx > EXTRA_REG_RSP_1)
+                       alt_idx = EXTRA_REG_RSP_0;
+               if (config & ~extra_regs[alt_idx].valid_mask)
+                       return idx;
+               break;
 
-       if (idx == EXTRA_REG_RSP_1)
-               alt_idx = EXTRA_REG_RSP_0;
+       case EXTRA_REG_OMR_0 ... EXTRA_REG_OMR_3:
+               if (!(x86_pmu.flags & PMU_FL_HAS_OMR))
+                       return idx;
+               if (++alt_idx > EXTRA_REG_OMR_3)
+                       alt_idx = EXTRA_REG_OMR_0;
+               /*
+                * Subtracting EXTRA_REG_OMR_0 ensures to get correct
+                * OMR extra_reg entries which start from 0.
+                */
+               if (config & ~extra_regs[alt_idx - EXTRA_REG_OMR_0].valid_mask)
+                       return idx;
+               break;
 
-       if (config & ~extra_regs[alt_idx].valid_mask)
-               return idx;
+       default:
+               break;
+       }
 
        return alt_idx;
 }
@@ -3550,16 +3565,26 @@ static int intel_alt_er(struct cpu_hw_events *cpuc,
 static void intel_fixup_er(struct perf_event *event, int idx)
 {
        struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
-       event->hw.extra_reg.idx = idx;
+       int er_idx;
 
-       if (idx == EXTRA_REG_RSP_0) {
-               event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-               event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
-               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
-       } else if (idx == EXTRA_REG_RSP_1) {
+       event->hw.extra_reg.idx = idx;
+       switch (idx) {
+       case EXTRA_REG_RSP_0 ... EXTRA_REG_RSP_1:
+               er_idx = idx - EXTRA_REG_RSP_0;
                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-               event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
-               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
+               event->hw.config |= extra_regs[er_idx].event;
+               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0 + er_idx;
+               break;
+
+       case EXTRA_REG_OMR_0 ... EXTRA_REG_OMR_3:
+               er_idx = idx - EXTRA_REG_OMR_0;
+               event->hw.config &= ~ARCH_PERFMON_EVENTSEL_UMASK;
+               event->hw.config |= 1ULL << (8 + er_idx);
+               event->hw.extra_reg.reg = MSR_OMR_0 + er_idx;
+               break;
+
+       default:
+               pr_warn("The extra reg idx %d is not supported.\n", idx);
        }
 }
 
index 3161ec0a3416d265336405b4d08a18efbbea1cab..586e3fdfe6d8f28878ed9a617d5fc2ff88a4adca 100644 (file)
@@ -45,6 +45,10 @@ enum extra_reg_type {
        EXTRA_REG_FE            = 4,  /* fe_* */
        EXTRA_REG_SNOOP_0       = 5,  /* snoop response 0 */
        EXTRA_REG_SNOOP_1       = 6,  /* snoop response 1 */
+       EXTRA_REG_OMR_0         = 7,  /* OMR 0 */
+       EXTRA_REG_OMR_1         = 8,  /* OMR 1 */
+       EXTRA_REG_OMR_2         = 9,  /* OMR 2 */
+       EXTRA_REG_OMR_3         = 10,  /* OMR 3 */
 
        EXTRA_REG_MAX                 /* number of entries needed */
 };
@@ -1099,6 +1103,7 @@ do {                                                                      \
 #define PMU_FL_RETIRE_LATENCY  0x200 /* Support Retire Latency in PEBS */
 #define PMU_FL_BR_CNTR         0x400 /* Support branch counter logging */
 #define PMU_FL_DYN_CONSTRAINT  0x800 /* Needs dynamic constraint */
+#define PMU_FL_HAS_OMR         0x1000 /* has 4 equivalent OMR regs */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
index 3d0a0950d20a1609e02a8e854ed14d5d45a45dfa..6d1b69ea01c2f4b665f42567157b7273768caa3a 100644 (file)
 #define MSR_SNOOP_RSP_0                        0x00001328
 #define MSR_SNOOP_RSP_1                        0x00001329
 
+#define MSR_OMR_0                      0x000003e0
+#define MSR_OMR_1                      0x000003e1
+#define MSR_OMR_2                      0x000003e2
+#define MSR_OMR_3                      0x000003e3
+
 #define MSR_LBR_SELECT                 0x000001c8
 #define MSR_LBR_TOS                    0x000001c9