EVENT_CONSTRAINT_END
};
+static struct event_constraint intel_arw_event_constraints[] __read_mostly = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
+ FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
+ FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
+ FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
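+ /*
+  * Events that take an OMR extra reg are each pinned to the counter
+  * matching their OMR index; see intel_arw_extra_regs.
+  */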
+ INTEL_UEVENT_CONSTRAINT(0x01b7, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x02b7, 0x2),
+ INTEL_UEVENT_CONSTRAINT(0x04b7, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x08b7, 0x8),
+ INTEL_UEVENT_CONSTRAINT(0x01d4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x02d4, 0x2),
+ INTEL_UEVENT_CONSTRAINT(0x04d4, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x08d4, 0x8),
+ INTEL_UEVENT_CONSTRAINT(0x0175, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x0275, 0x2),
+ INTEL_UEVENT_CONSTRAINT(0x21d3, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x22d3, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
static struct event_constraint intel_skl_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
},
};
+static __initconst const u64 arw_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x4000000000000001,
+ [C(RESULT_MISS)] = 0xfffff000000001,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x4000000000000002,
+ [C(RESULT_MISS)] = 0xfffff000000002,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+};
+
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
EVENT_EXTRA_END
};
+static struct extra_reg intel_arw_extra_regs[] __read_mostly = {
+ /* must define OMR_X first, see intel_alt_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1),
+ INTEL_UEVENT_EXTRA_REG(0x04b7, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2),
+ INTEL_UEVENT_EXTRA_REG(0x08b7, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3),
+ INTEL_UEVENT_EXTRA_REG(0x01d4, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0),
+ INTEL_UEVENT_EXTRA_REG(0x02d4, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1),
+ INTEL_UEVENT_EXTRA_REG(0x04d4, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2),
+ INTEL_UEVENT_EXTRA_REG(0x08d4, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
+ INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
+ INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
+ EVENT_EXTRA_END
+};
+
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_skt, "event=0x9c,umask=0x01");
EVENT_ATTR_STR(topdown-retiring, td_retiring_skt, "event=0xc2,umask=0x02");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound_skt, "event=0xa4,umask=0x02");
static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
}
+static __always_inline void intel_pmu_init_arw(struct pmu *pmu)
+{
+ intel_pmu_init_grt(pmu);
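+ /* GRT flags the offcore RSP pair; ARW uses the OMR extra regs instead */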
+ x86_pmu.flags &= ~PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_HAS_OMR;
+ memcpy(hybrid_var(pmu, hw_cache_extra_regs),
+ arw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ hybrid(pmu, event_constraints) = intel_arw_event_constraints;
+ hybrid(pmu, pebs_constraints) = intel_arw_pebs_event_constraints;
+ hybrid(pmu, extra_regs) = intel_arw_extra_regs;
+ static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
+}
+
__init int intel_pmu_init(void)
{
struct attribute **extra_skl_attr = &empty_attrs;
name = "arrowlake_h_hybrid";
break;
+ case INTEL_NOVALAKE:
+ case INTEL_NOVALAKE_L:
+ intel_pmu_init_hybrid(hybrid_big_small);
+
+ x86_pmu.pebs_latency_data = nvl_latency_data;
+ x86_pmu.get_event_constraints = mtl_get_event_constraints;
+ x86_pmu.hw_config = adl_hw_config;
+
+ td_attr = lnl_hybrid_events_attrs;
+ mem_attr = mtl_hybrid_mem_attrs;
+ tsx_attr = adl_hybrid_tsx_attrs;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
+
+ /* Initialize big core specific PerfMon capabilities.*/
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
+ intel_pmu_init_pnc(&pmu->pmu);
+
+ /* Initialize Atom core specific PerfMon capabilities.*/
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
+ intel_pmu_init_arw(&pmu->pmu);
+
+ intel_pmu_pebs_data_source_lnl();
+ pr_cont("Novalake Hybrid events, ");
+ name = "novalake_hybrid";
+ break;
+
default:
switch (x86_pmu.version) {
case 1: