/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMTPY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),	/* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note the low 8 bits eventsel code is not a continuous field, containing
	 * some #GPing bits. These are masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};
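/*
 * Illustrative note (not part of the original source): the strings above are
 * the sysfs event encodings exported to userspace, e.g.
 * "event=0x0b,umask=0x10,ldlat=3" selects event 0x0b with unit mask 0x10 and
 * a load-latency threshold of 3 cycles.  Assuming a kernel that exports these
 * aliases, a user could sample roughly the same thing with something like:
 *
 *	perf record -e cpu/mem-loads,ldlat=3/P -- <workload>
 *
 * The exact command line is only an example; the alias names come from the
 * EVENT_ATTR_STR() definitions above.
 */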
/*
 * topdown events for Intel Core CPUs.
 *
 * The events are all in slots, which is a free slot in a 4 wide
 * pipeline. Some events are already reported in slots, for cycle
 * events we multiply by the pipeline width (4).
 *
 * With Hyper Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to pipeline width,
 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
 */

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");
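/*
 * Worked example (illustrative, not part of the original source): with HT off
 * the slot estimate is simply cycles * 4 (one slot per pipe per cycle on a
 * 4-wide machine), hence the "4" scale.  With HT on, both sibling threads are
 * measured and averaged, then scaled to the pipeline width, which collapses
 * to the factor 2 used above:
 *
 *	slots = (count_t0 + count_t1) / 2 * 4
 *	      = (count_t0 + count_t1) * 2
 */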
static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};
static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
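/*
 * Illustrative example (not part of the original source): a generic
 * PERF_COUNT_HW_* event is translated into a raw eventsel/umask encoding
 * through the table above, e.g.
 *
 *	u64 config = intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES);
 *	// config == 0x412e: event select 0x2e, unit mask 0x41
 *
 * The low byte is the event select and the next byte is the unit mask.
 */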
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */
#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
582 static __initconst
const u64 snb_hw_cache_extra_regs
583 [PERF_COUNT_HW_CACHE_MAX
]
584 [PERF_COUNT_HW_CACHE_OP_MAX
]
585 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
589 [ C(RESULT_ACCESS
) ] = SNB_DMND_READ
|SNB_L3_ACCESS
,
590 [ C(RESULT_MISS
) ] = SNB_DMND_READ
|SNB_L3_MISS
,
593 [ C(RESULT_ACCESS
) ] = SNB_DMND_WRITE
|SNB_L3_ACCESS
,
594 [ C(RESULT_MISS
) ] = SNB_DMND_WRITE
|SNB_L3_MISS
,
596 [ C(OP_PREFETCH
) ] = {
597 [ C(RESULT_ACCESS
) ] = SNB_DMND_PREFETCH
|SNB_L3_ACCESS
,
598 [ C(RESULT_MISS
) ] = SNB_DMND_PREFETCH
|SNB_L3_MISS
,
603 [ C(RESULT_ACCESS
) ] = SNB_DMND_READ
|SNB_DRAM_ANY
,
604 [ C(RESULT_MISS
) ] = SNB_DMND_READ
|SNB_DRAM_REMOTE
,
607 [ C(RESULT_ACCESS
) ] = SNB_DMND_WRITE
|SNB_DRAM_ANY
,
608 [ C(RESULT_MISS
) ] = SNB_DMND_WRITE
|SNB_DRAM_REMOTE
,
610 [ C(OP_PREFETCH
) ] = {
611 [ C(RESULT_ACCESS
) ] = SNB_DMND_PREFETCH
|SNB_DRAM_ANY
,
612 [ C(RESULT_MISS
) ] = SNB_DMND_PREFETCH
|SNB_DRAM_REMOTE
,
617 static __initconst
const u64 snb_hw_cache_event_ids
618 [PERF_COUNT_HW_CACHE_MAX
]
619 [PERF_COUNT_HW_CACHE_OP_MAX
]
620 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
624 [ C(RESULT_ACCESS
) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
625 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPLACEMENT */
628 [ C(RESULT_ACCESS
) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
629 [ C(RESULT_MISS
) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
631 [ C(OP_PREFETCH
) ] = {
632 [ C(RESULT_ACCESS
) ] = 0x0,
633 [ C(RESULT_MISS
) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
638 [ C(RESULT_ACCESS
) ] = 0x0,
639 [ C(RESULT_MISS
) ] = 0x0280, /* ICACHE.MISSES */
642 [ C(RESULT_ACCESS
) ] = -1,
643 [ C(RESULT_MISS
) ] = -1,
645 [ C(OP_PREFETCH
) ] = {
646 [ C(RESULT_ACCESS
) ] = 0x0,
647 [ C(RESULT_MISS
) ] = 0x0,
652 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
653 [ C(RESULT_ACCESS
) ] = 0x01b7,
654 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
655 [ C(RESULT_MISS
) ] = 0x01b7,
658 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
659 [ C(RESULT_ACCESS
) ] = 0x01b7,
660 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
661 [ C(RESULT_MISS
) ] = 0x01b7,
663 [ C(OP_PREFETCH
) ] = {
664 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
665 [ C(RESULT_ACCESS
) ] = 0x01b7,
666 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
667 [ C(RESULT_MISS
) ] = 0x01b7,
672 [ C(RESULT_ACCESS
) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
673 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
676 [ C(RESULT_ACCESS
) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
677 [ C(RESULT_MISS
) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
679 [ C(OP_PREFETCH
) ] = {
680 [ C(RESULT_ACCESS
) ] = 0x0,
681 [ C(RESULT_MISS
) ] = 0x0,
686 [ C(RESULT_ACCESS
) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
687 [ C(RESULT_MISS
) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
690 [ C(RESULT_ACCESS
) ] = -1,
691 [ C(RESULT_MISS
) ] = -1,
693 [ C(OP_PREFETCH
) ] = {
694 [ C(RESULT_ACCESS
) ] = -1,
695 [ C(RESULT_MISS
) ] = -1,
700 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
701 [ C(RESULT_MISS
) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
704 [ C(RESULT_ACCESS
) ] = -1,
705 [ C(RESULT_MISS
) ] = -1,
707 [ C(OP_PREFETCH
) ] = {
708 [ C(RESULT_ACCESS
) ] = -1,
709 [ C(RESULT_MISS
) ] = -1,
714 [ C(RESULT_ACCESS
) ] = 0x01b7,
715 [ C(RESULT_MISS
) ] = 0x01b7,
718 [ C(RESULT_ACCESS
) ] = 0x01b7,
719 [ C(RESULT_MISS
) ] = 0x01b7,
721 [ C(OP_PREFETCH
) ] = {
722 [ C(RESULT_ACCESS
) ] = 0x01b7,
723 [ C(RESULT_MISS
) ] = 0x01b7,
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
773 static __initconst
const u64 hsw_hw_cache_event_ids
774 [PERF_COUNT_HW_CACHE_MAX
]
775 [PERF_COUNT_HW_CACHE_OP_MAX
]
776 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
780 [ C(RESULT_ACCESS
) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
781 [ C(RESULT_MISS
) ] = 0x151, /* L1D.REPLACEMENT */
784 [ C(RESULT_ACCESS
) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
785 [ C(RESULT_MISS
) ] = 0x0,
787 [ C(OP_PREFETCH
) ] = {
788 [ C(RESULT_ACCESS
) ] = 0x0,
789 [ C(RESULT_MISS
) ] = 0x0,
794 [ C(RESULT_ACCESS
) ] = 0x0,
795 [ C(RESULT_MISS
) ] = 0x280, /* ICACHE.MISSES */
798 [ C(RESULT_ACCESS
) ] = -1,
799 [ C(RESULT_MISS
) ] = -1,
801 [ C(OP_PREFETCH
) ] = {
802 [ C(RESULT_ACCESS
) ] = 0x0,
803 [ C(RESULT_MISS
) ] = 0x0,
808 [ C(RESULT_ACCESS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
809 [ C(RESULT_MISS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
812 [ C(RESULT_ACCESS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
813 [ C(RESULT_MISS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
815 [ C(OP_PREFETCH
) ] = {
816 [ C(RESULT_ACCESS
) ] = 0x0,
817 [ C(RESULT_MISS
) ] = 0x0,
822 [ C(RESULT_ACCESS
) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
823 [ C(RESULT_MISS
) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
826 [ C(RESULT_ACCESS
) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
827 [ C(RESULT_MISS
) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
829 [ C(OP_PREFETCH
) ] = {
830 [ C(RESULT_ACCESS
) ] = 0x0,
831 [ C(RESULT_MISS
) ] = 0x0,
836 [ C(RESULT_ACCESS
) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
837 [ C(RESULT_MISS
) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
840 [ C(RESULT_ACCESS
) ] = -1,
841 [ C(RESULT_MISS
) ] = -1,
843 [ C(OP_PREFETCH
) ] = {
844 [ C(RESULT_ACCESS
) ] = -1,
845 [ C(RESULT_MISS
) ] = -1,
850 [ C(RESULT_ACCESS
) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
851 [ C(RESULT_MISS
) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
854 [ C(RESULT_ACCESS
) ] = -1,
855 [ C(RESULT_MISS
) ] = -1,
857 [ C(OP_PREFETCH
) ] = {
858 [ C(RESULT_ACCESS
) ] = -1,
859 [ C(RESULT_MISS
) ] = -1,
864 [ C(RESULT_ACCESS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
865 [ C(RESULT_MISS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
868 [ C(RESULT_ACCESS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
869 [ C(RESULT_MISS
) ] = 0x1b7, /* OFFCORE_RESPONSE */
871 [ C(OP_PREFETCH
) ] = {
872 [ C(RESULT_ACCESS
) ] = 0x0,
873 [ C(RESULT_MISS
) ] = 0x0,
878 static __initconst
const u64 hsw_hw_cache_extra_regs
879 [PERF_COUNT_HW_CACHE_MAX
]
880 [PERF_COUNT_HW_CACHE_OP_MAX
]
881 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
885 [ C(RESULT_ACCESS
) ] = HSW_DEMAND_READ
|
887 [ C(RESULT_MISS
) ] = HSW_DEMAND_READ
|
888 HSW_L3_MISS
|HSW_ANY_SNOOP
,
891 [ C(RESULT_ACCESS
) ] = HSW_DEMAND_WRITE
|
893 [ C(RESULT_MISS
) ] = HSW_DEMAND_WRITE
|
894 HSW_L3_MISS
|HSW_ANY_SNOOP
,
896 [ C(OP_PREFETCH
) ] = {
897 [ C(RESULT_ACCESS
) ] = 0x0,
898 [ C(RESULT_MISS
) ] = 0x0,
903 [ C(RESULT_ACCESS
) ] = HSW_DEMAND_READ
|
904 HSW_L3_MISS_LOCAL_DRAM
|
906 [ C(RESULT_MISS
) ] = HSW_DEMAND_READ
|
911 [ C(RESULT_ACCESS
) ] = HSW_DEMAND_WRITE
|
912 HSW_L3_MISS_LOCAL_DRAM
|
914 [ C(RESULT_MISS
) ] = HSW_DEMAND_WRITE
|
918 [ C(OP_PREFETCH
) ] = {
919 [ C(RESULT_ACCESS
) ] = 0x0,
920 [ C(RESULT_MISS
) ] = 0x0,
925 static __initconst
const u64 westmere_hw_cache_event_ids
926 [PERF_COUNT_HW_CACHE_MAX
]
927 [PERF_COUNT_HW_CACHE_OP_MAX
]
928 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
932 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
933 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPL */
936 [ C(RESULT_ACCESS
) ] = 0x020b, /* MEM_INST_RETURED.STORES */
937 [ C(RESULT_MISS
) ] = 0x0251, /* L1D.M_REPL */
939 [ C(OP_PREFETCH
) ] = {
940 [ C(RESULT_ACCESS
) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
941 [ C(RESULT_MISS
) ] = 0x024e, /* L1D_PREFETCH.MISS */
946 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
947 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
950 [ C(RESULT_ACCESS
) ] = -1,
951 [ C(RESULT_MISS
) ] = -1,
953 [ C(OP_PREFETCH
) ] = {
954 [ C(RESULT_ACCESS
) ] = 0x0,
955 [ C(RESULT_MISS
) ] = 0x0,
960 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
961 [ C(RESULT_ACCESS
) ] = 0x01b7,
962 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
963 [ C(RESULT_MISS
) ] = 0x01b7,
966 * Use RFO, not WRITEBACK, because a write miss would typically occur
970 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
971 [ C(RESULT_ACCESS
) ] = 0x01b7,
972 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
973 [ C(RESULT_MISS
) ] = 0x01b7,
975 [ C(OP_PREFETCH
) ] = {
976 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
977 [ C(RESULT_ACCESS
) ] = 0x01b7,
978 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
979 [ C(RESULT_MISS
) ] = 0x01b7,
984 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
985 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
988 [ C(RESULT_ACCESS
) ] = 0x020b, /* MEM_INST_RETURED.STORES */
989 [ C(RESULT_MISS
) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
991 [ C(OP_PREFETCH
) ] = {
992 [ C(RESULT_ACCESS
) ] = 0x0,
993 [ C(RESULT_MISS
) ] = 0x0,
998 [ C(RESULT_ACCESS
) ] = 0x01c0, /* INST_RETIRED.ANY_P */
999 [ C(RESULT_MISS
) ] = 0x0185, /* ITLB_MISSES.ANY */
1002 [ C(RESULT_ACCESS
) ] = -1,
1003 [ C(RESULT_MISS
) ] = -1,
1005 [ C(OP_PREFETCH
) ] = {
1006 [ C(RESULT_ACCESS
) ] = -1,
1007 [ C(RESULT_MISS
) ] = -1,
1012 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1013 [ C(RESULT_MISS
) ] = 0x03e8, /* BPU_CLEARS.ANY */
1016 [ C(RESULT_ACCESS
) ] = -1,
1017 [ C(RESULT_MISS
) ] = -1,
1019 [ C(OP_PREFETCH
) ] = {
1020 [ C(RESULT_ACCESS
) ] = -1,
1021 [ C(RESULT_MISS
) ] = -1,
1026 [ C(RESULT_ACCESS
) ] = 0x01b7,
1027 [ C(RESULT_MISS
) ] = 0x01b7,
1030 [ C(RESULT_ACCESS
) ] = 0x01b7,
1031 [ C(RESULT_MISS
) ] = 0x01b7,
1033 [ C(OP_PREFETCH
) ] = {
1034 [ C(RESULT_ACCESS
) ] = 0x01b7,
1035 [ C(RESULT_MISS
) ] = 0x01b7,
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
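/*
 * Illustrative note (not part of the original source): an OFFCORE_RESPONSE
 * extra-register value is built by OR-ing a request-type mask with a
 * response-type mask.  For example, the LL/OP_READ entries of
 * nehalem_hw_cache_extra_regs below use
 *
 *	NHM_DMND_READ | NHM_L3_ACCESS	// demand data reads, any L3 outcome
 *	NHM_DMND_READ | NHM_L3_MISS	// demand data reads that missed L3
 *
 * as the ACCESS and MISS encodings respectively.
 */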
1073 static __initconst
const u64 nehalem_hw_cache_extra_regs
1074 [PERF_COUNT_HW_CACHE_MAX
]
1075 [PERF_COUNT_HW_CACHE_OP_MAX
]
1076 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
1080 [ C(RESULT_ACCESS
) ] = NHM_DMND_READ
|NHM_L3_ACCESS
,
1081 [ C(RESULT_MISS
) ] = NHM_DMND_READ
|NHM_L3_MISS
,
1084 [ C(RESULT_ACCESS
) ] = NHM_DMND_WRITE
|NHM_L3_ACCESS
,
1085 [ C(RESULT_MISS
) ] = NHM_DMND_WRITE
|NHM_L3_MISS
,
1087 [ C(OP_PREFETCH
) ] = {
1088 [ C(RESULT_ACCESS
) ] = NHM_DMND_PREFETCH
|NHM_L3_ACCESS
,
1089 [ C(RESULT_MISS
) ] = NHM_DMND_PREFETCH
|NHM_L3_MISS
,
1094 [ C(RESULT_ACCESS
) ] = NHM_DMND_READ
|NHM_LOCAL
|NHM_REMOTE
,
1095 [ C(RESULT_MISS
) ] = NHM_DMND_READ
|NHM_REMOTE
,
1098 [ C(RESULT_ACCESS
) ] = NHM_DMND_WRITE
|NHM_LOCAL
|NHM_REMOTE
,
1099 [ C(RESULT_MISS
) ] = NHM_DMND_WRITE
|NHM_REMOTE
,
1101 [ C(OP_PREFETCH
) ] = {
1102 [ C(RESULT_ACCESS
) ] = NHM_DMND_PREFETCH
|NHM_LOCAL
|NHM_REMOTE
,
1103 [ C(RESULT_MISS
) ] = NHM_DMND_PREFETCH
|NHM_REMOTE
,
1108 static __initconst
const u64 nehalem_hw_cache_event_ids
1109 [PERF_COUNT_HW_CACHE_MAX
]
1110 [PERF_COUNT_HW_CACHE_OP_MAX
]
1111 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
1115 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1116 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPL */
1119 [ C(RESULT_ACCESS
) ] = 0x020b, /* MEM_INST_RETURED.STORES */
1120 [ C(RESULT_MISS
) ] = 0x0251, /* L1D.M_REPL */
1122 [ C(OP_PREFETCH
) ] = {
1123 [ C(RESULT_ACCESS
) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1124 [ C(RESULT_MISS
) ] = 0x024e, /* L1D_PREFETCH.MISS */
1129 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
1130 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
1133 [ C(RESULT_ACCESS
) ] = -1,
1134 [ C(RESULT_MISS
) ] = -1,
1136 [ C(OP_PREFETCH
) ] = {
1137 [ C(RESULT_ACCESS
) ] = 0x0,
1138 [ C(RESULT_MISS
) ] = 0x0,
1143 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1144 [ C(RESULT_ACCESS
) ] = 0x01b7,
1145 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1146 [ C(RESULT_MISS
) ] = 0x01b7,
1149 * Use RFO, not WRITEBACK, because a write miss would typically occur
1153 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1154 [ C(RESULT_ACCESS
) ] = 0x01b7,
1155 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1156 [ C(RESULT_MISS
) ] = 0x01b7,
1158 [ C(OP_PREFETCH
) ] = {
1159 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1160 [ C(RESULT_ACCESS
) ] = 0x01b7,
1161 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1162 [ C(RESULT_MISS
) ] = 0x01b7,
1167 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1168 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1171 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1172 [ C(RESULT_MISS
) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1174 [ C(OP_PREFETCH
) ] = {
1175 [ C(RESULT_ACCESS
) ] = 0x0,
1176 [ C(RESULT_MISS
) ] = 0x0,
1181 [ C(RESULT_ACCESS
) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1182 [ C(RESULT_MISS
) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1185 [ C(RESULT_ACCESS
) ] = -1,
1186 [ C(RESULT_MISS
) ] = -1,
1188 [ C(OP_PREFETCH
) ] = {
1189 [ C(RESULT_ACCESS
) ] = -1,
1190 [ C(RESULT_MISS
) ] = -1,
1195 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1196 [ C(RESULT_MISS
) ] = 0x03e8, /* BPU_CLEARS.ANY */
1199 [ C(RESULT_ACCESS
) ] = -1,
1200 [ C(RESULT_MISS
) ] = -1,
1202 [ C(OP_PREFETCH
) ] = {
1203 [ C(RESULT_ACCESS
) ] = -1,
1204 [ C(RESULT_MISS
) ] = -1,
1209 [ C(RESULT_ACCESS
) ] = 0x01b7,
1210 [ C(RESULT_MISS
) ] = 0x01b7,
1213 [ C(RESULT_ACCESS
) ] = 0x01b7,
1214 [ C(RESULT_MISS
) ] = 0x01b7,
1216 [ C(OP_PREFETCH
) ] = {
1217 [ C(RESULT_ACCESS
) ] = 0x01b7,
1218 [ C(RESULT_MISS
) ] = 0x01b7,
1223 static __initconst
const u64 core2_hw_cache_event_ids
1224 [PERF_COUNT_HW_CACHE_MAX
]
1225 [PERF_COUNT_HW_CACHE_OP_MAX
]
1226 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
1230 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1231 [ C(RESULT_MISS
) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1234 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1235 [ C(RESULT_MISS
) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1237 [ C(OP_PREFETCH
) ] = {
1238 [ C(RESULT_ACCESS
) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1239 [ C(RESULT_MISS
) ] = 0,
1244 [ C(RESULT_ACCESS
) ] = 0x0080, /* L1I.READS */
1245 [ C(RESULT_MISS
) ] = 0x0081, /* L1I.MISSES */
1248 [ C(RESULT_ACCESS
) ] = -1,
1249 [ C(RESULT_MISS
) ] = -1,
1251 [ C(OP_PREFETCH
) ] = {
1252 [ C(RESULT_ACCESS
) ] = 0,
1253 [ C(RESULT_MISS
) ] = 0,
1258 [ C(RESULT_ACCESS
) ] = 0x4f29, /* L2_LD.MESI */
1259 [ C(RESULT_MISS
) ] = 0x4129, /* L2_LD.ISTATE */
1262 [ C(RESULT_ACCESS
) ] = 0x4f2A, /* L2_ST.MESI */
1263 [ C(RESULT_MISS
) ] = 0x412A, /* L2_ST.ISTATE */
1265 [ C(OP_PREFETCH
) ] = {
1266 [ C(RESULT_ACCESS
) ] = 0,
1267 [ C(RESULT_MISS
) ] = 0,
1272 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1273 [ C(RESULT_MISS
) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1276 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1277 [ C(RESULT_MISS
) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1279 [ C(OP_PREFETCH
) ] = {
1280 [ C(RESULT_ACCESS
) ] = 0,
1281 [ C(RESULT_MISS
) ] = 0,
1286 [ C(RESULT_ACCESS
) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1287 [ C(RESULT_MISS
) ] = 0x1282, /* ITLBMISSES */
1290 [ C(RESULT_ACCESS
) ] = -1,
1291 [ C(RESULT_MISS
) ] = -1,
1293 [ C(OP_PREFETCH
) ] = {
1294 [ C(RESULT_ACCESS
) ] = -1,
1295 [ C(RESULT_MISS
) ] = -1,
1300 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1301 [ C(RESULT_MISS
) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1304 [ C(RESULT_ACCESS
) ] = -1,
1305 [ C(RESULT_MISS
) ] = -1,
1307 [ C(OP_PREFETCH
) ] = {
1308 [ C(RESULT_ACCESS
) ] = -1,
1309 [ C(RESULT_MISS
) ] = -1,
1314 static __initconst
const u64 atom_hw_cache_event_ids
1315 [PERF_COUNT_HW_CACHE_MAX
]
1316 [PERF_COUNT_HW_CACHE_OP_MAX
]
1317 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
1321 [ C(RESULT_ACCESS
) ] = 0x2140, /* L1D_CACHE.LD */
1322 [ C(RESULT_MISS
) ] = 0,
1325 [ C(RESULT_ACCESS
) ] = 0x2240, /* L1D_CACHE.ST */
1326 [ C(RESULT_MISS
) ] = 0,
1328 [ C(OP_PREFETCH
) ] = {
1329 [ C(RESULT_ACCESS
) ] = 0x0,
1330 [ C(RESULT_MISS
) ] = 0,
1335 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
1336 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
1339 [ C(RESULT_ACCESS
) ] = -1,
1340 [ C(RESULT_MISS
) ] = -1,
1342 [ C(OP_PREFETCH
) ] = {
1343 [ C(RESULT_ACCESS
) ] = 0,
1344 [ C(RESULT_MISS
) ] = 0,
1349 [ C(RESULT_ACCESS
) ] = 0x4f29, /* L2_LD.MESI */
1350 [ C(RESULT_MISS
) ] = 0x4129, /* L2_LD.ISTATE */
1353 [ C(RESULT_ACCESS
) ] = 0x4f2A, /* L2_ST.MESI */
1354 [ C(RESULT_MISS
) ] = 0x412A, /* L2_ST.ISTATE */
1356 [ C(OP_PREFETCH
) ] = {
1357 [ C(RESULT_ACCESS
) ] = 0,
1358 [ C(RESULT_MISS
) ] = 0,
1363 [ C(RESULT_ACCESS
) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1364 [ C(RESULT_MISS
) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1367 [ C(RESULT_ACCESS
) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1368 [ C(RESULT_MISS
) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1370 [ C(OP_PREFETCH
) ] = {
1371 [ C(RESULT_ACCESS
) ] = 0,
1372 [ C(RESULT_MISS
) ] = 0,
1377 [ C(RESULT_ACCESS
) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1378 [ C(RESULT_MISS
) ] = 0x0282, /* ITLB.MISSES */
1381 [ C(RESULT_ACCESS
) ] = -1,
1382 [ C(RESULT_MISS
) ] = -1,
1384 [ C(OP_PREFETCH
) ] = {
1385 [ C(RESULT_ACCESS
) ] = -1,
1386 [ C(RESULT_MISS
) ] = -1,
1391 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1392 [ C(RESULT_MISS
) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1395 [ C(RESULT_ACCESS
) ] = -1,
1396 [ C(RESULT_MISS
) ] = -1,
1398 [ C(OP_PREFETCH
) ] = {
1399 [ C(RESULT_ACCESS
) ] = -1,
1400 [ C(RESULT_MISS
) ] = -1,
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
/* no_alloc_cycles.not_delivered */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	"event=0xc2,umask=0x10");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	"event=0xc2,umask=0x10");

static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
1444 static __initconst
const u64 slm_hw_cache_extra_regs
1445 [PERF_COUNT_HW_CACHE_MAX
]
1446 [PERF_COUNT_HW_CACHE_OP_MAX
]
1447 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
1451 [ C(RESULT_ACCESS
) ] = SLM_DMND_READ
|SLM_LLC_ACCESS
,
1452 [ C(RESULT_MISS
) ] = 0,
1455 [ C(RESULT_ACCESS
) ] = SLM_DMND_WRITE
|SLM_LLC_ACCESS
,
1456 [ C(RESULT_MISS
) ] = SLM_DMND_WRITE
|SLM_LLC_MISS
,
1458 [ C(OP_PREFETCH
) ] = {
1459 [ C(RESULT_ACCESS
) ] = SLM_DMND_PREFETCH
|SLM_LLC_ACCESS
,
1460 [ C(RESULT_MISS
) ] = SLM_DMND_PREFETCH
|SLM_LLC_MISS
,
1465 static __initconst
const u64 slm_hw_cache_event_ids
1466 [PERF_COUNT_HW_CACHE_MAX
]
1467 [PERF_COUNT_HW_CACHE_OP_MAX
]
1468 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
1472 [ C(RESULT_ACCESS
) ] = 0,
1473 [ C(RESULT_MISS
) ] = 0x0104, /* LD_DCU_MISS */
1476 [ C(RESULT_ACCESS
) ] = 0,
1477 [ C(RESULT_MISS
) ] = 0,
1479 [ C(OP_PREFETCH
) ] = {
1480 [ C(RESULT_ACCESS
) ] = 0,
1481 [ C(RESULT_MISS
) ] = 0,
1486 [ C(RESULT_ACCESS
) ] = 0x0380, /* ICACHE.ACCESSES */
1487 [ C(RESULT_MISS
) ] = 0x0280, /* ICACGE.MISSES */
1490 [ C(RESULT_ACCESS
) ] = -1,
1491 [ C(RESULT_MISS
) ] = -1,
1493 [ C(OP_PREFETCH
) ] = {
1494 [ C(RESULT_ACCESS
) ] = 0,
1495 [ C(RESULT_MISS
) ] = 0,
1500 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1501 [ C(RESULT_ACCESS
) ] = 0x01b7,
1502 [ C(RESULT_MISS
) ] = 0,
1505 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1506 [ C(RESULT_ACCESS
) ] = 0x01b7,
1507 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1508 [ C(RESULT_MISS
) ] = 0x01b7,
1510 [ C(OP_PREFETCH
) ] = {
1511 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1512 [ C(RESULT_ACCESS
) ] = 0x01b7,
1513 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1514 [ C(RESULT_MISS
) ] = 0x01b7,
1519 [ C(RESULT_ACCESS
) ] = 0,
1520 [ C(RESULT_MISS
) ] = 0x0804, /* LD_DTLB_MISS */
1523 [ C(RESULT_ACCESS
) ] = 0,
1524 [ C(RESULT_MISS
) ] = 0,
1526 [ C(OP_PREFETCH
) ] = {
1527 [ C(RESULT_ACCESS
) ] = 0,
1528 [ C(RESULT_MISS
) ] = 0,
1533 [ C(RESULT_ACCESS
) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1534 [ C(RESULT_MISS
) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1537 [ C(RESULT_ACCESS
) ] = -1,
1538 [ C(RESULT_MISS
) ] = -1,
1540 [ C(OP_PREFETCH
) ] = {
1541 [ C(RESULT_ACCESS
) ] = -1,
1542 [ C(RESULT_MISS
) ] = -1,
1547 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1548 [ C(RESULT_MISS
) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1551 [ C(RESULT_ACCESS
) ] = -1,
1552 [ C(RESULT_MISS
) ] = -1,
1554 [ C(OP_PREFETCH
) ] = {
1555 [ C(RESULT_ACCESS
) ] = -1,
1556 [ C(RESULT_MISS
) ] = -1,
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
/* UOPS_NOT_DELIVERED.ANY */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
/* UOPS_RETIRED.ANY */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
/* UOPS_ISSUED.ANY */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");

static struct attribute *glm_events_attrs[] = {
	EVENT_PTR(td_total_slots_glm),
	EVENT_PTR(td_total_slots_scale_glm),
	EVENT_PTR(td_fetch_bubbles_glm),
	EVENT_PTR(td_recovery_bubbles_glm),
	EVENT_PTR(td_slots_issued_glm),
	EVENT_PTR(td_slots_retired_glm),
	NULL
};
static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
	EVENT_EXTRA_END
};

#define GLM_DEMAND_DATA_RD		BIT_ULL(0)
#define GLM_DEMAND_RFO			BIT_ULL(1)
#define GLM_ANY_RESPONSE		BIT_ULL(16)
#define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
#define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
#define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
#define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
#define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
#define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
#define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)
1600 static __initconst
const u64 glm_hw_cache_event_ids
1601 [PERF_COUNT_HW_CACHE_MAX
]
1602 [PERF_COUNT_HW_CACHE_OP_MAX
]
1603 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1606 [C(RESULT_ACCESS
)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1607 [C(RESULT_MISS
)] = 0x0,
1610 [C(RESULT_ACCESS
)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1611 [C(RESULT_MISS
)] = 0x0,
1613 [C(OP_PREFETCH
)] = {
1614 [C(RESULT_ACCESS
)] = 0x0,
1615 [C(RESULT_MISS
)] = 0x0,
1620 [C(RESULT_ACCESS
)] = 0x0380, /* ICACHE.ACCESSES */
1621 [C(RESULT_MISS
)] = 0x0280, /* ICACHE.MISSES */
1624 [C(RESULT_ACCESS
)] = -1,
1625 [C(RESULT_MISS
)] = -1,
1627 [C(OP_PREFETCH
)] = {
1628 [C(RESULT_ACCESS
)] = 0x0,
1629 [C(RESULT_MISS
)] = 0x0,
1634 [C(RESULT_ACCESS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1635 [C(RESULT_MISS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1638 [C(RESULT_ACCESS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1639 [C(RESULT_MISS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1641 [C(OP_PREFETCH
)] = {
1642 [C(RESULT_ACCESS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1643 [C(RESULT_MISS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1648 [C(RESULT_ACCESS
)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1649 [C(RESULT_MISS
)] = 0x0,
1652 [C(RESULT_ACCESS
)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1653 [C(RESULT_MISS
)] = 0x0,
1655 [C(OP_PREFETCH
)] = {
1656 [C(RESULT_ACCESS
)] = 0x0,
1657 [C(RESULT_MISS
)] = 0x0,
1662 [C(RESULT_ACCESS
)] = 0x00c0, /* INST_RETIRED.ANY_P */
1663 [C(RESULT_MISS
)] = 0x0481, /* ITLB.MISS */
1666 [C(RESULT_ACCESS
)] = -1,
1667 [C(RESULT_MISS
)] = -1,
1669 [C(OP_PREFETCH
)] = {
1670 [C(RESULT_ACCESS
)] = -1,
1671 [C(RESULT_MISS
)] = -1,
1676 [C(RESULT_ACCESS
)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1677 [C(RESULT_MISS
)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1680 [C(RESULT_ACCESS
)] = -1,
1681 [C(RESULT_MISS
)] = -1,
1683 [C(OP_PREFETCH
)] = {
1684 [C(RESULT_ACCESS
)] = -1,
1685 [C(RESULT_MISS
)] = -1,
1690 static __initconst
const u64 glm_hw_cache_extra_regs
1691 [PERF_COUNT_HW_CACHE_MAX
]
1692 [PERF_COUNT_HW_CACHE_OP_MAX
]
1693 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1696 [C(RESULT_ACCESS
)] = GLM_DEMAND_READ
|
1698 [C(RESULT_MISS
)] = GLM_DEMAND_READ
|
1702 [C(RESULT_ACCESS
)] = GLM_DEMAND_WRITE
|
1704 [C(RESULT_MISS
)] = GLM_DEMAND_WRITE
|
1707 [C(OP_PREFETCH
)] = {
1708 [C(RESULT_ACCESS
)] = GLM_DEMAND_PREFETCH
|
1710 [C(RESULT_MISS
)] = GLM_DEMAND_PREFETCH
|
1716 static __initconst
const u64 glp_hw_cache_event_ids
1717 [PERF_COUNT_HW_CACHE_MAX
]
1718 [PERF_COUNT_HW_CACHE_OP_MAX
]
1719 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1722 [C(RESULT_ACCESS
)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1723 [C(RESULT_MISS
)] = 0x0,
1726 [C(RESULT_ACCESS
)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1727 [C(RESULT_MISS
)] = 0x0,
1729 [C(OP_PREFETCH
)] = {
1730 [C(RESULT_ACCESS
)] = 0x0,
1731 [C(RESULT_MISS
)] = 0x0,
1736 [C(RESULT_ACCESS
)] = 0x0380, /* ICACHE.ACCESSES */
1737 [C(RESULT_MISS
)] = 0x0280, /* ICACHE.MISSES */
1740 [C(RESULT_ACCESS
)] = -1,
1741 [C(RESULT_MISS
)] = -1,
1743 [C(OP_PREFETCH
)] = {
1744 [C(RESULT_ACCESS
)] = 0x0,
1745 [C(RESULT_MISS
)] = 0x0,
1750 [C(RESULT_ACCESS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1751 [C(RESULT_MISS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1754 [C(RESULT_ACCESS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1755 [C(RESULT_MISS
)] = 0x1b7, /* OFFCORE_RESPONSE */
1757 [C(OP_PREFETCH
)] = {
1758 [C(RESULT_ACCESS
)] = 0x0,
1759 [C(RESULT_MISS
)] = 0x0,
1764 [C(RESULT_ACCESS
)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1765 [C(RESULT_MISS
)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1768 [C(RESULT_ACCESS
)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1769 [C(RESULT_MISS
)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1771 [C(OP_PREFETCH
)] = {
1772 [C(RESULT_ACCESS
)] = 0x0,
1773 [C(RESULT_MISS
)] = 0x0,
1778 [C(RESULT_ACCESS
)] = 0x00c0, /* INST_RETIRED.ANY_P */
1779 [C(RESULT_MISS
)] = 0x0481, /* ITLB.MISS */
1782 [C(RESULT_ACCESS
)] = -1,
1783 [C(RESULT_MISS
)] = -1,
1785 [C(OP_PREFETCH
)] = {
1786 [C(RESULT_ACCESS
)] = -1,
1787 [C(RESULT_MISS
)] = -1,
1792 [C(RESULT_ACCESS
)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1793 [C(RESULT_MISS
)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1796 [C(RESULT_ACCESS
)] = -1,
1797 [C(RESULT_MISS
)] = -1,
1799 [C(OP_PREFETCH
)] = {
1800 [C(RESULT_ACCESS
)] = -1,
1801 [C(RESULT_MISS
)] = -1,
1806 static __initconst
const u64 glp_hw_cache_extra_regs
1807 [PERF_COUNT_HW_CACHE_MAX
]
1808 [PERF_COUNT_HW_CACHE_OP_MAX
]
1809 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1812 [C(RESULT_ACCESS
)] = GLM_DEMAND_READ
|
1814 [C(RESULT_MISS
)] = GLM_DEMAND_READ
|
1818 [C(RESULT_ACCESS
)] = GLM_DEMAND_WRITE
|
1820 [C(RESULT_MISS
)] = GLM_DEMAND_WRITE
|
1823 [C(OP_PREFETCH
)] = {
1824 [C(RESULT_ACCESS
)] = 0x0,
1825 [C(RESULT_MISS
)] = 0x0,
#define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL	BIT_ULL(21)
#define KNL_MCDRAM_FAR		BIT_ULL(22)
#define KNL_DDR_LOCAL		BIT_ULL(23)
#define KNL_DDR_FAR		BIT_ULL(24)
#define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
				 KNL_DDR_LOCAL | KNL_DDR_FAR)
#define KNL_L2_READ		SLM_DMND_READ
#define KNL_L2_WRITE		SLM_DMND_WRITE
#define KNL_L2_PREFETCH		SLM_DMND_PREFETCH
#define KNL_L2_ACCESS		SLM_LLC_ACCESS
#define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
				 KNL_DRAM_ANY | SNB_SNP_ANY | \
				 SNB_NON_DRAM)
static __initconst const u64 knl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
		},
	},
};
/*
 * Used from PMIs where the LBRs are already disabled.
 *
 * This function could be called consecutively. It is required to remain in
 * disabled state if called consecutively.
 *
 * During consecutive calls, the same disable value will be written to related
 * registers, so the PMU state remains unchanged.
 *
 * intel_bts events don't coexist with intel PMU's BTS events because of
 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
 * disabled around intel PMU's event batching etc, only inside the PMI handler.
 */
static void __intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
}
static void intel_pmu_disable_all(void)
{
	__intel_pmu_disable_all();
	intel_pmu_lbr_disable_all();
}
static void __intel_pmu_enable_all(int added, bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all(pmi);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
static void intel_pmu_enable_all(int added)
{
	__intel_pmu_enable_all(added, false);
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practise it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{
	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;

	if (cpuc->tfa_shadow != val) {
		cpuc->tfa_shadow = val;
		wrmsrl(MSR_TSX_FORCE_ABORT, val);
	}
}
static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	/*
	 * We're going to use PMC3, make sure TFA is set before we touch it.
	 */
	if (cntr == 3 && !cpuc->is_fake)
		intel_set_tfa(cpuc, true);
}
static void intel_tfa_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * If we find PMC3 is no longer used when we enable the PMU, we can
	 * clear TFA.
	 */
	if (!test_bit(3, cpuc->active_mask))
		intel_set_tfa(cpuc, false);

	intel_pmu_enable_all(added);
}
static void enable_counter_freeze(void)
{
	update_debugctlmsr(get_debugctlmsr() |
			DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
}

static void disable_counter_freeze(void)
{
	update_debugctlmsr(get_debugctlmsr() &
			~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}
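/*
 * HSW_IN_TX_CHECKPOINTED corresponds to the in_tx_cp format bit (config:33)
 * defined further down. For example, a checkpointed transactional cycles
 * event can be requested through the cycles-ct alias below, i.e.
 * event=0x3c,in_tx=1,in_tx_cp=1.
 */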
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);
}
static void intel_pmu_del_event(struct perf_event *event)
{
	if (needs_branch_stack(event))
		intel_pmu_lbr_del(event);
	if (event->attr.precise_ip)
		intel_pmu_pebs_del(event);
}
static void intel_pmu_read_event(struct perf_event *event)
{
	if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
		intel_pmu_auto_reload_read(event);
	else
		x86_perf_event_update(event);
}
static void intel_pmu_enable_fixed(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask, bits = 0;

	/*
	 * Enable IRQ generation (0x8), if not PEBS,
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	if (!event->attr.precise_ip)
		bits |= 0x8;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
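/*
 * For example, for fixed counter 1 counting in ring 0 and ring 3 with PMI
 * enabled, bits becomes 0x8 | 0x2 | 0x1 = 0xb and is shifted into bits 7:4
 * of MSR_ARCH_PERFMON_FIXED_CTR_CTRL (idx * 4 == 4), after the old 0xf0
 * field has been masked out.
 */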
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(event_is_checkpointed(event)))
		cpuc->intel_cp_status |= (1ull << hwc->idx);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(event);
		return;
	}

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
static void intel_pmu_add_event(struct perf_event *event)
{
	if (event->attr.precise_ip)
		intel_pmu_pebs_add(event);
	if (needs_branch_stack(event))
		intel_pmu_lbr_add(event);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	/* Ack all overflows and disable fixed counters */
	if (x86_pmu.version >= 2) {
		intel_pmu_ack_status(intel_pmu_get_status());
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	}

	/* Reset LBRs and LBR freezing */
	if (x86_pmu.lbr_nr) {
		update_debugctlmsr(get_debugctlmsr() &
			~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
	}

	local_irq_restore(flags);
}
static int handle_pmi_common(struct pt_regs *regs, u64 status)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int bit;
	int handled = 0;

	inc_irq_stat(apic_perf_irqs);

	/*
	 * Ignore a range of extra bits in status that do not indicate
	 * overflow by themselves.
	 */
	status &= ~(GLOBAL_STATUS_COND_CHG |
		    GLOBAL_STATUS_ASIF |
		    GLOBAL_STATUS_LBRS_FROZEN);

	/*
	 * In case multiple PEBS events are sampled at the same time,
	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
	 * PEBS buffer overflow and also seeing at most 3 PEBS counters
	 * having their bits set in the status register. This is a sign
	 * that there was at least one PEBS record pending at the time
	 * of the PMU interrupt. PEBS counters must only be processed
	 * via the drain_pebs() calls and not via the regular sample
	 * processing loop that follows later in this function, otherwise
	 * phony regular samples may be generated in the sampling buffer
	 * not marked with the EXACT tag. Another possibility is to have
	 * one PEBS event and at least one non-PEBS event which overflows
	 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
	 * not be set, yet the overflow status bit for the PEBS counter will
	 * be on.
	 *
	 * To avoid this problem, we systematically ignore the PEBS-enabled
	 * counters from the GLOBAL_STATUS mask and we always process PEBS
	 * events via drain_pebs().
	 */
	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		status &= ~cpuc->pebs_enabled;
	else
		status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
	}

	/*
	 * Intel PT
	 */
	if (__test_and_clear_bit(55, (unsigned long *)&status)) {
		handled++;
		if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
			     perf_guest_cbs->handle_intel_pt_intr))
			perf_guest_cbs->handle_intel_pt_intr();
		else
			intel_pt_interrupt();
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	return handled;
}
static bool disable_counter_freezing = true;
static int __init intel_perf_counter_freezing_setup(char *s)
{
	bool res;

	if (kstrtobool(s, &res))
		return -EINVAL;

	disable_counter_freezing = !res;
	return 1;
}
__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
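/*
 * For example, booting with "perf_v4_pmi=1" opts in to counter freezing,
 * while "perf_v4_pmi=0" matches the built-in default
 * (disable_counter_freezing == true) and leaves it off.
 */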
2361 * Simplified handler for Arch Perfmon v4:
2362 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2363 * This is done automatically on PMU ack.
2364 * - Ack the PMU only after the APIC.
2367 static int intel_pmu_handle_irq_v4(struct pt_regs
*regs
)
2369 struct cpu_hw_events
*cpuc
= this_cpu_ptr(&cpu_hw_events
);
2373 int pmu_enabled
= cpuc
->enabled
;
2376 /* PMU has been disabled because of counter freezing */
2378 if (test_bit(INTEL_PMC_IDX_FIXED_BTS
, cpuc
->active_mask
)) {
2380 intel_bts_disable_local();
2381 handled
= intel_pmu_drain_bts_buffer();
2382 handled
+= intel_bts_interrupt();
2384 status
= intel_pmu_get_status();
2388 intel_pmu_lbr_read();
2389 if (++loops
> 100) {
2393 WARN(1, "perfevents: irq loop stuck!\n");
2394 perf_event_print_debug();
2402 handled
+= handle_pmi_common(regs
, status
);
2404 /* Ack the PMI in the APIC */
2405 apic_write(APIC_LVTPC
, APIC_DM_NMI
);
	/*
	 * The counters start counting immediately once the status is acked,
	 * so keep the ack as close as possible to the IRET. This avoids
	 * bogus freezing on Skylake CPUs.
	 */
2413 intel_pmu_ack_status(status
);
	/*
	 * The CPU may issue two PMIs very close to each other. When the PMI
	 * handler services the first one, GLOBAL_STATUS has already been
	 * updated to reflect both. When it IRETs, the second PMI is handled
	 * immediately and sees a clear status. In the meantime there may be
	 * a third PMI, because the freezing bit wasn't set, since the first
	 * handler already acked the status.
	 * Double check if there is more work to be done.
	 */
2425 status
= intel_pmu_get_status();
2431 intel_bts_enable_local();
2432 cpuc
->enabled
= pmu_enabled
;
2437 * This handler is triggered by the local APIC, so the APIC IRQ handling
2440 static int intel_pmu_handle_irq(struct pt_regs
*regs
)
2442 struct cpu_hw_events
*cpuc
;
2448 cpuc
= this_cpu_ptr(&cpu_hw_events
);
2451 * Save the PMU state.
2452 * It needs to be restored when leaving the handler.
2454 pmu_enabled
= cpuc
->enabled
;
2456 * No known reason to not always do late ACK,
2457 * but just in case do it opt-in.
2459 if (!x86_pmu
.late_ack
)
2460 apic_write(APIC_LVTPC
, APIC_DM_NMI
);
2461 intel_bts_disable_local();
2463 __intel_pmu_disable_all();
2464 handled
= intel_pmu_drain_bts_buffer();
2465 handled
+= intel_bts_interrupt();
2466 status
= intel_pmu_get_status();
2472 intel_pmu_lbr_read();
2473 intel_pmu_ack_status(status
);
2474 if (++loops
> 100) {
2478 WARN(1, "perfevents: irq loop stuck!\n");
2479 perf_event_print_debug();
2486 handled
+= handle_pmi_common(regs
, status
);
2489 * Repeat if there is more work to be done:
2491 status
= intel_pmu_get_status();
2496 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2497 cpuc
->enabled
= pmu_enabled
;
2499 __intel_pmu_enable_all(0, true);
2500 intel_bts_enable_local();
2503 * Only unmask the NMI after the overflow counters
2504 * have been reset. This avoids spurious NMIs on
2507 if (x86_pmu
.late_ack
)
2508 apic_write(APIC_LVTPC
, APIC_DM_NMI
);
2512 static struct event_constraint
*
2513 intel_bts_constraints(struct perf_event
*event
)
2515 if (unlikely(intel_pmu_has_bts(event
)))
2516 return &bts_constraint
;
2521 static int intel_alt_er(int idx
, u64 config
)
2525 if (!(x86_pmu
.flags
& PMU_FL_HAS_RSP_1
))
2528 if (idx
== EXTRA_REG_RSP_0
)
2529 alt_idx
= EXTRA_REG_RSP_1
;
2531 if (idx
== EXTRA_REG_RSP_1
)
2532 alt_idx
= EXTRA_REG_RSP_0
;
2534 if (config
& ~x86_pmu
.extra_regs
[alt_idx
].valid_mask
)
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
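/*
 * When an event is moved to the alternate OFFCORE_RSP extra register, its
 * event select is rewritten as well (using the .event value from
 * x86_pmu.extra_regs[]), since each OFFCORE_RSP MSR is driven by its own
 * event encoding; this keeps hw.config and extra_reg.reg consistent after
 * intel_alt_er() picks the alternate index.
 */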
/*
 * Manage allocation of the shared extra MSR for certain events.
 *
 * Sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
2562 static struct event_constraint
*
2563 __intel_shared_reg_get_constraints(struct cpu_hw_events
*cpuc
,
2564 struct perf_event
*event
,
2565 struct hw_perf_event_extra
*reg
)
2567 struct event_constraint
*c
= &emptyconstraint
;
2568 struct er_account
*era
;
2569 unsigned long flags
;
2573 * reg->alloc can be set due to existing state, so for fake cpuc we
2574 * need to ignore this, otherwise we might fail to allocate proper fake
2575 * state for this extra reg constraint. Also see the comment below.
2577 if (reg
->alloc
&& !cpuc
->is_fake
)
2578 return NULL
; /* call x86_get_event_constraint() */
2581 era
= &cpuc
->shared_regs
->regs
[idx
];
2583 * we use spin_lock_irqsave() to avoid lockdep issues when
2584 * passing a fake cpuc
2586 raw_spin_lock_irqsave(&era
->lock
, flags
);
2588 if (!atomic_read(&era
->ref
) || era
->config
== reg
->config
) {
2591 * If its a fake cpuc -- as per validate_{group,event}() we
2592 * shouldn't touch event state and we can avoid doing so
2593 * since both will only call get_event_constraints() once
2594 * on each event, this avoids the need for reg->alloc.
2596 * Not doing the ER fixup will only result in era->reg being
2597 * wrong, but since we won't actually try and program hardware
2598 * this isn't a problem either.
2600 if (!cpuc
->is_fake
) {
2601 if (idx
!= reg
->idx
)
2602 intel_fixup_er(event
, idx
);
2605 * x86_schedule_events() can call get_event_constraints()
2606 * multiple times on events in the case of incremental
2607 * scheduling(). reg->alloc ensures we only do the ER
2613 /* lock in msr value */
2614 era
->config
= reg
->config
;
2615 era
->reg
= reg
->reg
;
2618 atomic_inc(&era
->ref
);
2621 * need to call x86_get_event_constraint()
2622 * to check if associated event has constraints
2626 idx
= intel_alt_er(idx
, reg
->config
);
2627 if (idx
!= reg
->idx
) {
2628 raw_spin_unlock_irqrestore(&era
->lock
, flags
);
2632 raw_spin_unlock_irqrestore(&era
->lock
, flags
);
2638 __intel_shared_reg_put_constraints(struct cpu_hw_events
*cpuc
,
2639 struct hw_perf_event_extra
*reg
)
2641 struct er_account
*era
;
2644 * Only put constraint if extra reg was actually allocated. Also takes
2645 * care of event which do not use an extra shared reg.
2647 * Also, if this is a fake cpuc we shouldn't touch any event state
2648 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2649 * either since it'll be thrown out.
2651 if (!reg
->alloc
|| cpuc
->is_fake
)
2654 era
= &cpuc
->shared_regs
->regs
[reg
->idx
];
2656 /* one fewer user */
2657 atomic_dec(&era
->ref
);
2659 /* allocate again next time */
2663 static struct event_constraint
*
2664 intel_shared_regs_constraints(struct cpu_hw_events
*cpuc
,
2665 struct perf_event
*event
)
2667 struct event_constraint
*c
= NULL
, *d
;
2668 struct hw_perf_event_extra
*xreg
, *breg
;
2670 xreg
= &event
->hw
.extra_reg
;
2671 if (xreg
->idx
!= EXTRA_REG_NONE
) {
2672 c
= __intel_shared_reg_get_constraints(cpuc
, event
, xreg
);
2673 if (c
== &emptyconstraint
)
2676 breg
= &event
->hw
.branch_reg
;
2677 if (breg
->idx
!= EXTRA_REG_NONE
) {
2678 d
= __intel_shared_reg_get_constraints(cpuc
, event
, breg
);
2679 if (d
== &emptyconstraint
) {
2680 __intel_shared_reg_put_constraints(cpuc
, xreg
);
2687 struct event_constraint
*
2688 x86_get_event_constraints(struct cpu_hw_events
*cpuc
, int idx
,
2689 struct perf_event
*event
)
2691 struct event_constraint
*c
;
2693 if (x86_pmu
.event_constraints
) {
2694 for_each_event_constraint(c
, x86_pmu
.event_constraints
) {
2695 if ((event
->hw
.config
& c
->cmask
) == c
->code
) {
2696 event
->hw
.flags
|= c
->flags
;
2702 return &unconstrained
;
2705 static struct event_constraint
*
2706 __intel_get_event_constraints(struct cpu_hw_events
*cpuc
, int idx
,
2707 struct perf_event
*event
)
2709 struct event_constraint
*c
;
2711 c
= intel_bts_constraints(event
);
2715 c
= intel_shared_regs_constraints(cpuc
, event
);
2719 c
= intel_pebs_constraints(event
);
2723 return x86_get_event_constraints(cpuc
, idx
, event
);
2727 intel_start_scheduling(struct cpu_hw_events
*cpuc
)
2729 struct intel_excl_cntrs
*excl_cntrs
= cpuc
->excl_cntrs
;
2730 struct intel_excl_states
*xl
;
2731 int tid
= cpuc
->excl_thread_id
;
2734 * nothing needed if in group validation mode
2736 if (cpuc
->is_fake
|| !is_ht_workaround_enabled())
2740 * no exclusion needed
2742 if (WARN_ON_ONCE(!excl_cntrs
))
2745 xl
= &excl_cntrs
->states
[tid
];
2747 xl
->sched_started
= true;
2749 * lock shared state until we are done scheduling
2750 * in stop_event_scheduling()
2751 * makes scheduling appear as a transaction
2753 raw_spin_lock(&excl_cntrs
->lock
);
static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct event_constraint *c = cpuc->event_constraint[idx];
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;

	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
		return;

	xl = &excl_cntrs->states[tid];

	lockdep_assert_held(&excl_cntrs->lock);

	if (c->flags & PERF_X86_EVENT_EXCL)
		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
	else
		xl->state[cntr] = INTEL_EXCL_SHARED;
}
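/*
 * The per-counter state recorded here (INTEL_EXCL_EXCLUSIVE vs.
 * INTEL_EXCL_SHARED, with INTEL_EXCL_UNUSED meaning the counter is free)
 * is what the sibling hyperthread consults in intel_get_excl_constraints()
 * below when deciding which counters it may still use.
 */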
2783 intel_stop_scheduling(struct cpu_hw_events
*cpuc
)
2785 struct intel_excl_cntrs
*excl_cntrs
= cpuc
->excl_cntrs
;
2786 struct intel_excl_states
*xl
;
2787 int tid
= cpuc
->excl_thread_id
;
2790 * nothing needed if in group validation mode
2792 if (cpuc
->is_fake
|| !is_ht_workaround_enabled())
2795 * no exclusion needed
2797 if (WARN_ON_ONCE(!excl_cntrs
))
2800 xl
= &excl_cntrs
->states
[tid
];
2802 xl
->sched_started
= false;
2804 * release shared state lock (acquired in intel_start_scheduling())
2806 raw_spin_unlock(&excl_cntrs
->lock
);
2809 static struct event_constraint
*
2810 dyn_constraint(struct cpu_hw_events
*cpuc
, struct event_constraint
*c
, int idx
)
2812 WARN_ON_ONCE(!cpuc
->constraint_list
);
2814 if (!(c
->flags
& PERF_X86_EVENT_DYNAMIC
)) {
2815 struct event_constraint
*cx
;
2818 * grab pre-allocated constraint entry
2820 cx
= &cpuc
->constraint_list
[idx
];
2823 * initialize dynamic constraint
2824 * with static constraint
2829 * mark constraint as dynamic
2831 cx
->flags
|= PERF_X86_EVENT_DYNAMIC
;
2838 static struct event_constraint
*
2839 intel_get_excl_constraints(struct cpu_hw_events
*cpuc
, struct perf_event
*event
,
2840 int idx
, struct event_constraint
*c
)
2842 struct intel_excl_cntrs
*excl_cntrs
= cpuc
->excl_cntrs
;
2843 struct intel_excl_states
*xlo
;
2844 int tid
= cpuc
->excl_thread_id
;
2848 * validating a group does not require
2849 * enforcing cross-thread exclusion
2851 if (cpuc
->is_fake
|| !is_ht_workaround_enabled())
2855 * no exclusion needed
2857 if (WARN_ON_ONCE(!excl_cntrs
))
2861 * because we modify the constraint, we need
2862 * to make a copy. Static constraints come
2863 * from static const tables.
2865 * only needed when constraint has not yet
2866 * been cloned (marked dynamic)
2868 c
= dyn_constraint(cpuc
, c
, idx
);
2871 * From here on, the constraint is dynamic.
2872 * Either it was just allocated above, or it
2873 * was allocated during a earlier invocation
2878 * state of sibling HT
2880 xlo
= &excl_cntrs
->states
[tid
^ 1];
2883 * event requires exclusive counter access
2886 is_excl
= c
->flags
& PERF_X86_EVENT_EXCL
;
2887 if (is_excl
&& !(event
->hw
.flags
& PERF_X86_EVENT_EXCL_ACCT
)) {
2888 event
->hw
.flags
|= PERF_X86_EVENT_EXCL_ACCT
;
2889 if (!cpuc
->n_excl
++)
2890 WRITE_ONCE(excl_cntrs
->has_exclusive
[tid
], 1);
2894 * Modify static constraint with current dynamic
2897 * EXCLUSIVE: sibling counter measuring exclusive event
2898 * SHARED : sibling counter measuring non-exclusive event
2899 * UNUSED : sibling counter unused
2901 for_each_set_bit(i
, c
->idxmsk
, X86_PMC_IDX_MAX
) {
2903 * exclusive event in sibling counter
2904 * our corresponding counter cannot be used
2905 * regardless of our event
2907 if (xlo
->state
[i
] == INTEL_EXCL_EXCLUSIVE
)
2908 __clear_bit(i
, c
->idxmsk
);
2910 * if measuring an exclusive event, sibling
2911 * measuring non-exclusive, then counter cannot
2914 if (is_excl
&& xlo
->state
[i
] == INTEL_EXCL_SHARED
)
2915 __clear_bit(i
, c
->idxmsk
);
2919 * recompute actual bit weight for scheduling algorithm
2921 c
->weight
= hweight64(c
->idxmsk64
);
2924 * if we return an empty mask, then switch
2925 * back to static empty constraint to avoid
2926 * the cost of freeing later on
2929 c
= &emptyconstraint
;
2934 static struct event_constraint
*
2935 intel_get_event_constraints(struct cpu_hw_events
*cpuc
, int idx
,
2936 struct perf_event
*event
)
2938 struct event_constraint
*c1
= NULL
;
2939 struct event_constraint
*c2
;
2941 if (idx
>= 0) /* fake does < 0 */
2942 c1
= cpuc
->event_constraint
[idx
];
2946 * - static constraint: no change across incremental scheduling calls
2947 * - dynamic constraint: handled by intel_get_excl_constraints()
2949 c2
= __intel_get_event_constraints(cpuc
, idx
, event
);
2950 if (c1
&& (c1
->flags
& PERF_X86_EVENT_DYNAMIC
)) {
2951 bitmap_copy(c1
->idxmsk
, c2
->idxmsk
, X86_PMC_IDX_MAX
);
2952 c1
->weight
= c2
->weight
;
2956 if (cpuc
->excl_cntrs
)
2957 return intel_get_excl_constraints(cpuc
, event
, idx
, c2
);
2962 static void intel_put_excl_constraints(struct cpu_hw_events
*cpuc
,
2963 struct perf_event
*event
)
2965 struct hw_perf_event
*hwc
= &event
->hw
;
2966 struct intel_excl_cntrs
*excl_cntrs
= cpuc
->excl_cntrs
;
2967 int tid
= cpuc
->excl_thread_id
;
2968 struct intel_excl_states
*xl
;
2971 * nothing needed if in group validation mode
2976 if (WARN_ON_ONCE(!excl_cntrs
))
2979 if (hwc
->flags
& PERF_X86_EVENT_EXCL_ACCT
) {
2980 hwc
->flags
&= ~PERF_X86_EVENT_EXCL_ACCT
;
2981 if (!--cpuc
->n_excl
)
2982 WRITE_ONCE(excl_cntrs
->has_exclusive
[tid
], 0);
2986 * If event was actually assigned, then mark the counter state as
2989 if (hwc
->idx
>= 0) {
2990 xl
= &excl_cntrs
->states
[tid
];
2993 * put_constraint may be called from x86_schedule_events()
2994 * which already has the lock held so here make locking
2997 if (!xl
->sched_started
)
2998 raw_spin_lock(&excl_cntrs
->lock
);
3000 xl
->state
[hwc
->idx
] = INTEL_EXCL_UNUSED
;
3002 if (!xl
->sched_started
)
3003 raw_spin_unlock(&excl_cntrs
->lock
);
3008 intel_put_shared_regs_event_constraints(struct cpu_hw_events
*cpuc
,
3009 struct perf_event
*event
)
3011 struct hw_perf_event_extra
*reg
;
3013 reg
= &event
->hw
.extra_reg
;
3014 if (reg
->idx
!= EXTRA_REG_NONE
)
3015 __intel_shared_reg_put_constraints(cpuc
, reg
);
3017 reg
= &event
->hw
.branch_reg
;
3018 if (reg
->idx
!= EXTRA_REG_NONE
)
3019 __intel_shared_reg_put_constraints(cpuc
, reg
);
3022 static void intel_put_event_constraints(struct cpu_hw_events
*cpuc
,
3023 struct perf_event
*event
)
3025 intel_put_shared_regs_event_constraints(cpuc
, event
);
	/*
	 * If the PMU has exclusive counter restrictions, then
	 * all events are subject to them and must call the
	 * put_excl_constraints() routine.
	 */
3032 if (cpuc
->excl_cntrs
)
3033 intel_put_excl_constraints(cpuc
, event
);
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P with a count mask counts the cycles in
		 * which at least CNTMASK instructions retire. By setting
		 * CNTMASK to a value (16) larger than the maximum number of
		 * instructions that can be retired per cycle (4) and then
		 * inverting the condition, we count all cycles that retire
		 * 16 or fewer instructions, which is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
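/*
 * With the format fields defined later in this file (event config:0-7,
 * inv config:23, cmask config:24-31), the alternative encoding above
 * corresponds to the raw config 0x108000c0, i.e. what user space could
 * also request as cpu/event=0xc0,inv=1,cmask=16/.
 */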
3064 static void intel_pebs_aliases_snb(struct perf_event
*event
)
3066 if ((event
->hw
.config
& X86_RAW_EVENT_MASK
) == 0x003c) {
3068 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3069 * (0x003c) so that we can use it with PEBS.
3071 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3072 * PEBS capable. However we can use UOPS_RETIRED.ALL
3073 * (0x01c2), which is a PEBS capable event, to get the same
3076 * UOPS_RETIRED.ALL counts the number of cycles that retires
3077 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3078 * larger than the maximum number of micro-ops that can be
3079 * retired per cycle (4) and then inverting the condition, we
3080 * count all cycles that retire 16 or less micro-ops, which
3083 * Thereby we gain a PEBS capable cycle counter.
3085 u64 alt_config
= X86_CONFIG(.event
=0xc2, .umask
=0x01, .inv
=1, .cmask
=16);
3087 alt_config
|= (event
->hw
.config
& ~X86_RAW_EVENT_MASK
);
3088 event
->hw
.config
= alt_config
;
3092 static void intel_pebs_aliases_precdist(struct perf_event
*event
)
3094 if ((event
->hw
.config
& X86_RAW_EVENT_MASK
) == 0x003c) {
3096 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3097 * (0x003c) so that we can use it with PEBS.
3099 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3100 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3101 * (0x01c0), which is a PEBS capable event, to get the same
3104 * The PREC_DIST event has special support to minimize sample
3105 * shadowing effects. One drawback is that it can be
3106 * only programmed on counter 1, but that seems like an
3107 * acceptable trade off.
3109 u64 alt_config
= X86_CONFIG(.event
=0xc0, .umask
=0x01, .inv
=1, .cmask
=16);
3111 alt_config
|= (event
->hw
.config
& ~X86_RAW_EVENT_MASK
);
3112 event
->hw
.config
= alt_config
;
3116 static void intel_pebs_aliases_ivb(struct perf_event
*event
)
3118 if (event
->attr
.precise_ip
< 3)
3119 return intel_pebs_aliases_snb(event
);
3120 return intel_pebs_aliases_precdist(event
);
3123 static void intel_pebs_aliases_skl(struct perf_event
*event
)
3125 if (event
->attr
.precise_ip
< 3)
3126 return intel_pebs_aliases_core2(event
);
3127 return intel_pebs_aliases_precdist(event
);
3130 static unsigned long intel_pmu_large_pebs_flags(struct perf_event
*event
)
3132 unsigned long flags
= x86_pmu
.large_pebs_flags
;
3134 if (event
->attr
.use_clockid
)
3135 flags
&= ~PERF_SAMPLE_TIME
;
3136 if (!event
->attr
.exclude_kernel
)
3137 flags
&= ~PERF_SAMPLE_REGS_USER
;
3138 if (event
->attr
.sample_regs_user
& ~PEBS_REGS
)
3139 flags
&= ~(PERF_SAMPLE_REGS_USER
| PERF_SAMPLE_REGS_INTR
);
3143 static int intel_pmu_bts_config(struct perf_event
*event
)
3145 struct perf_event_attr
*attr
= &event
->attr
;
3147 if (unlikely(intel_pmu_has_bts(event
))) {
3148 /* BTS is not supported by this architecture. */
3149 if (!x86_pmu
.bts_active
)
3152 /* BTS is currently only allowed for user-mode. */
3153 if (!attr
->exclude_kernel
)
3156 /* BTS is not allowed for precise events. */
3157 if (attr
->precise_ip
)
3160 /* disallow bts if conflicting events are present */
3161 if (x86_add_exclusive(x86_lbr_exclusive_lbr
))
3164 event
->destroy
= hw_perf_lbr_event_destroy
;
3170 static int core_pmu_hw_config(struct perf_event
*event
)
3172 int ret
= x86_pmu_hw_config(event
);
3177 return intel_pmu_bts_config(event
);
3180 static int intel_pmu_hw_config(struct perf_event
*event
)
3182 int ret
= x86_pmu_hw_config(event
);
3187 ret
= intel_pmu_bts_config(event
);
3191 if (event
->attr
.precise_ip
) {
3192 if (!event
->attr
.freq
) {
3193 event
->hw
.flags
|= PERF_X86_EVENT_AUTO_RELOAD
;
3194 if (!(event
->attr
.sample_type
&
3195 ~intel_pmu_large_pebs_flags(event
)))
3196 event
->hw
.flags
|= PERF_X86_EVENT_LARGE_PEBS
;
3198 if (x86_pmu
.pebs_aliases
)
3199 x86_pmu
.pebs_aliases(event
);
3201 if (event
->attr
.sample_type
& PERF_SAMPLE_CALLCHAIN
)
3202 event
->attr
.sample_type
|= __PERF_SAMPLE_CALLCHAIN_EARLY
;
3205 if (needs_branch_stack(event
)) {
3206 ret
= intel_pmu_setup_lbr_filter(event
);
3211 * BTS is set up earlier in this path, so don't account twice
3213 if (!unlikely(intel_pmu_has_bts(event
))) {
3214 /* disallow lbr if conflicting events are present */
3215 if (x86_add_exclusive(x86_lbr_exclusive_lbr
))
3218 event
->destroy
= hw_perf_lbr_event_destroy
;
3222 if (event
->attr
.type
!= PERF_TYPE_RAW
)
3225 if (!(event
->attr
.config
& ARCH_PERFMON_EVENTSEL_ANY
))
3228 if (x86_pmu
.version
< 3)
3231 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN
))
3234 event
->hw
.config
|= ARCH_PERFMON_EVENTSEL_ANY
;
3239 struct perf_guest_switch_msr
*perf_guest_get_msrs(int *nr
)
3241 if (x86_pmu
.guest_get_msrs
)
3242 return x86_pmu
.guest_get_msrs(nr
);
3246 EXPORT_SYMBOL_GPL(perf_guest_get_msrs
);
3248 static struct perf_guest_switch_msr
*intel_guest_get_msrs(int *nr
)
3250 struct cpu_hw_events
*cpuc
= this_cpu_ptr(&cpu_hw_events
);
3251 struct perf_guest_switch_msr
*arr
= cpuc
->guest_switch_msrs
;
3253 arr
[0].msr
= MSR_CORE_PERF_GLOBAL_CTRL
;
3254 arr
[0].host
= x86_pmu
.intel_ctrl
& ~cpuc
->intel_ctrl_guest_mask
;
3255 arr
[0].guest
= x86_pmu
.intel_ctrl
& ~cpuc
->intel_ctrl_host_mask
;
3256 if (x86_pmu
.flags
& PMU_FL_PEBS_ALL
)
3257 arr
[0].guest
&= ~cpuc
->pebs_enabled
;
3259 arr
[0].guest
&= ~(cpuc
->pebs_enabled
& PEBS_COUNTER_MASK
);
3262 if (x86_pmu
.pebs
&& x86_pmu
.pebs_no_isolation
) {
3264 * If PMU counter has PEBS enabled it is not enough to
3265 * disable counter on a guest entry since PEBS memory
3266 * write can overshoot guest entry and corrupt guest
3267 * memory. Disabling PEBS solves the problem.
3269 * Don't do this if the CPU already enforces it.
3271 arr
[1].msr
= MSR_IA32_PEBS_ENABLE
;
3272 arr
[1].host
= cpuc
->pebs_enabled
;
3280 static struct perf_guest_switch_msr
*core_guest_get_msrs(int *nr
)
3282 struct cpu_hw_events
*cpuc
= this_cpu_ptr(&cpu_hw_events
);
3283 struct perf_guest_switch_msr
*arr
= cpuc
->guest_switch_msrs
;
3286 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
3287 struct perf_event
*event
= cpuc
->events
[idx
];
3289 arr
[idx
].msr
= x86_pmu_config_addr(idx
);
3290 arr
[idx
].host
= arr
[idx
].guest
= 0;
3292 if (!test_bit(idx
, cpuc
->active_mask
))
3295 arr
[idx
].host
= arr
[idx
].guest
=
3296 event
->hw
.config
| ARCH_PERFMON_EVENTSEL_ENABLE
;
3298 if (event
->attr
.exclude_host
)
3299 arr
[idx
].host
&= ~ARCH_PERFMON_EVENTSEL_ENABLE
;
3300 else if (event
->attr
.exclude_guest
)
3301 arr
[idx
].guest
&= ~ARCH_PERFMON_EVENTSEL_ENABLE
;
3304 *nr
= x86_pmu
.num_counters
;
3308 static void core_pmu_enable_event(struct perf_event
*event
)
3310 if (!event
->attr
.exclude_host
)
3311 x86_pmu_enable_event(event
);
3314 static void core_pmu_enable_all(int added
)
3316 struct cpu_hw_events
*cpuc
= this_cpu_ptr(&cpu_hw_events
);
3319 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
3320 struct hw_perf_event
*hwc
= &cpuc
->events
[idx
]->hw
;
3322 if (!test_bit(idx
, cpuc
->active_mask
) ||
3323 cpuc
->events
[idx
]->attr
.exclude_host
)
3326 __x86_pmu_enable_event(hwc
, ARCH_PERFMON_EVENTSEL_ENABLE
);
3330 static int hsw_hw_config(struct perf_event
*event
)
3332 int ret
= intel_pmu_hw_config(event
);
3336 if (!boot_cpu_has(X86_FEATURE_RTM
) && !boot_cpu_has(X86_FEATURE_HLE
))
3338 event
->hw
.config
|= event
->attr
.config
& (HSW_IN_TX
|HSW_IN_TX_CHECKPOINTED
);
	/*
	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
	 * PEBS or in ANY thread mode. Since the results are nonsensical,
	 * forbid this combination.
	 */
3345 if ((event
->hw
.config
& (HSW_IN_TX
|HSW_IN_TX_CHECKPOINTED
)) &&
3346 ((event
->hw
.config
& ARCH_PERFMON_EVENTSEL_ANY
) ||
3347 event
->attr
.precise_ip
> 0))
3350 if (event_is_checkpointed(event
)) {
		/*
		 * Sampling of checkpointed events can cause situations where
		 * the CPU constantly aborts because of an overflow, which is
		 * then checkpointed back and ignored. Forbid checkpointing
		 * for sampling.
		 *
		 * But still allow a long sampling period, so that perf stat
		 * from KVM works.
		 */
3360 if (event
->attr
.sample_period
> 0 &&
3361 event
->attr
.sample_period
< 0x7fffffff)
static struct event_constraint counter0_constraint =
			INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);

static struct event_constraint counter2_constraint =
			EVENT_CONSTRAINT(0, 0x4, 0);
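/*
 * The counter bitmask is the second argument of these constraints:
 * counter0_constraint (0x1) restricts an event to PMC0 (used below for
 * reduced-skid :ppp PEBS), while counter2_constraint (0x4) restricts it
 * to PMC2 (used for the HSW_IN_TX_CHECKPOINTED quirk).
 */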
3373 static struct event_constraint
*
3374 hsw_get_event_constraints(struct cpu_hw_events
*cpuc
, int idx
,
3375 struct perf_event
*event
)
3377 struct event_constraint
*c
;
3379 c
= intel_get_event_constraints(cpuc
, idx
, event
);
3381 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3382 if (event
->hw
.config
& HSW_IN_TX_CHECKPOINTED
) {
3383 if (c
->idxmsk64
& (1U << 2))
3384 return &counter2_constraint
;
3385 return &emptyconstraint
;
3391 static struct event_constraint
*
3392 glp_get_event_constraints(struct cpu_hw_events
*cpuc
, int idx
,
3393 struct perf_event
*event
)
3395 struct event_constraint
*c
;
3397 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3398 if (event
->attr
.precise_ip
== 3)
3399 return &counter0_constraint
;
3401 c
= intel_get_event_constraints(cpuc
, idx
, event
);
3406 static bool allow_tsx_force_abort
= true;
3408 static struct event_constraint
*
3409 tfa_get_event_constraints(struct cpu_hw_events
*cpuc
, int idx
,
3410 struct perf_event
*event
)
3412 struct event_constraint
*c
= hsw_get_event_constraints(cpuc
, idx
, event
);
3415 * Without TFA we must not use PMC3.
3417 if (!allow_tsx_force_abort
&& test_bit(3, c
->idxmsk
) && idx
>= 0) {
3418 c
= dyn_constraint(cpuc
, c
, idx
);
3419 c
->idxmsk64
&= ~(1ULL << 3);
3429 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3430 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3431 * the two to enforce a minimum period of 128 (the smallest value that has bits
3432 * 0-5 cleared and >= 100).
3434 * Because of how the code in x86_perf_event_set_period() works, the truncation
3435 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3436 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3438 * Therefore the effective (average) period matches the requested period,
3439 * despite coarser hardware granularity.
static u64 bdw_limit_period(struct perf_event *event, u64 left)
{
	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
			X86_CONFIG(.event=0xc0, .umask=0x01)) {
		if (left < 128)
			left = 128;
		left &= ~0x3fULL;
	}
	return left;
}
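/*
 * For example, a requested period of 100 is raised to 128, and a period of
 * 1000 is rounded down to 960, satisfying both BDM11 and BDM55 as described
 * above.
 */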
3452 PMU_FORMAT_ATTR(event
, "config:0-7" );
3453 PMU_FORMAT_ATTR(umask
, "config:8-15" );
3454 PMU_FORMAT_ATTR(edge
, "config:18" );
3455 PMU_FORMAT_ATTR(pc
, "config:19" );
3456 PMU_FORMAT_ATTR(any
, "config:21" ); /* v3 + */
3457 PMU_FORMAT_ATTR(inv
, "config:23" );
3458 PMU_FORMAT_ATTR(cmask
, "config:24-31" );
3459 PMU_FORMAT_ATTR(in_tx
, "config:32");
3460 PMU_FORMAT_ATTR(in_tx_cp
, "config:33");
3462 static struct attribute
*intel_arch_formats_attr
[] = {
3463 &format_attr_event
.attr
,
3464 &format_attr_umask
.attr
,
3465 &format_attr_edge
.attr
,
3466 &format_attr_pc
.attr
,
3467 &format_attr_inv
.attr
,
3468 &format_attr_cmask
.attr
,
3472 ssize_t
intel_event_sysfs_show(char *page
, u64 config
)
3474 u64 event
= (config
& ARCH_PERFMON_EVENTSEL_EVENT
);
3476 return x86_event_sysfs_show(page
, config
, event
);
3479 static struct intel_shared_regs
*allocate_shared_regs(int cpu
)
3481 struct intel_shared_regs
*regs
;
3484 regs
= kzalloc_node(sizeof(struct intel_shared_regs
),
3485 GFP_KERNEL
, cpu_to_node(cpu
));
3488 * initialize the locks to keep lockdep happy
3490 for (i
= 0; i
< EXTRA_REG_MAX
; i
++)
3491 raw_spin_lock_init(®s
->regs
[i
].lock
);
3498 static struct intel_excl_cntrs
*allocate_excl_cntrs(int cpu
)
3500 struct intel_excl_cntrs
*c
;
3502 c
= kzalloc_node(sizeof(struct intel_excl_cntrs
),
3503 GFP_KERNEL
, cpu_to_node(cpu
));
3505 raw_spin_lock_init(&c
->lock
);
3512 int intel_cpuc_prepare(struct cpu_hw_events
*cpuc
, int cpu
)
3514 if (x86_pmu
.extra_regs
|| x86_pmu
.lbr_sel_map
) {
3515 cpuc
->shared_regs
= allocate_shared_regs(cpu
);
3516 if (!cpuc
->shared_regs
)
3520 if (x86_pmu
.flags
& (PMU_FL_EXCL_CNTRS
| PMU_FL_TFA
)) {
3521 size_t sz
= X86_PMC_IDX_MAX
* sizeof(struct event_constraint
);
3523 cpuc
->constraint_list
= kzalloc_node(sz
, GFP_KERNEL
, cpu_to_node(cpu
));
3524 if (!cpuc
->constraint_list
)
3525 goto err_shared_regs
;
3528 if (x86_pmu
.flags
& PMU_FL_EXCL_CNTRS
) {
3529 cpuc
->excl_cntrs
= allocate_excl_cntrs(cpu
);
3530 if (!cpuc
->excl_cntrs
)
3531 goto err_constraint_list
;
3533 cpuc
->excl_thread_id
= 0;
3538 err_constraint_list
:
3539 kfree(cpuc
->constraint_list
);
3540 cpuc
->constraint_list
= NULL
;
3543 kfree(cpuc
->shared_regs
);
3544 cpuc
->shared_regs
= NULL
;
3550 static int intel_pmu_cpu_prepare(int cpu
)
3552 return intel_cpuc_prepare(&per_cpu(cpu_hw_events
, cpu
), cpu
);
3555 static void flip_smm_bit(void *data
)
3557 unsigned long set
= *(unsigned long *)data
;
3560 msr_set_bit(MSR_IA32_DEBUGCTLMSR
,
3561 DEBUGCTLMSR_FREEZE_IN_SMM_BIT
);
3563 msr_clear_bit(MSR_IA32_DEBUGCTLMSR
,
3564 DEBUGCTLMSR_FREEZE_IN_SMM_BIT
);
3568 static void intel_pmu_cpu_starting(int cpu
)
3570 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
3571 int core_id
= topology_core_id(cpu
);
3574 init_debug_store_on_cpu(cpu
);
3576 * Deal with CPUs that don't clear their LBRs on power-up.
3578 intel_pmu_lbr_reset();
3580 cpuc
->lbr_sel
= NULL
;
3582 if (x86_pmu
.version
> 1)
3583 flip_smm_bit(&x86_pmu
.attr_freeze_on_smi
);
3585 if (x86_pmu
.counter_freezing
)
3586 enable_counter_freeze();
3588 if (!cpuc
->shared_regs
)
3591 if (!(x86_pmu
.flags
& PMU_FL_NO_HT_SHARING
)) {
3592 for_each_cpu(i
, topology_sibling_cpumask(cpu
)) {
3593 struct intel_shared_regs
*pc
;
3595 pc
= per_cpu(cpu_hw_events
, i
).shared_regs
;
3596 if (pc
&& pc
->core_id
== core_id
) {
3597 cpuc
->kfree_on_online
[0] = cpuc
->shared_regs
;
3598 cpuc
->shared_regs
= pc
;
3602 cpuc
->shared_regs
->core_id
= core_id
;
3603 cpuc
->shared_regs
->refcnt
++;
3606 if (x86_pmu
.lbr_sel_map
)
3607 cpuc
->lbr_sel
= &cpuc
->shared_regs
->regs
[EXTRA_REG_LBR
];
3609 if (x86_pmu
.flags
& PMU_FL_EXCL_CNTRS
) {
3610 for_each_cpu(i
, topology_sibling_cpumask(cpu
)) {
3611 struct cpu_hw_events
*sibling
;
3612 struct intel_excl_cntrs
*c
;
3614 sibling
= &per_cpu(cpu_hw_events
, i
);
3615 c
= sibling
->excl_cntrs
;
3616 if (c
&& c
->core_id
== core_id
) {
3617 cpuc
->kfree_on_online
[1] = cpuc
->excl_cntrs
;
3618 cpuc
->excl_cntrs
= c
;
3619 if (!sibling
->excl_thread_id
)
3620 cpuc
->excl_thread_id
= 1;
3624 cpuc
->excl_cntrs
->core_id
= core_id
;
3625 cpuc
->excl_cntrs
->refcnt
++;
3629 static void free_excl_cntrs(struct cpu_hw_events
*cpuc
)
3631 struct intel_excl_cntrs
*c
;
3633 c
= cpuc
->excl_cntrs
;
3635 if (c
->core_id
== -1 || --c
->refcnt
== 0)
3637 cpuc
->excl_cntrs
= NULL
;
3640 kfree(cpuc
->constraint_list
);
3641 cpuc
->constraint_list
= NULL
;
3644 static void intel_pmu_cpu_dying(int cpu
)
3646 fini_debug_store_on_cpu(cpu
);
3648 if (x86_pmu
.counter_freezing
)
3649 disable_counter_freeze();
3652 void intel_cpuc_finish(struct cpu_hw_events
*cpuc
)
3654 struct intel_shared_regs
*pc
;
3656 pc
= cpuc
->shared_regs
;
3658 if (pc
->core_id
== -1 || --pc
->refcnt
== 0)
3660 cpuc
->shared_regs
= NULL
;
3663 free_excl_cntrs(cpuc
);
3666 static void intel_pmu_cpu_dead(int cpu
)
3668 intel_cpuc_finish(&per_cpu(cpu_hw_events
, cpu
));
3671 static void intel_pmu_sched_task(struct perf_event_context
*ctx
,
3674 intel_pmu_pebs_sched_task(ctx
, sched_in
);
3675 intel_pmu_lbr_sched_task(ctx
, sched_in
);
3678 static int intel_pmu_check_period(struct perf_event
*event
, u64 value
)
3680 return intel_pmu_has_bts_period(event
, value
) ? -EINVAL
: 0;
3683 PMU_FORMAT_ATTR(offcore_rsp
, "config1:0-63");
3685 PMU_FORMAT_ATTR(ldlat
, "config1:0-15");
3687 PMU_FORMAT_ATTR(frontend
, "config1:0-23");
3689 static struct attribute
*intel_arch3_formats_attr
[] = {
3690 &format_attr_event
.attr
,
3691 &format_attr_umask
.attr
,
3692 &format_attr_edge
.attr
,
3693 &format_attr_pc
.attr
,
3694 &format_attr_any
.attr
,
3695 &format_attr_inv
.attr
,
3696 &format_attr_cmask
.attr
,
3700 static struct attribute
*hsw_format_attr
[] = {
3701 &format_attr_in_tx
.attr
,
3702 &format_attr_in_tx_cp
.attr
,
3703 &format_attr_offcore_rsp
.attr
,
3704 &format_attr_ldlat
.attr
,
3708 static struct attribute
*nhm_format_attr
[] = {
3709 &format_attr_offcore_rsp
.attr
,
3710 &format_attr_ldlat
.attr
,
3714 static struct attribute
*slm_format_attr
[] = {
3715 &format_attr_offcore_rsp
.attr
,
3719 static struct attribute
*skl_format_attr
[] = {
3720 &format_attr_frontend
.attr
,
3724 static __initconst
const struct x86_pmu core_pmu
= {
3726 .handle_irq
= x86_pmu_handle_irq
,
3727 .disable_all
= x86_pmu_disable_all
,
3728 .enable_all
= core_pmu_enable_all
,
3729 .enable
= core_pmu_enable_event
,
3730 .disable
= x86_pmu_disable_event
,
3731 .hw_config
= core_pmu_hw_config
,
3732 .schedule_events
= x86_schedule_events
,
3733 .eventsel
= MSR_ARCH_PERFMON_EVENTSEL0
,
3734 .perfctr
= MSR_ARCH_PERFMON_PERFCTR0
,
3735 .event_map
= intel_pmu_event_map
,
3736 .max_events
= ARRAY_SIZE(intel_perfmon_event_map
),
3738 .large_pebs_flags
= LARGE_PEBS_FLAGS
,
3741 * Intel PMCs cannot be accessed sanely above 32-bit width,
3742 * so we install an artificial 1<<31 period regardless of
3743 * the generic event period:
3745 .max_period
= (1ULL<<31) - 1,
3746 .get_event_constraints
= intel_get_event_constraints
,
3747 .put_event_constraints
= intel_put_event_constraints
,
3748 .event_constraints
= intel_core_event_constraints
,
3749 .guest_get_msrs
= core_guest_get_msrs
,
3750 .format_attrs
= intel_arch_formats_attr
,
3751 .events_sysfs_show
= intel_event_sysfs_show
,
3754 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
3755 * together with PMU version 1 and thus be using core_pmu with
3756 * shared_regs. We need following callbacks here to allocate
3759 .cpu_prepare
= intel_pmu_cpu_prepare
,
3760 .cpu_starting
= intel_pmu_cpu_starting
,
3761 .cpu_dying
= intel_pmu_cpu_dying
,
3762 .cpu_dead
= intel_pmu_cpu_dead
,
3764 .check_period
= intel_pmu_check_period
,
3767 static struct attribute
*intel_pmu_attrs
[];
3769 static __initconst
const struct x86_pmu intel_pmu
= {
3771 .handle_irq
= intel_pmu_handle_irq
,
3772 .disable_all
= intel_pmu_disable_all
,
3773 .enable_all
= intel_pmu_enable_all
,
3774 .enable
= intel_pmu_enable_event
,
3775 .disable
= intel_pmu_disable_event
,
3776 .add
= intel_pmu_add_event
,
3777 .del
= intel_pmu_del_event
,
3778 .read
= intel_pmu_read_event
,
3779 .hw_config
= intel_pmu_hw_config
,
3780 .schedule_events
= x86_schedule_events
,
3781 .eventsel
= MSR_ARCH_PERFMON_EVENTSEL0
,
3782 .perfctr
= MSR_ARCH_PERFMON_PERFCTR0
,
3783 .event_map
= intel_pmu_event_map
,
3784 .max_events
= ARRAY_SIZE(intel_perfmon_event_map
),
3786 .large_pebs_flags
= LARGE_PEBS_FLAGS
,
3788 * Intel PMCs cannot be accessed sanely above 32 bit width,
3789 * so we install an artificial 1<<31 period regardless of
3790 * the generic event period:
3792 .max_period
= (1ULL << 31) - 1,
3793 .get_event_constraints
= intel_get_event_constraints
,
3794 .put_event_constraints
= intel_put_event_constraints
,
3795 .pebs_aliases
= intel_pebs_aliases_core2
,
3797 .format_attrs
= intel_arch3_formats_attr
,
3798 .events_sysfs_show
= intel_event_sysfs_show
,
3800 .attrs
= intel_pmu_attrs
,
3802 .cpu_prepare
= intel_pmu_cpu_prepare
,
3803 .cpu_starting
= intel_pmu_cpu_starting
,
3804 .cpu_dying
= intel_pmu_cpu_dying
,
3805 .cpu_dead
= intel_pmu_cpu_dead
,
3807 .guest_get_msrs
= intel_guest_get_msrs
,
3808 .sched_task
= intel_pmu_sched_task
,
3810 .check_period
= intel_pmu_check_period
,
3813 static __init
void intel_clovertown_quirk(void)
3816 * PEBS is unreliable due to:
3818 * AJ67 - PEBS may experience CPL leaks
3819 * AJ68 - PEBS PMI may be delayed by one event
3820 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3821 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3823 * AJ67 could be worked around by restricting the OS/USR flags.
3824 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3826 * AJ106 could possibly be worked around by not allowing LBR
3827 * usage from PEBS, including the fixup.
3828 * AJ68 could possibly be worked around by always programming
3829 * a pebs_event_reset[0] value and coping with the lost events.
3831 * But taken together it might just make sense to not enable PEBS on
3834 pr_warn("PEBS disabled due to CPU errata\n");
3836 x86_pmu
.pebs_constraints
= NULL
;
3839 static const struct x86_cpu_desc isolation_ucodes
[] = {
3840 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE
, 3, 0x0000001f),
3841 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT
, 1, 0x0000001e),
3842 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E
, 1, 0x00000015),
3843 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X
, 2, 0x00000037),
3844 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X
, 4, 0x0000000a),
3845 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE
, 4, 0x00000023),
3846 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E
, 1, 0x00000014),
3847 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D
, 2, 0x00000010),
3848 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D
, 3, 0x07000009),
3849 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D
, 4, 0x0f000009),
3850 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D
, 5, 0x0e000002),
3851 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X
, 2, 0x0b000014),
3852 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X
, 3, 0x00000021),
3853 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X
, 4, 0x00000000),
3854 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE
, 3, 0x0000007c),
3855 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP
, 3, 0x0000007c),
3856 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP
, 9, 0x0000004e),
3857 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE
, 9, 0x0000004e),
3858 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE
, 10, 0x0000004e),
3859 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE
, 11, 0x0000004e),
3860 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE
, 12, 0x0000004e),
3861 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP
, 10, 0x0000004e),
3862 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP
, 11, 0x0000004e),
3863 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP
, 12, 0x0000004e),
3864 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP
, 13, 0x0000004e),
3868 static void intel_check_pebs_isolation(void)
3870 x86_pmu
.pebs_no_isolation
= !x86_cpu_has_min_microcode_rev(isolation_ucodes
);
3873 static __init
void intel_pebs_isolation_quirk(void)
3875 WARN_ON_ONCE(x86_pmu
.check_microcode
);
3876 x86_pmu
.check_microcode
= intel_check_pebs_isolation
;
3877 intel_check_pebs_isolation();
3880 static const struct x86_cpu_desc pebs_ucodes
[] = {
3881 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE
, 7, 0x00000028),
3882 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X
, 6, 0x00000618),
3883 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X
, 7, 0x0000070c),
3887 static bool intel_snb_pebs_broken(void)
3889 return !x86_cpu_has_min_microcode_rev(pebs_ucodes
);
3892 static void intel_snb_check_microcode(void)
3894 if (intel_snb_pebs_broken() == x86_pmu
.pebs_broken
)
	 * Serialized by the microcode lock.
3900 if (x86_pmu
.pebs_broken
) {
3901 pr_info("PEBS enabled due to microcode update\n");
3902 x86_pmu
.pebs_broken
= 0;
3904 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
3905 x86_pmu
.pebs_broken
= 1;
3909 static bool is_lbr_from(unsigned long msr
)
3911 unsigned long lbr_from_nr
= x86_pmu
.lbr_from
+ x86_pmu
.lbr_nr
;
3913 return x86_pmu
.lbr_from
<= msr
&& msr
< lbr_from_nr
;
/*
 * Under certain circumstances, accessing certain MSRs may cause a #GP.
 * This function tests whether the input MSR can be safely accessed.
 */
3920 static bool check_msr(unsigned long msr
, u64 mask
)
3922 u64 val_old
, val_new
, val_tmp
;
3925 * Read the current value, change it and read it back to see if it
3926 * matches, this is needed to detect certain hardware emulators
3927 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
3929 if (rdmsrl_safe(msr
, &val_old
))
3933 * Only change the bits which can be updated by wrmsrl.
3935 val_tmp
= val_old
^ mask
;
3937 if (is_lbr_from(msr
))
3938 val_tmp
= lbr_from_signext_quirk_wr(val_tmp
);
3940 if (wrmsrl_safe(msr
, val_tmp
) ||
3941 rdmsrl_safe(msr
, &val_new
))
3945 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
3946 * should equal rdmsrl()'s even with the quirk.
3948 if (val_new
!= val_tmp
)
3951 if (is_lbr_from(msr
))
3952 val_old
= lbr_from_signext_quirk_wr(val_old
);
3954 /* Here it's sure that the MSR can be safely accessed.
3955 * Restore the old value and return.
3957 wrmsrl(msr
, val_old
);
3962 static __init
void intel_sandybridge_quirk(void)
3964 x86_pmu
.check_microcode
= intel_snb_check_microcode
;
3966 intel_snb_check_microcode();
3970 static const struct { int id
; char *name
; } intel_arch_events_map
[] __initconst
= {
3971 { PERF_COUNT_HW_CPU_CYCLES
, "cpu cycles" },
3972 { PERF_COUNT_HW_INSTRUCTIONS
, "instructions" },
3973 { PERF_COUNT_HW_BUS_CYCLES
, "bus cycles" },
3974 { PERF_COUNT_HW_CACHE_REFERENCES
, "cache references" },
3975 { PERF_COUNT_HW_CACHE_MISSES
, "cache misses" },
3976 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS
, "branch instructions" },
3977 { PERF_COUNT_HW_BRANCH_MISSES
, "branch misses" },
3980 static __init
void intel_arch_events_quirk(void)
	/* disable events that cpuid reported as not present */
3985 for_each_set_bit(bit
, x86_pmu
.events_mask
, ARRAY_SIZE(intel_arch_events_map
)) {
3986 intel_perfmon_event_map
[intel_arch_events_map
[bit
].id
] = 0;
3987 pr_warn("CPUID marked event: \'%s\' unavailable\n",
3988 intel_arch_events_map
[bit
].name
);
3992 static __init
void intel_nehalem_quirk(void)
3994 union cpuid10_ebx ebx
;
3996 ebx
.full
= x86_pmu
.events_maskl
;
3997 if (ebx
.split
.no_branch_misses_retired
) {
3999 * Erratum AAJ80 detected, we work it around by using
4000 * the BR_MISP_EXEC.ANY event. This will over-count
4001 * branch-misses, but it's still much better than the
4002 * architectural event which is often completely bogus:
4004 intel_perfmon_event_map
[PERF_COUNT_HW_BRANCH_MISSES
] = 0x7f89;
4005 ebx
.split
.no_branch_misses_retired
= 0;
4006 x86_pmu
.events_maskl
= ebx
.full
;
4007 pr_info("CPU erratum AAJ80 worked around\n");
4011 static const struct x86_cpu_desc counter_freezing_ucodes
[] = {
4012 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT
, 2, 0x0000000e),
4013 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT
, 9, 0x0000002e),
4014 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT
, 10, 0x00000008),
4015 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X
, 1, 0x00000028),
4016 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS
, 1, 0x00000028),
4017 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS
, 8, 0x00000006),
4021 static bool intel_counter_freezing_broken(void)
4023 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes
);
4026 static __init
void intel_counter_freezing_quirk(void)
4028 /* Check if it's already disabled */
4029 if (disable_counter_freezing
)
4033 * If the system starts with the wrong ucode, leave the
4034 * counter-freezing feature permanently disabled.
4036 if (intel_counter_freezing_broken()) {
		pr_info("PMU counter freezing disabled due to CPU errata, "
			"please upgrade microcode\n");
4039 x86_pmu
.counter_freezing
= false;
4040 x86_pmu
.handle_irq
= intel_pmu_handle_irq
;
4045 * enable software workaround for errata:
 * Only needed when HT is enabled. However, detecting whether HT is enabled
 * is difficult (model specific). So instead, we enable the workaround during
 * early boot and verify whether it is needed in a later initcall phase, once
 * we have valid topology information to check if HT is actually enabled.
4056 static __init
void intel_ht_bug(void)
4058 x86_pmu
.flags
|= PMU_FL_EXCL_CNTRS
| PMU_FL_EXCL_ENABLED
;
4060 x86_pmu
.start_scheduling
= intel_start_scheduling
;
4061 x86_pmu
.commit_scheduling
= intel_commit_scheduling
;
4062 x86_pmu
.stop_scheduling
= intel_stop_scheduling
;
4065 EVENT_ATTR_STR(mem
-loads
, mem_ld_hsw
, "event=0xcd,umask=0x1,ldlat=3");
4066 EVENT_ATTR_STR(mem
-stores
, mem_st_hsw
, "event=0xd0,umask=0x82")
4068 /* Haswell special events */
4069 EVENT_ATTR_STR(tx
-start
, tx_start
, "event=0xc9,umask=0x1");
4070 EVENT_ATTR_STR(tx
-commit
, tx_commit
, "event=0xc9,umask=0x2");
4071 EVENT_ATTR_STR(tx
-abort
, tx_abort
, "event=0xc9,umask=0x4");
4072 EVENT_ATTR_STR(tx
-capacity
, tx_capacity
, "event=0x54,umask=0x2");
4073 EVENT_ATTR_STR(tx
-conflict
, tx_conflict
, "event=0x54,umask=0x1");
4074 EVENT_ATTR_STR(el
-start
, el_start
, "event=0xc8,umask=0x1");
4075 EVENT_ATTR_STR(el
-commit
, el_commit
, "event=0xc8,umask=0x2");
4076 EVENT_ATTR_STR(el
-abort
, el_abort
, "event=0xc8,umask=0x4");
4077 EVENT_ATTR_STR(el
-capacity
, el_capacity
, "event=0x54,umask=0x2");
4078 EVENT_ATTR_STR(el
-conflict
, el_conflict
, "event=0x54,umask=0x1");
4079 EVENT_ATTR_STR(cycles
-t
, cycles_t
, "event=0x3c,in_tx=1");
4080 EVENT_ATTR_STR(cycles
-ct
, cycles_ct
, "event=0x3c,in_tx=1,in_tx_cp=1");
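/*
 * These aliases are exported via the PMU's sysfs events directory, so the
 * TSX events can be used by name, for instance "perf stat -e cpu/tx-start/".
 */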
4082 static struct attribute
*hsw_events_attrs
[] = {
4083 EVENT_PTR(td_slots_issued
),
4084 EVENT_PTR(td_slots_retired
),
4085 EVENT_PTR(td_fetch_bubbles
),
4086 EVENT_PTR(td_total_slots
),
4087 EVENT_PTR(td_total_slots_scale
),
4088 EVENT_PTR(td_recovery_bubbles
),
4089 EVENT_PTR(td_recovery_bubbles_scale
),
4093 static struct attribute
*hsw_mem_events_attrs
[] = {
4094 EVENT_PTR(mem_ld_hsw
),
4095 EVENT_PTR(mem_st_hsw
),
4099 static struct attribute
*hsw_tsx_events_attrs
[] = {
4100 EVENT_PTR(tx_start
),
4101 EVENT_PTR(tx_commit
),
4102 EVENT_PTR(tx_abort
),
4103 EVENT_PTR(tx_capacity
),
4104 EVENT_PTR(tx_conflict
),
4105 EVENT_PTR(el_start
),
4106 EVENT_PTR(el_commit
),
4107 EVENT_PTR(el_abort
),
4108 EVENT_PTR(el_capacity
),
4109 EVENT_PTR(el_conflict
),
4110 EVENT_PTR(cycles_t
),
4111 EVENT_PTR(cycles_ct
),
4115 static ssize_t
freeze_on_smi_show(struct device
*cdev
,
4116 struct device_attribute
*attr
,
4119 return sprintf(buf
, "%lu\n", x86_pmu
.attr_freeze_on_smi
);
4122 static DEFINE_MUTEX(freeze_on_smi_mutex
);
4124 static ssize_t
freeze_on_smi_store(struct device
*cdev
,
4125 struct device_attribute
*attr
,
4126 const char *buf
, size_t count
)
4131 ret
= kstrtoul(buf
, 0, &val
);
4138 mutex_lock(&freeze_on_smi_mutex
);
4140 if (x86_pmu
.attr_freeze_on_smi
== val
)
4143 x86_pmu
.attr_freeze_on_smi
= val
;
4146 on_each_cpu(flip_smm_bit
, &val
, 1);
4149 mutex_unlock(&freeze_on_smi_mutex
);
4154 static DEVICE_ATTR_RW(freeze_on_smi
);
4156 static ssize_t
branches_show(struct device
*cdev
,
4157 struct device_attribute
*attr
,
4160 return snprintf(buf
, PAGE_SIZE
, "%d\n", x86_pmu
.lbr_nr
);
4163 static DEVICE_ATTR_RO(branches
);
4165 static struct attribute
*lbr_attrs
[] = {
4166 &dev_attr_branches
.attr
,
4170 static char pmu_name_str
[30];
4172 static ssize_t
pmu_name_show(struct device
*cdev
,
4173 struct device_attribute
*attr
,
4176 return snprintf(buf
, PAGE_SIZE
, "%s\n", pmu_name_str
);
4179 static DEVICE_ATTR_RO(pmu_name
);
4181 static struct attribute
*intel_pmu_caps_attrs
[] = {
4182 &dev_attr_pmu_name
.attr
,
4186 static DEVICE_BOOL_ATTR(allow_tsx_force_abort
, 0644, allow_tsx_force_abort
);
4188 static struct attribute
*intel_pmu_attrs
[] = {
4189 &dev_attr_freeze_on_smi
.attr
,
4190 NULL
, /* &dev_attr_allow_tsx_force_abort.attr.attr */
4194 static __init
struct attribute
**
4195 get_events_attrs(struct attribute
**base
,
4196 struct attribute
**mem
,
4197 struct attribute
**tsx
)
4199 struct attribute
**attrs
= base
;
4200 struct attribute
**old
;
4202 if (mem
&& x86_pmu
.pebs
)
4203 attrs
= merge_attr(attrs
, mem
);
4205 if (tsx
&& boot_cpu_has(X86_FEATURE_RTM
)) {
4207 attrs
= merge_attr(attrs
, tsx
);
4215 __init
int intel_pmu_init(void)
4217 struct attribute
**extra_attr
= NULL
;
4218 struct attribute
**mem_attr
= NULL
;
4219 struct attribute
**tsx_attr
= NULL
;
4220 struct attribute
**to_free
= NULL
;
4221 union cpuid10_edx edx
;
4222 union cpuid10_eax eax
;
4223 union cpuid10_ebx ebx
;
4224 struct event_constraint
*c
;
4225 unsigned int unused
;
4226 struct extra_reg
*er
;
4230 if (!cpu_has(&boot_cpu_data
, X86_FEATURE_ARCH_PERFMON
)) {
4231 switch (boot_cpu_data
.x86
) {
4233 return p6_pmu_init();
4235 return knc_pmu_init();
4237 return p4_pmu_init();
4243 * Check whether the Architectural PerfMon supports
4244 * Branch Misses Retired hw_event or not.
4246 cpuid(10, &eax
.full
, &ebx
.full
, &unused
, &edx
.full
);
4247 if (eax
.split
.mask_length
< ARCH_PERFMON_EVENTS_COUNT
)
4250 version
= eax
.split
.version_id
;
4254 x86_pmu
= intel_pmu
;
	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events, when not running in a hypervisor:
	 */
	if (version > 1) {
		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);

		x86_pmu.num_counters_fixed =
			max((int)edx.split.num_counters_fixed, assume);
	}

	if (version >= 4)
		x86_pmu.counter_freezing = !disable_counter_freezing;
	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_CORE_YONAH:
		pr_cont("Core events, ");
		name = "core";
		break;
	case INTEL_FAM6_CORE2_MEROM:
		x86_add_quirk(intel_clovertown_quirk);
		/* fall through */

	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		name = "core2";
		break;
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		x86_add_quirk(intel_nehalem_quirk);
		x86_pmu.pebs_no_tlb = 1;
		extra_attr = nhm_format_attr;

		pr_cont("Nehalem events, ");
		name = "nehalem";
		break;
	case INTEL_FAM6_ATOM_BONNELL:
	case INTEL_FAM6_ATOM_BONNELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL:
	case INTEL_FAM6_ATOM_SALTWELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
		pr_cont("Atom events, ");
		name = "bonnell";
		break;
	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_X:
	case INTEL_FAM6_ATOM_SILVERMONT_MID:
	case INTEL_FAM6_ATOM_AIRMONT:
	case INTEL_FAM6_ATOM_AIRMONT_MID:
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_slm();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.cpu_events = slm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Silvermont events, ");
		name = "silvermont";
		break;
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_X:
		x86_add_quirk(intel_counter_freezing_quirk);
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 * :pp is identical to :ppp
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.cpu_events = glm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Goldmont events, ");
		name = "goldmont";
		break;
	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		x86_add_quirk(intel_counter_freezing_quirk);
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.get_event_constraints = glp_get_event_constraints;
		x86_pmu.cpu_events = glm_events_attrs;
		/* Goldmont Plus has 4-wide pipeline */
		event_attr_td_total_slots_scale_glm.event_str = "4";
		extra_attr = slm_format_attr;
		pr_cont("Goldmont plus events, ");
		name = "goldmont_plus";
		break;
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		extra_attr = nhm_format_attr;
		pr_cont("Westmere events, ");
		name = "westmere";
		break;
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("SandyBridge events, ");
		name = "sandybridge";
		break;
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different than SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("IvyBridge events, ");
		name = "ivybridge";
		break;
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
		x86_add_quirk(intel_ht_bug);
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Haswell events, ");
		name = "haswell";
		break;
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
									 BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
									  HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.limit_period = bdw_limit_period;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Broadwell events, ");
		name = "broadwell";
		break;
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		memcpy(hw_cache_event_ids,
		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs,
		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_knl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_knl_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		extra_attr = slm_format_attr;
		pr_cont("Knights Landing/Mill events, ");
		name = "knights-landing";
		break;
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
		event_attr_td_recovery_bubbles.event_str_noht =
			"event=0xd,umask=0x1,cmask=1";
		event_attr_td_recovery_bubbles.event_str_ht =
			"event=0xd,umask=0x1,cmask=1,any=1";

		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_attr = merge_attr(extra_attr, skl_format_attr);
		to_free = extra_attr;
		x86_pmu.cpu_events = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(
			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);

		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
			intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
		}

		pr_cont("Skylake events, ");
		name = "skylake";
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		}
	}
	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (version >= 2 && extra_attr) {
		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
						  extra_attr);
		WARN_ON(!x86_pmu.format_attrs);
	}
	x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
					      mem_attr, tsx_attr);
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
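	/*
	 * Worked example (assuming 4 general and 3 fixed counters): the
	 * general part above yields 0xf and the fixed part yields 0x7
	 * shifted up to bit INTEL_PMC_IDX_FIXED (32), so intel_ctrl ends up
	 * as 0x70000000f, i.e. one global-enable bit per usable counter.
	 */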
	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask == FIXED_EVENT_FLAGS
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			}
			c->idxmsk64 &=
				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
			c->weight = hweight64(c->idxmsk64);
		}
	}
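	/*
	 * The mask trimming above also guarantees that no constraint points
	 * at fixed counters this CPU does not have: every bit at or above
	 * INTEL_PMC_IDX_FIXED + num_counters_fixed is cleared before the
	 * constraint weight is recomputed.
	 */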
	/*
	 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support the LBR MSRs. Check all LBR MSRs here and
	 * disable LBR access if any of them cannot be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
	x86_pmu.caps_attrs = intel_pmu_caps_attrs;

	if (x86_pmu.lbr_nr) {
		x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
	}
	/*
	 * Accessing the extra MSRs may cause a #GP under certain
	 * circumstances, e.g. KVM doesn't support the offcore response MSRs.
	 * Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x11UL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}
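	/*
	 * check_msr() essentially toggles the given bits, reads them back
	 * and then restores the original value, so MSRs a hypervisor
	 * refuses to emulate are disabled up front here instead of causing
	 * a #GP later when an event is scheduled.
	 */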
	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}
	/*
	 * For arch perfmon 4 use counter freezing to avoid
	 * several MSR accesses in the PMI.
	 */
	if (x86_pmu.counter_freezing)
		x86_pmu.handle_irq = intel_pmu_handle_irq_v4;

	kfree(to_free);
	return 0;
}
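/*
 * A note on the counter-freezing path selected above: with arch perfmon v4
 * the hardware freezes the counters itself when a PMI is raised, so
 * intel_pmu_handle_irq_v4() can skip the GLOBAL_CTRL disable/re-enable MSR
 * writes that the legacy handler performs on every interrupt.
 */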
/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled
 * If HT is off, then we disable the workaround
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}

subsys_initcall(fixup_ht_bug)
)