arch/x86/events/intel/core.c (thirdparty/linux.git @ 386151b2c62fec7868b6d17a31435243c0003402)
1 /*
2 * Per core/cpu state
3 *
4 * Used to coordinate shared registers between HT threads or
5 * among events on a single PMU.
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/nmi.h>
16
17 #include <asm/cpufeature.h>
18 #include <asm/hardirq.h>
19 #include <asm/intel-family.h>
20 #include <asm/apic.h>
21 #include <asm/cpu_device_id.h>
22
23 #include "../perf_event.h"
24
25 /*
26 * Intel PerfMon, used on Core and later.
27 */
28 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
29 {
30 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
31 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
32 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
33 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
34 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
35 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
36 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
37 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
38 };
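/*
 * These values encode (unit mask << 8) | event select; e.g. 0x412e is
 * architectural event 0x2e with umask 0x41 (last-level cache misses),
 * 0x4f2e is the same event with umask 0x4f (last-level cache references),
 * and 0x00c4/0x00c5 are retired / mispredicted branches with no umask.
 */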
39
40 static struct event_constraint intel_core_event_constraints[] __read_mostly =
41 {
42 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
43 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
44 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
45 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
46 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
47 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
48 EVENT_CONSTRAINT_END
49 };
50
51 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
52 {
53 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
54 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
55 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
56 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
57 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
58 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
59 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
60 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
61 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
62 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
63 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
64 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
65 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
66 EVENT_CONSTRAINT_END
67 };
68
69 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
70 {
71 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
72 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
73 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
74 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
75 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
76 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
77 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
78 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
79 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
80 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
81 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
82 EVENT_CONSTRAINT_END
83 };
84
85 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
86 {
87 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
88 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
89 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
90 EVENT_EXTRA_END
91 };
92
93 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
94 {
95 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
96 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
97 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
98 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
99 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
100 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
101 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
102 EVENT_CONSTRAINT_END
103 };
104
105 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
106 {
107 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
108 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
109 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
110 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
111 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
112 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
113 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
114 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
115 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
116 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
117 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
118 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
119
120 /*
121 * When HT is off these events can only run on the bottom 4 counters
122 * When HT is on, they are impacted by the HT bug and require EXCL access
123 */
124 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
125 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
126 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
127 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
128
129 EVENT_CONSTRAINT_END
130 };
131
132 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
133 {
134 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
135 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
136 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
137 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
138 	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
139 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
140 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
141 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
142 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
143 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
144 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
147
148 /*
149 * When HT is off these events can only run on the bottom 4 counters
150 * When HT is on, they are impacted by the HT bug and require EXCL access
151 */
152 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
153 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
154 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
155 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
156
157 EVENT_CONSTRAINT_END
158 };
159
160 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
161 {
162 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
163 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
164 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
165 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
166 EVENT_EXTRA_END
167 };
168
169 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
170 {
171 EVENT_CONSTRAINT_END
172 };
173
174 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
175 {
176 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
177 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
178 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
179 EVENT_CONSTRAINT_END
180 };
181
182 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
183 {
184 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
185 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
186 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
187 EVENT_CONSTRAINT_END
188 };
189
190 static struct event_constraint intel_skl_event_constraints[] = {
191 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
192 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
193 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
194 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
195
196 /*
197 * when HT is off, these can only run on the bottom 4 counters
198 */
199 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
200 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
201 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
202 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
203 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
204
205 EVENT_CONSTRAINT_END
206 };
207
208 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
209 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
210 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
211 EVENT_EXTRA_END
212 };
213
214 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
215 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
216 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
217 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
218 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
219 EVENT_EXTRA_END
220 };
221
222 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
223 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
224 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
225 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
226 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
227 EVENT_EXTRA_END
228 };
229
230 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
231 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
232 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
233 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
234 /*
235  * Note: the low 8 bits of the eventsel code do not form a contiguous
236  * field; they contain some bits that #GP when set, which are masked out.
237 */
238 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
239 EVENT_EXTRA_END
240 };
241
242 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
243 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
244 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
245
246 static struct attribute *nhm_mem_events_attrs[] = {
247 EVENT_PTR(mem_ld_nhm),
248 NULL,
249 };
250
251 /*
252 * topdown events for Intel Core CPUs.
253 *
254  * The events are all measured in slots, where a slot is an issue
255  * opportunity in the 4-wide pipeline. Some events are already reported
256  * in slots; for cycle events we multiply by the pipeline width (4).
257 *
258 * With Hyper Threading on, topdown metrics are either summed or averaged
259 * between the threads of a core: (count_t0 + count_t1).
260 *
261 * For the average case the metric is always scaled to pipeline width,
262 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
263 */
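/*
 * Worked example of the scaling above (illustrative counts): if the two
 * hyperthreads of a core contribute count_t0 = 1000 and count_t1 = 600,
 * the summed count is 1600 and the .scale of "2" yields
 * (1600 / 2) * 4 = 3200 slots for the whole core; with HT off the
 * single-thread count is simply multiplied by the pipeline width,
 * hence the .scale of "4".
 */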
264
265 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
266 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
267 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
268 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
269 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
270 "event=0xe,umask=0x1"); /* uops_issued.any */
271 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
272 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
273 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
274 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
275 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
276 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
277 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
278 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
279 "4", "2");
280
281 static struct attribute *snb_events_attrs[] = {
282 EVENT_PTR(td_slots_issued),
283 EVENT_PTR(td_slots_retired),
284 EVENT_PTR(td_fetch_bubbles),
285 EVENT_PTR(td_total_slots),
286 EVENT_PTR(td_total_slots_scale),
287 EVENT_PTR(td_recovery_bubbles),
288 EVENT_PTR(td_recovery_bubbles_scale),
289 NULL,
290 };
291
292 static struct attribute *snb_mem_events_attrs[] = {
293 EVENT_PTR(mem_ld_snb),
294 EVENT_PTR(mem_st_snb),
295 NULL,
296 };
297
298 static struct event_constraint intel_hsw_event_constraints[] = {
299 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
300 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
301 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
302 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
303 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
304 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
305 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
306 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
307 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
308 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
309 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
310 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
311
312 /*
313 * When HT is off these events can only run on the bottom 4 counters
314 * When HT is on, they are impacted by the HT bug and require EXCL access
315 */
316 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
317 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
318 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
319 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
320
321 EVENT_CONSTRAINT_END
322 };
323
324 static struct event_constraint intel_bdw_event_constraints[] = {
325 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
326 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
327 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
328 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
329 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
330 /*
331 * when HT is off, these can only run on the bottom 4 counters
332 */
333 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
334 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
335 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
336 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
337 EVENT_CONSTRAINT_END
338 };
339
340 static u64 intel_pmu_event_map(int hw_event)
341 {
342 return intel_perfmon_event_map[hw_event];
343 }
344
345 /*
346 * Notes on the events:
347 * - data reads do not include code reads (comparable to earlier tables)
348 * - data counts include speculative execution (except L1 write, dtlb, bpu)
349 * - remote node access includes remote memory, remote cache, remote mmio.
350 * - prefetches are not included in the counts.
351 * - icache miss does not include decoded icache
352 */
353
354 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
355 #define SKL_DEMAND_RFO BIT_ULL(1)
356 #define SKL_ANY_RESPONSE BIT_ULL(16)
357 #define SKL_SUPPLIER_NONE BIT_ULL(17)
358 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
359 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
360 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
361 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
362 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
363 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
364 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
365 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
366 #define SKL_SPL_HIT BIT_ULL(30)
367 #define SKL_SNOOP_NONE BIT_ULL(31)
368 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
369 #define SKL_SNOOP_MISS BIT_ULL(33)
370 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
371 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
372 #define SKL_SNOOP_HITM BIT_ULL(36)
373 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
374 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
375 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
376 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
377 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
378 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
379 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
380 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
381 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
382 SKL_SNOOP_HITM|SKL_SPL_HIT)
383 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
384 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
385 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
386 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
387 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
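/*
 * These bits are OR-ed together to form the value written to
 * MSR_OFFCORE_RSP_x, selecting which request/response combinations
 * OFFCORE_RESPONSE counts; e.g. the LL read-miss entry in
 * skl_hw_cache_extra_regs below is SKL_DEMAND_READ | SKL_L3_MISS |
 * SKL_ANY_SNOOP | SKL_SUPPLIER_NONE.
 */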
388
389 static __initconst const u64 skl_hw_cache_event_ids
390 [PERF_COUNT_HW_CACHE_MAX]
391 [PERF_COUNT_HW_CACHE_OP_MAX]
392 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
393 {
394 [ C(L1D ) ] = {
395 [ C(OP_READ) ] = {
396 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
397 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
398 },
399 [ C(OP_WRITE) ] = {
400 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
401 [ C(RESULT_MISS) ] = 0x0,
402 },
403 [ C(OP_PREFETCH) ] = {
404 [ C(RESULT_ACCESS) ] = 0x0,
405 [ C(RESULT_MISS) ] = 0x0,
406 },
407 },
408 [ C(L1I ) ] = {
409 [ C(OP_READ) ] = {
410 [ C(RESULT_ACCESS) ] = 0x0,
411 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
412 },
413 [ C(OP_WRITE) ] = {
414 [ C(RESULT_ACCESS) ] = -1,
415 [ C(RESULT_MISS) ] = -1,
416 },
417 [ C(OP_PREFETCH) ] = {
418 [ C(RESULT_ACCESS) ] = 0x0,
419 [ C(RESULT_MISS) ] = 0x0,
420 },
421 },
422 [ C(LL ) ] = {
423 [ C(OP_READ) ] = {
424 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
425 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
426 },
427 [ C(OP_WRITE) ] = {
428 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
429 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
430 },
431 [ C(OP_PREFETCH) ] = {
432 [ C(RESULT_ACCESS) ] = 0x0,
433 [ C(RESULT_MISS) ] = 0x0,
434 },
435 },
436 [ C(DTLB) ] = {
437 [ C(OP_READ) ] = {
438 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
439 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
440 },
441 [ C(OP_WRITE) ] = {
442 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
443 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
444 },
445 [ C(OP_PREFETCH) ] = {
446 [ C(RESULT_ACCESS) ] = 0x0,
447 [ C(RESULT_MISS) ] = 0x0,
448 },
449 },
450 [ C(ITLB) ] = {
451 [ C(OP_READ) ] = {
452 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
453 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
454 },
455 [ C(OP_WRITE) ] = {
456 [ C(RESULT_ACCESS) ] = -1,
457 [ C(RESULT_MISS) ] = -1,
458 },
459 [ C(OP_PREFETCH) ] = {
460 [ C(RESULT_ACCESS) ] = -1,
461 [ C(RESULT_MISS) ] = -1,
462 },
463 },
464 [ C(BPU ) ] = {
465 [ C(OP_READ) ] = {
466 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
467 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
468 },
469 [ C(OP_WRITE) ] = {
470 [ C(RESULT_ACCESS) ] = -1,
471 [ C(RESULT_MISS) ] = -1,
472 },
473 [ C(OP_PREFETCH) ] = {
474 [ C(RESULT_ACCESS) ] = -1,
475 [ C(RESULT_MISS) ] = -1,
476 },
477 },
478 [ C(NODE) ] = {
479 [ C(OP_READ) ] = {
480 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
481 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
482 },
483 [ C(OP_WRITE) ] = {
484 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
485 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
486 },
487 [ C(OP_PREFETCH) ] = {
488 [ C(RESULT_ACCESS) ] = 0x0,
489 [ C(RESULT_MISS) ] = 0x0,
490 },
491 },
492 };
493
494 static __initconst const u64 skl_hw_cache_extra_regs
495 [PERF_COUNT_HW_CACHE_MAX]
496 [PERF_COUNT_HW_CACHE_OP_MAX]
497 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
498 {
499 [ C(LL ) ] = {
500 [ C(OP_READ) ] = {
501 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
502 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
503 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
504 SKL_L3_MISS|SKL_ANY_SNOOP|
505 SKL_SUPPLIER_NONE,
506 },
507 [ C(OP_WRITE) ] = {
508 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
509 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
510 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
511 SKL_L3_MISS|SKL_ANY_SNOOP|
512 SKL_SUPPLIER_NONE,
513 },
514 [ C(OP_PREFETCH) ] = {
515 [ C(RESULT_ACCESS) ] = 0x0,
516 [ C(RESULT_MISS) ] = 0x0,
517 },
518 },
519 [ C(NODE) ] = {
520 [ C(OP_READ) ] = {
521 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
522 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
523 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
524 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
525 },
526 [ C(OP_WRITE) ] = {
527 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
528 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
529 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
530 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
531 },
532 [ C(OP_PREFETCH) ] = {
533 [ C(RESULT_ACCESS) ] = 0x0,
534 [ C(RESULT_MISS) ] = 0x0,
535 },
536 },
537 };
538
539 #define SNB_DMND_DATA_RD (1ULL << 0)
540 #define SNB_DMND_RFO (1ULL << 1)
541 #define SNB_DMND_IFETCH (1ULL << 2)
542 #define SNB_DMND_WB (1ULL << 3)
543 #define SNB_PF_DATA_RD (1ULL << 4)
544 #define SNB_PF_RFO (1ULL << 5)
545 #define SNB_PF_IFETCH (1ULL << 6)
546 #define SNB_LLC_DATA_RD (1ULL << 7)
547 #define SNB_LLC_RFO (1ULL << 8)
548 #define SNB_LLC_IFETCH (1ULL << 9)
549 #define SNB_BUS_LOCKS (1ULL << 10)
550 #define SNB_STRM_ST (1ULL << 11)
551 #define SNB_OTHER (1ULL << 15)
552 #define SNB_RESP_ANY (1ULL << 16)
553 #define SNB_NO_SUPP (1ULL << 17)
554 #define SNB_LLC_HITM (1ULL << 18)
555 #define SNB_LLC_HITE (1ULL << 19)
556 #define SNB_LLC_HITS (1ULL << 20)
557 #define SNB_LLC_HITF (1ULL << 21)
558 #define SNB_LOCAL (1ULL << 22)
559 #define SNB_REMOTE (0xffULL << 23)
560 #define SNB_SNP_NONE (1ULL << 31)
561 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
562 #define SNB_SNP_MISS (1ULL << 33)
563 #define SNB_NO_FWD (1ULL << 34)
564 #define SNB_SNP_FWD (1ULL << 35)
565 #define SNB_HITM (1ULL << 36)
566 #define SNB_NON_DRAM (1ULL << 37)
567
568 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
569 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
570 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
571
572 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
573 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
574 SNB_HITM)
575
576 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
577 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
578
579 #define SNB_L3_ACCESS SNB_RESP_ANY
580 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
581
582 static __initconst const u64 snb_hw_cache_extra_regs
583 [PERF_COUNT_HW_CACHE_MAX]
584 [PERF_COUNT_HW_CACHE_OP_MAX]
585 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
586 {
587 [ C(LL ) ] = {
588 [ C(OP_READ) ] = {
589 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
590 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
591 },
592 [ C(OP_WRITE) ] = {
593 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
594 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
595 },
596 [ C(OP_PREFETCH) ] = {
597 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
598 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
599 },
600 },
601 [ C(NODE) ] = {
602 [ C(OP_READ) ] = {
603 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
604 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
605 },
606 [ C(OP_WRITE) ] = {
607 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
608 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
609 },
610 [ C(OP_PREFETCH) ] = {
611 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
612 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
613 },
614 },
615 };
616
617 static __initconst const u64 snb_hw_cache_event_ids
618 [PERF_COUNT_HW_CACHE_MAX]
619 [PERF_COUNT_HW_CACHE_OP_MAX]
620 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
621 {
622 [ C(L1D) ] = {
623 [ C(OP_READ) ] = {
624 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
625 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
626 },
627 [ C(OP_WRITE) ] = {
628 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
629 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
630 },
631 [ C(OP_PREFETCH) ] = {
632 [ C(RESULT_ACCESS) ] = 0x0,
633 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
634 },
635 },
636 [ C(L1I ) ] = {
637 [ C(OP_READ) ] = {
638 [ C(RESULT_ACCESS) ] = 0x0,
639 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
640 },
641 [ C(OP_WRITE) ] = {
642 [ C(RESULT_ACCESS) ] = -1,
643 [ C(RESULT_MISS) ] = -1,
644 },
645 [ C(OP_PREFETCH) ] = {
646 [ C(RESULT_ACCESS) ] = 0x0,
647 [ C(RESULT_MISS) ] = 0x0,
648 },
649 },
650 [ C(LL ) ] = {
651 [ C(OP_READ) ] = {
652 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
653 [ C(RESULT_ACCESS) ] = 0x01b7,
654 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
655 [ C(RESULT_MISS) ] = 0x01b7,
656 },
657 [ C(OP_WRITE) ] = {
658 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
659 [ C(RESULT_ACCESS) ] = 0x01b7,
660 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
661 [ C(RESULT_MISS) ] = 0x01b7,
662 },
663 [ C(OP_PREFETCH) ] = {
664 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
665 [ C(RESULT_ACCESS) ] = 0x01b7,
666 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
667 [ C(RESULT_MISS) ] = 0x01b7,
668 },
669 },
670 [ C(DTLB) ] = {
671 [ C(OP_READ) ] = {
672 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
673 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
674 },
675 [ C(OP_WRITE) ] = {
676 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
677 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
678 },
679 [ C(OP_PREFETCH) ] = {
680 [ C(RESULT_ACCESS) ] = 0x0,
681 [ C(RESULT_MISS) ] = 0x0,
682 },
683 },
684 [ C(ITLB) ] = {
685 [ C(OP_READ) ] = {
686 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
687 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
688 },
689 [ C(OP_WRITE) ] = {
690 [ C(RESULT_ACCESS) ] = -1,
691 [ C(RESULT_MISS) ] = -1,
692 },
693 [ C(OP_PREFETCH) ] = {
694 [ C(RESULT_ACCESS) ] = -1,
695 [ C(RESULT_MISS) ] = -1,
696 },
697 },
698 [ C(BPU ) ] = {
699 [ C(OP_READ) ] = {
700 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
701 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
702 },
703 [ C(OP_WRITE) ] = {
704 [ C(RESULT_ACCESS) ] = -1,
705 [ C(RESULT_MISS) ] = -1,
706 },
707 [ C(OP_PREFETCH) ] = {
708 [ C(RESULT_ACCESS) ] = -1,
709 [ C(RESULT_MISS) ] = -1,
710 },
711 },
712 [ C(NODE) ] = {
713 [ C(OP_READ) ] = {
714 [ C(RESULT_ACCESS) ] = 0x01b7,
715 [ C(RESULT_MISS) ] = 0x01b7,
716 },
717 [ C(OP_WRITE) ] = {
718 [ C(RESULT_ACCESS) ] = 0x01b7,
719 [ C(RESULT_MISS) ] = 0x01b7,
720 },
721 [ C(OP_PREFETCH) ] = {
722 [ C(RESULT_ACCESS) ] = 0x01b7,
723 [ C(RESULT_MISS) ] = 0x01b7,
724 },
725 },
726
727 };
728
729 /*
730 * Notes on the events:
731 * - data reads do not include code reads (comparable to earlier tables)
732 * - data counts include speculative execution (except L1 write, dtlb, bpu)
733 * - remote node access includes remote memory, remote cache, remote mmio.
734 * - prefetches are not included in the counts because they are not
735 * reliably counted.
736 */
737
738 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
739 #define HSW_DEMAND_RFO BIT_ULL(1)
740 #define HSW_ANY_RESPONSE BIT_ULL(16)
741 #define HSW_SUPPLIER_NONE BIT_ULL(17)
742 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
743 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
744 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
745 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
746 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
747 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
748 HSW_L3_MISS_REMOTE_HOP2P)
749 #define HSW_SNOOP_NONE BIT_ULL(31)
750 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
751 #define HSW_SNOOP_MISS BIT_ULL(33)
752 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
753 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
754 #define HSW_SNOOP_HITM BIT_ULL(36)
755 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
756 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
757 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
758 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
759 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
760 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
761 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
762 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
763 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
764 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
765 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
766
767 #define BDW_L3_MISS_LOCAL BIT(26)
768 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
769 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
770 HSW_L3_MISS_REMOTE_HOP2P)
771
772
773 static __initconst const u64 hsw_hw_cache_event_ids
774 [PERF_COUNT_HW_CACHE_MAX]
775 [PERF_COUNT_HW_CACHE_OP_MAX]
776 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
777 {
778 [ C(L1D ) ] = {
779 [ C(OP_READ) ] = {
780 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
781 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
782 },
783 [ C(OP_WRITE) ] = {
784 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
785 [ C(RESULT_MISS) ] = 0x0,
786 },
787 [ C(OP_PREFETCH) ] = {
788 [ C(RESULT_ACCESS) ] = 0x0,
789 [ C(RESULT_MISS) ] = 0x0,
790 },
791 },
792 [ C(L1I ) ] = {
793 [ C(OP_READ) ] = {
794 [ C(RESULT_ACCESS) ] = 0x0,
795 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
796 },
797 [ C(OP_WRITE) ] = {
798 [ C(RESULT_ACCESS) ] = -1,
799 [ C(RESULT_MISS) ] = -1,
800 },
801 [ C(OP_PREFETCH) ] = {
802 [ C(RESULT_ACCESS) ] = 0x0,
803 [ C(RESULT_MISS) ] = 0x0,
804 },
805 },
806 [ C(LL ) ] = {
807 [ C(OP_READ) ] = {
808 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
809 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
810 },
811 [ C(OP_WRITE) ] = {
812 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
813 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
814 },
815 [ C(OP_PREFETCH) ] = {
816 [ C(RESULT_ACCESS) ] = 0x0,
817 [ C(RESULT_MISS) ] = 0x0,
818 },
819 },
820 [ C(DTLB) ] = {
821 [ C(OP_READ) ] = {
822 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
823 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
824 },
825 [ C(OP_WRITE) ] = {
826 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
827 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
828 },
829 [ C(OP_PREFETCH) ] = {
830 [ C(RESULT_ACCESS) ] = 0x0,
831 [ C(RESULT_MISS) ] = 0x0,
832 },
833 },
834 [ C(ITLB) ] = {
835 [ C(OP_READ) ] = {
836 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
837 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
838 },
839 [ C(OP_WRITE) ] = {
840 [ C(RESULT_ACCESS) ] = -1,
841 [ C(RESULT_MISS) ] = -1,
842 },
843 [ C(OP_PREFETCH) ] = {
844 [ C(RESULT_ACCESS) ] = -1,
845 [ C(RESULT_MISS) ] = -1,
846 },
847 },
848 [ C(BPU ) ] = {
849 [ C(OP_READ) ] = {
850 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
851 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
852 },
853 [ C(OP_WRITE) ] = {
854 [ C(RESULT_ACCESS) ] = -1,
855 [ C(RESULT_MISS) ] = -1,
856 },
857 [ C(OP_PREFETCH) ] = {
858 [ C(RESULT_ACCESS) ] = -1,
859 [ C(RESULT_MISS) ] = -1,
860 },
861 },
862 [ C(NODE) ] = {
863 [ C(OP_READ) ] = {
864 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
865 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
866 },
867 [ C(OP_WRITE) ] = {
868 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
869 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
870 },
871 [ C(OP_PREFETCH) ] = {
872 [ C(RESULT_ACCESS) ] = 0x0,
873 [ C(RESULT_MISS) ] = 0x0,
874 },
875 },
876 };
877
878 static __initconst const u64 hsw_hw_cache_extra_regs
879 [PERF_COUNT_HW_CACHE_MAX]
880 [PERF_COUNT_HW_CACHE_OP_MAX]
881 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
882 {
883 [ C(LL ) ] = {
884 [ C(OP_READ) ] = {
885 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
886 HSW_LLC_ACCESS,
887 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
888 HSW_L3_MISS|HSW_ANY_SNOOP,
889 },
890 [ C(OP_WRITE) ] = {
891 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
892 HSW_LLC_ACCESS,
893 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
894 HSW_L3_MISS|HSW_ANY_SNOOP,
895 },
896 [ C(OP_PREFETCH) ] = {
897 [ C(RESULT_ACCESS) ] = 0x0,
898 [ C(RESULT_MISS) ] = 0x0,
899 },
900 },
901 [ C(NODE) ] = {
902 [ C(OP_READ) ] = {
903 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
904 HSW_L3_MISS_LOCAL_DRAM|
905 HSW_SNOOP_DRAM,
906 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
907 HSW_L3_MISS_REMOTE|
908 HSW_SNOOP_DRAM,
909 },
910 [ C(OP_WRITE) ] = {
911 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
912 HSW_L3_MISS_LOCAL_DRAM|
913 HSW_SNOOP_DRAM,
914 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
915 HSW_L3_MISS_REMOTE|
916 HSW_SNOOP_DRAM,
917 },
918 [ C(OP_PREFETCH) ] = {
919 [ C(RESULT_ACCESS) ] = 0x0,
920 [ C(RESULT_MISS) ] = 0x0,
921 },
922 },
923 };
924
925 static __initconst const u64 westmere_hw_cache_event_ids
926 [PERF_COUNT_HW_CACHE_MAX]
927 [PERF_COUNT_HW_CACHE_OP_MAX]
928 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
929 {
930 [ C(L1D) ] = {
931 [ C(OP_READ) ] = {
932 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
933 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
934 },
935 [ C(OP_WRITE) ] = {
936 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
937 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
938 },
939 [ C(OP_PREFETCH) ] = {
940 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
941 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
942 },
943 },
944 [ C(L1I ) ] = {
945 [ C(OP_READ) ] = {
946 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
947 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
948 },
949 [ C(OP_WRITE) ] = {
950 [ C(RESULT_ACCESS) ] = -1,
951 [ C(RESULT_MISS) ] = -1,
952 },
953 [ C(OP_PREFETCH) ] = {
954 [ C(RESULT_ACCESS) ] = 0x0,
955 [ C(RESULT_MISS) ] = 0x0,
956 },
957 },
958 [ C(LL ) ] = {
959 [ C(OP_READ) ] = {
960 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
961 [ C(RESULT_ACCESS) ] = 0x01b7,
962 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
963 [ C(RESULT_MISS) ] = 0x01b7,
964 },
965 /*
966 * Use RFO, not WRITEBACK, because a write miss would typically occur
967 * on RFO.
968 */
969 [ C(OP_WRITE) ] = {
970 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
971 [ C(RESULT_ACCESS) ] = 0x01b7,
972 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
973 [ C(RESULT_MISS) ] = 0x01b7,
974 },
975 [ C(OP_PREFETCH) ] = {
976 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
977 [ C(RESULT_ACCESS) ] = 0x01b7,
978 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
979 [ C(RESULT_MISS) ] = 0x01b7,
980 },
981 },
982 [ C(DTLB) ] = {
983 [ C(OP_READ) ] = {
984 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
985 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
986 },
987 [ C(OP_WRITE) ] = {
988 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
989 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
990 },
991 [ C(OP_PREFETCH) ] = {
992 [ C(RESULT_ACCESS) ] = 0x0,
993 [ C(RESULT_MISS) ] = 0x0,
994 },
995 },
996 [ C(ITLB) ] = {
997 [ C(OP_READ) ] = {
998 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
999 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1000 },
1001 [ C(OP_WRITE) ] = {
1002 [ C(RESULT_ACCESS) ] = -1,
1003 [ C(RESULT_MISS) ] = -1,
1004 },
1005 [ C(OP_PREFETCH) ] = {
1006 [ C(RESULT_ACCESS) ] = -1,
1007 [ C(RESULT_MISS) ] = -1,
1008 },
1009 },
1010 [ C(BPU ) ] = {
1011 [ C(OP_READ) ] = {
1012 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1013 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1014 },
1015 [ C(OP_WRITE) ] = {
1016 [ C(RESULT_ACCESS) ] = -1,
1017 [ C(RESULT_MISS) ] = -1,
1018 },
1019 [ C(OP_PREFETCH) ] = {
1020 [ C(RESULT_ACCESS) ] = -1,
1021 [ C(RESULT_MISS) ] = -1,
1022 },
1023 },
1024 [ C(NODE) ] = {
1025 [ C(OP_READ) ] = {
1026 [ C(RESULT_ACCESS) ] = 0x01b7,
1027 [ C(RESULT_MISS) ] = 0x01b7,
1028 },
1029 [ C(OP_WRITE) ] = {
1030 [ C(RESULT_ACCESS) ] = 0x01b7,
1031 [ C(RESULT_MISS) ] = 0x01b7,
1032 },
1033 [ C(OP_PREFETCH) ] = {
1034 [ C(RESULT_ACCESS) ] = 0x01b7,
1035 [ C(RESULT_MISS) ] = 0x01b7,
1036 },
1037 },
1038 };
1039
1040 /*
1041 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1042 * See IA32 SDM Vol 3B 30.6.1.3
1043 */
1044
1045 #define NHM_DMND_DATA_RD (1 << 0)
1046 #define NHM_DMND_RFO (1 << 1)
1047 #define NHM_DMND_IFETCH (1 << 2)
1048 #define NHM_DMND_WB (1 << 3)
1049 #define NHM_PF_DATA_RD (1 << 4)
1050 #define NHM_PF_DATA_RFO (1 << 5)
1051 #define NHM_PF_IFETCH (1 << 6)
1052 #define NHM_OFFCORE_OTHER (1 << 7)
1053 #define NHM_UNCORE_HIT (1 << 8)
1054 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1055 #define NHM_OTHER_CORE_HITM (1 << 10)
1056 /* reserved */
1057 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1058 #define NHM_REMOTE_DRAM (1 << 13)
1059 #define NHM_LOCAL_DRAM (1 << 14)
1060 #define NHM_NON_DRAM (1 << 15)
1061
1062 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1063 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1064
1065 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1066 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1067 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1068
1069 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1070 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1071 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
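/*
 * These request/response bits are OR-ed into MSR_OFFCORE_RESPONSE;
 * e.g. a demand-load L3 miss is NHM_DMND_READ | NHM_L3_MISS =
 * 0x0001 | 0xf000 = 0xf001, the value used for the LL
 * OP_READ/RESULT_MISS entry below.
 */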
1072
1073 static __initconst const u64 nehalem_hw_cache_extra_regs
1074 [PERF_COUNT_HW_CACHE_MAX]
1075 [PERF_COUNT_HW_CACHE_OP_MAX]
1076 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1077 {
1078 [ C(LL ) ] = {
1079 [ C(OP_READ) ] = {
1080 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1081 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1082 },
1083 [ C(OP_WRITE) ] = {
1084 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1085 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1086 },
1087 [ C(OP_PREFETCH) ] = {
1088 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1089 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1090 },
1091 },
1092 [ C(NODE) ] = {
1093 [ C(OP_READ) ] = {
1094 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1095 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1096 },
1097 [ C(OP_WRITE) ] = {
1098 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1099 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1100 },
1101 [ C(OP_PREFETCH) ] = {
1102 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1103 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1104 },
1105 },
1106 };
1107
1108 static __initconst const u64 nehalem_hw_cache_event_ids
1109 [PERF_COUNT_HW_CACHE_MAX]
1110 [PERF_COUNT_HW_CACHE_OP_MAX]
1111 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1112 {
1113 [ C(L1D) ] = {
1114 [ C(OP_READ) ] = {
1115 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1116 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1117 },
1118 [ C(OP_WRITE) ] = {
1119 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1120 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1121 },
1122 [ C(OP_PREFETCH) ] = {
1123 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1124 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1125 },
1126 },
1127 [ C(L1I ) ] = {
1128 [ C(OP_READ) ] = {
1129 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1130 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1131 },
1132 [ C(OP_WRITE) ] = {
1133 [ C(RESULT_ACCESS) ] = -1,
1134 [ C(RESULT_MISS) ] = -1,
1135 },
1136 [ C(OP_PREFETCH) ] = {
1137 [ C(RESULT_ACCESS) ] = 0x0,
1138 [ C(RESULT_MISS) ] = 0x0,
1139 },
1140 },
1141 [ C(LL ) ] = {
1142 [ C(OP_READ) ] = {
1143 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1144 [ C(RESULT_ACCESS) ] = 0x01b7,
1145 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1146 [ C(RESULT_MISS) ] = 0x01b7,
1147 },
1148 /*
1149 * Use RFO, not WRITEBACK, because a write miss would typically occur
1150 * on RFO.
1151 */
1152 [ C(OP_WRITE) ] = {
1153 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1154 [ C(RESULT_ACCESS) ] = 0x01b7,
1155 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1156 [ C(RESULT_MISS) ] = 0x01b7,
1157 },
1158 [ C(OP_PREFETCH) ] = {
1159 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1160 [ C(RESULT_ACCESS) ] = 0x01b7,
1161 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1162 [ C(RESULT_MISS) ] = 0x01b7,
1163 },
1164 },
1165 [ C(DTLB) ] = {
1166 [ C(OP_READ) ] = {
1167 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1168 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1169 },
1170 [ C(OP_WRITE) ] = {
1171 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1172 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1173 },
1174 [ C(OP_PREFETCH) ] = {
1175 [ C(RESULT_ACCESS) ] = 0x0,
1176 [ C(RESULT_MISS) ] = 0x0,
1177 },
1178 },
1179 [ C(ITLB) ] = {
1180 [ C(OP_READ) ] = {
1181 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1182 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1183 },
1184 [ C(OP_WRITE) ] = {
1185 [ C(RESULT_ACCESS) ] = -1,
1186 [ C(RESULT_MISS) ] = -1,
1187 },
1188 [ C(OP_PREFETCH) ] = {
1189 [ C(RESULT_ACCESS) ] = -1,
1190 [ C(RESULT_MISS) ] = -1,
1191 },
1192 },
1193 [ C(BPU ) ] = {
1194 [ C(OP_READ) ] = {
1195 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1196 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1197 },
1198 [ C(OP_WRITE) ] = {
1199 [ C(RESULT_ACCESS) ] = -1,
1200 [ C(RESULT_MISS) ] = -1,
1201 },
1202 [ C(OP_PREFETCH) ] = {
1203 [ C(RESULT_ACCESS) ] = -1,
1204 [ C(RESULT_MISS) ] = -1,
1205 },
1206 },
1207 [ C(NODE) ] = {
1208 [ C(OP_READ) ] = {
1209 [ C(RESULT_ACCESS) ] = 0x01b7,
1210 [ C(RESULT_MISS) ] = 0x01b7,
1211 },
1212 [ C(OP_WRITE) ] = {
1213 [ C(RESULT_ACCESS) ] = 0x01b7,
1214 [ C(RESULT_MISS) ] = 0x01b7,
1215 },
1216 [ C(OP_PREFETCH) ] = {
1217 [ C(RESULT_ACCESS) ] = 0x01b7,
1218 [ C(RESULT_MISS) ] = 0x01b7,
1219 },
1220 },
1221 };
1222
1223 static __initconst const u64 core2_hw_cache_event_ids
1224 [PERF_COUNT_HW_CACHE_MAX]
1225 [PERF_COUNT_HW_CACHE_OP_MAX]
1226 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1227 {
1228 [ C(L1D) ] = {
1229 [ C(OP_READ) ] = {
1230 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1231 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1232 },
1233 [ C(OP_WRITE) ] = {
1234 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1235 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1236 },
1237 [ C(OP_PREFETCH) ] = {
1238 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1239 [ C(RESULT_MISS) ] = 0,
1240 },
1241 },
1242 [ C(L1I ) ] = {
1243 [ C(OP_READ) ] = {
1244 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1245 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1246 },
1247 [ C(OP_WRITE) ] = {
1248 [ C(RESULT_ACCESS) ] = -1,
1249 [ C(RESULT_MISS) ] = -1,
1250 },
1251 [ C(OP_PREFETCH) ] = {
1252 [ C(RESULT_ACCESS) ] = 0,
1253 [ C(RESULT_MISS) ] = 0,
1254 },
1255 },
1256 [ C(LL ) ] = {
1257 [ C(OP_READ) ] = {
1258 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1259 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1260 },
1261 [ C(OP_WRITE) ] = {
1262 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1263 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1264 },
1265 [ C(OP_PREFETCH) ] = {
1266 [ C(RESULT_ACCESS) ] = 0,
1267 [ C(RESULT_MISS) ] = 0,
1268 },
1269 },
1270 [ C(DTLB) ] = {
1271 [ C(OP_READ) ] = {
1272 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1273 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1274 },
1275 [ C(OP_WRITE) ] = {
1276 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1277 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1278 },
1279 [ C(OP_PREFETCH) ] = {
1280 [ C(RESULT_ACCESS) ] = 0,
1281 [ C(RESULT_MISS) ] = 0,
1282 },
1283 },
1284 [ C(ITLB) ] = {
1285 [ C(OP_READ) ] = {
1286 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1287 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1288 },
1289 [ C(OP_WRITE) ] = {
1290 [ C(RESULT_ACCESS) ] = -1,
1291 [ C(RESULT_MISS) ] = -1,
1292 },
1293 [ C(OP_PREFETCH) ] = {
1294 [ C(RESULT_ACCESS) ] = -1,
1295 [ C(RESULT_MISS) ] = -1,
1296 },
1297 },
1298 [ C(BPU ) ] = {
1299 [ C(OP_READ) ] = {
1300 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1301 		[ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1302 },
1303 [ C(OP_WRITE) ] = {
1304 [ C(RESULT_ACCESS) ] = -1,
1305 [ C(RESULT_MISS) ] = -1,
1306 },
1307 [ C(OP_PREFETCH) ] = {
1308 [ C(RESULT_ACCESS) ] = -1,
1309 [ C(RESULT_MISS) ] = -1,
1310 },
1311 },
1312 };
1313
1314 static __initconst const u64 atom_hw_cache_event_ids
1315 [PERF_COUNT_HW_CACHE_MAX]
1316 [PERF_COUNT_HW_CACHE_OP_MAX]
1317 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1318 {
1319 [ C(L1D) ] = {
1320 [ C(OP_READ) ] = {
1321 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1322 [ C(RESULT_MISS) ] = 0,
1323 },
1324 [ C(OP_WRITE) ] = {
1325 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1326 [ C(RESULT_MISS) ] = 0,
1327 },
1328 [ C(OP_PREFETCH) ] = {
1329 [ C(RESULT_ACCESS) ] = 0x0,
1330 [ C(RESULT_MISS) ] = 0,
1331 },
1332 },
1333 [ C(L1I ) ] = {
1334 [ C(OP_READ) ] = {
1335 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1336 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1337 },
1338 [ C(OP_WRITE) ] = {
1339 [ C(RESULT_ACCESS) ] = -1,
1340 [ C(RESULT_MISS) ] = -1,
1341 },
1342 [ C(OP_PREFETCH) ] = {
1343 [ C(RESULT_ACCESS) ] = 0,
1344 [ C(RESULT_MISS) ] = 0,
1345 },
1346 },
1347 [ C(LL ) ] = {
1348 [ C(OP_READ) ] = {
1349 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1350 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1351 },
1352 [ C(OP_WRITE) ] = {
1353 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1354 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1355 },
1356 [ C(OP_PREFETCH) ] = {
1357 [ C(RESULT_ACCESS) ] = 0,
1358 [ C(RESULT_MISS) ] = 0,
1359 },
1360 },
1361 [ C(DTLB) ] = {
1362 [ C(OP_READ) ] = {
1363 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1364 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1365 },
1366 [ C(OP_WRITE) ] = {
1367 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1368 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1369 },
1370 [ C(OP_PREFETCH) ] = {
1371 [ C(RESULT_ACCESS) ] = 0,
1372 [ C(RESULT_MISS) ] = 0,
1373 },
1374 },
1375 [ C(ITLB) ] = {
1376 [ C(OP_READ) ] = {
1377 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1378 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1379 },
1380 [ C(OP_WRITE) ] = {
1381 [ C(RESULT_ACCESS) ] = -1,
1382 [ C(RESULT_MISS) ] = -1,
1383 },
1384 [ C(OP_PREFETCH) ] = {
1385 [ C(RESULT_ACCESS) ] = -1,
1386 [ C(RESULT_MISS) ] = -1,
1387 },
1388 },
1389 [ C(BPU ) ] = {
1390 [ C(OP_READ) ] = {
1391 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1392 		[ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1393 },
1394 [ C(OP_WRITE) ] = {
1395 [ C(RESULT_ACCESS) ] = -1,
1396 [ C(RESULT_MISS) ] = -1,
1397 },
1398 [ C(OP_PREFETCH) ] = {
1399 [ C(RESULT_ACCESS) ] = -1,
1400 [ C(RESULT_MISS) ] = -1,
1401 },
1402 },
1403 };
1404
1405 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1406 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1407 /* no_alloc_cycles.not_delivered */
1408 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1409 "event=0xca,umask=0x50");
1410 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1411 /* uops_retired.all */
1412 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1413 "event=0xc2,umask=0x10");
1414 /* uops_retired.all */
1415 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1416 "event=0xc2,umask=0x10");
1417
1418 static struct attribute *slm_events_attrs[] = {
1419 EVENT_PTR(td_total_slots_slm),
1420 EVENT_PTR(td_total_slots_scale_slm),
1421 EVENT_PTR(td_fetch_bubbles_slm),
1422 EVENT_PTR(td_fetch_bubbles_scale_slm),
1423 EVENT_PTR(td_slots_issued_slm),
1424 EVENT_PTR(td_slots_retired_slm),
1425 NULL
1426 };
1427
1428 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1429 {
1430 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1431 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1432 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1433 EVENT_EXTRA_END
1434 };
1435
1436 #define SLM_DMND_READ SNB_DMND_DATA_RD
1437 #define SLM_DMND_WRITE SNB_DMND_RFO
1438 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1439
1440 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1441 #define SLM_LLC_ACCESS SNB_RESP_ANY
1442 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1443
1444 static __initconst const u64 slm_hw_cache_extra_regs
1445 [PERF_COUNT_HW_CACHE_MAX]
1446 [PERF_COUNT_HW_CACHE_OP_MAX]
1447 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1448 {
1449 [ C(LL ) ] = {
1450 [ C(OP_READ) ] = {
1451 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1452 [ C(RESULT_MISS) ] = 0,
1453 },
1454 [ C(OP_WRITE) ] = {
1455 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1456 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1457 },
1458 [ C(OP_PREFETCH) ] = {
1459 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1460 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1461 },
1462 },
1463 };
1464
1465 static __initconst const u64 slm_hw_cache_event_ids
1466 [PERF_COUNT_HW_CACHE_MAX]
1467 [PERF_COUNT_HW_CACHE_OP_MAX]
1468 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1469 {
1470 [ C(L1D) ] = {
1471 [ C(OP_READ) ] = {
1472 [ C(RESULT_ACCESS) ] = 0,
1473 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1474 },
1475 [ C(OP_WRITE) ] = {
1476 [ C(RESULT_ACCESS) ] = 0,
1477 [ C(RESULT_MISS) ] = 0,
1478 },
1479 [ C(OP_PREFETCH) ] = {
1480 [ C(RESULT_ACCESS) ] = 0,
1481 [ C(RESULT_MISS) ] = 0,
1482 },
1483 },
1484 [ C(L1I ) ] = {
1485 [ C(OP_READ) ] = {
1486 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1487 		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1488 },
1489 [ C(OP_WRITE) ] = {
1490 [ C(RESULT_ACCESS) ] = -1,
1491 [ C(RESULT_MISS) ] = -1,
1492 },
1493 [ C(OP_PREFETCH) ] = {
1494 [ C(RESULT_ACCESS) ] = 0,
1495 [ C(RESULT_MISS) ] = 0,
1496 },
1497 },
1498 [ C(LL ) ] = {
1499 [ C(OP_READ) ] = {
1500 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1501 [ C(RESULT_ACCESS) ] = 0x01b7,
1502 [ C(RESULT_MISS) ] = 0,
1503 },
1504 [ C(OP_WRITE) ] = {
1505 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1506 [ C(RESULT_ACCESS) ] = 0x01b7,
1507 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1508 [ C(RESULT_MISS) ] = 0x01b7,
1509 },
1510 [ C(OP_PREFETCH) ] = {
1511 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1512 [ C(RESULT_ACCESS) ] = 0x01b7,
1513 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1514 [ C(RESULT_MISS) ] = 0x01b7,
1515 },
1516 },
1517 [ C(DTLB) ] = {
1518 [ C(OP_READ) ] = {
1519 [ C(RESULT_ACCESS) ] = 0,
1520 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1521 },
1522 [ C(OP_WRITE) ] = {
1523 [ C(RESULT_ACCESS) ] = 0,
1524 [ C(RESULT_MISS) ] = 0,
1525 },
1526 [ C(OP_PREFETCH) ] = {
1527 [ C(RESULT_ACCESS) ] = 0,
1528 [ C(RESULT_MISS) ] = 0,
1529 },
1530 },
1531 [ C(ITLB) ] = {
1532 [ C(OP_READ) ] = {
1533 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1534 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1535 },
1536 [ C(OP_WRITE) ] = {
1537 [ C(RESULT_ACCESS) ] = -1,
1538 [ C(RESULT_MISS) ] = -1,
1539 },
1540 [ C(OP_PREFETCH) ] = {
1541 [ C(RESULT_ACCESS) ] = -1,
1542 [ C(RESULT_MISS) ] = -1,
1543 },
1544 },
1545 [ C(BPU ) ] = {
1546 [ C(OP_READ) ] = {
1547 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1548 		[ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1549 },
1550 [ C(OP_WRITE) ] = {
1551 [ C(RESULT_ACCESS) ] = -1,
1552 [ C(RESULT_MISS) ] = -1,
1553 },
1554 [ C(OP_PREFETCH) ] = {
1555 [ C(RESULT_ACCESS) ] = -1,
1556 [ C(RESULT_MISS) ] = -1,
1557 },
1558 },
1559 };
1560
1561 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1562 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1563 /* UOPS_NOT_DELIVERED.ANY */
1564 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1565 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1566 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1567 /* UOPS_RETIRED.ANY */
1568 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1569 /* UOPS_ISSUED.ANY */
1570 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1571
1572 static struct attribute *glm_events_attrs[] = {
1573 EVENT_PTR(td_total_slots_glm),
1574 EVENT_PTR(td_total_slots_scale_glm),
1575 EVENT_PTR(td_fetch_bubbles_glm),
1576 EVENT_PTR(td_recovery_bubbles_glm),
1577 EVENT_PTR(td_slots_issued_glm),
1578 EVENT_PTR(td_slots_retired_glm),
1579 NULL
1580 };
1581
1582 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1583 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1584 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1585 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1586 EVENT_EXTRA_END
1587 };
1588
1589 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1590 #define GLM_DEMAND_RFO BIT_ULL(1)
1591 #define GLM_ANY_RESPONSE BIT_ULL(16)
1592 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1593 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1594 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1595 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1596 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1597 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1598 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1599
1600 static __initconst const u64 glm_hw_cache_event_ids
1601 [PERF_COUNT_HW_CACHE_MAX]
1602 [PERF_COUNT_HW_CACHE_OP_MAX]
1603 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1604 [C(L1D)] = {
1605 [C(OP_READ)] = {
1606 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1607 [C(RESULT_MISS)] = 0x0,
1608 },
1609 [C(OP_WRITE)] = {
1610 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1611 [C(RESULT_MISS)] = 0x0,
1612 },
1613 [C(OP_PREFETCH)] = {
1614 [C(RESULT_ACCESS)] = 0x0,
1615 [C(RESULT_MISS)] = 0x0,
1616 },
1617 },
1618 [C(L1I)] = {
1619 [C(OP_READ)] = {
1620 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1621 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1622 },
1623 [C(OP_WRITE)] = {
1624 [C(RESULT_ACCESS)] = -1,
1625 [C(RESULT_MISS)] = -1,
1626 },
1627 [C(OP_PREFETCH)] = {
1628 [C(RESULT_ACCESS)] = 0x0,
1629 [C(RESULT_MISS)] = 0x0,
1630 },
1631 },
1632 [C(LL)] = {
1633 [C(OP_READ)] = {
1634 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1635 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1636 },
1637 [C(OP_WRITE)] = {
1638 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1639 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1640 },
1641 [C(OP_PREFETCH)] = {
1642 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1643 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1644 },
1645 },
1646 [C(DTLB)] = {
1647 [C(OP_READ)] = {
1648 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1649 [C(RESULT_MISS)] = 0x0,
1650 },
1651 [C(OP_WRITE)] = {
1652 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1653 [C(RESULT_MISS)] = 0x0,
1654 },
1655 [C(OP_PREFETCH)] = {
1656 [C(RESULT_ACCESS)] = 0x0,
1657 [C(RESULT_MISS)] = 0x0,
1658 },
1659 },
1660 [C(ITLB)] = {
1661 [C(OP_READ)] = {
1662 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1663 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1664 },
1665 [C(OP_WRITE)] = {
1666 [C(RESULT_ACCESS)] = -1,
1667 [C(RESULT_MISS)] = -1,
1668 },
1669 [C(OP_PREFETCH)] = {
1670 [C(RESULT_ACCESS)] = -1,
1671 [C(RESULT_MISS)] = -1,
1672 },
1673 },
1674 [C(BPU)] = {
1675 [C(OP_READ)] = {
1676 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1677 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1678 },
1679 [C(OP_WRITE)] = {
1680 [C(RESULT_ACCESS)] = -1,
1681 [C(RESULT_MISS)] = -1,
1682 },
1683 [C(OP_PREFETCH)] = {
1684 [C(RESULT_ACCESS)] = -1,
1685 [C(RESULT_MISS)] = -1,
1686 },
1687 },
1688 };
1689
1690 static __initconst const u64 glm_hw_cache_extra_regs
1691 [PERF_COUNT_HW_CACHE_MAX]
1692 [PERF_COUNT_HW_CACHE_OP_MAX]
1693 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1694 [C(LL)] = {
1695 [C(OP_READ)] = {
1696 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1697 GLM_LLC_ACCESS,
1698 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1699 GLM_LLC_MISS,
1700 },
1701 [C(OP_WRITE)] = {
1702 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1703 GLM_LLC_ACCESS,
1704 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1705 GLM_LLC_MISS,
1706 },
1707 [C(OP_PREFETCH)] = {
1708 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1709 GLM_LLC_ACCESS,
1710 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1711 GLM_LLC_MISS,
1712 },
1713 },
1714 };
1715
1716 static __initconst const u64 glp_hw_cache_event_ids
1717 [PERF_COUNT_HW_CACHE_MAX]
1718 [PERF_COUNT_HW_CACHE_OP_MAX]
1719 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1720 [C(L1D)] = {
1721 [C(OP_READ)] = {
1722 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1723 [C(RESULT_MISS)] = 0x0,
1724 },
1725 [C(OP_WRITE)] = {
1726 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1727 [C(RESULT_MISS)] = 0x0,
1728 },
1729 [C(OP_PREFETCH)] = {
1730 [C(RESULT_ACCESS)] = 0x0,
1731 [C(RESULT_MISS)] = 0x0,
1732 },
1733 },
1734 [C(L1I)] = {
1735 [C(OP_READ)] = {
1736 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1737 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1738 },
1739 [C(OP_WRITE)] = {
1740 [C(RESULT_ACCESS)] = -1,
1741 [C(RESULT_MISS)] = -1,
1742 },
1743 [C(OP_PREFETCH)] = {
1744 [C(RESULT_ACCESS)] = 0x0,
1745 [C(RESULT_MISS)] = 0x0,
1746 },
1747 },
1748 [C(LL)] = {
1749 [C(OP_READ)] = {
1750 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1751 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1752 },
1753 [C(OP_WRITE)] = {
1754 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1755 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1756 },
1757 [C(OP_PREFETCH)] = {
1758 [C(RESULT_ACCESS)] = 0x0,
1759 [C(RESULT_MISS)] = 0x0,
1760 },
1761 },
1762 [C(DTLB)] = {
1763 [C(OP_READ)] = {
1764 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1765 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1766 },
1767 [C(OP_WRITE)] = {
1768 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1769 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1770 },
1771 [C(OP_PREFETCH)] = {
1772 [C(RESULT_ACCESS)] = 0x0,
1773 [C(RESULT_MISS)] = 0x0,
1774 },
1775 },
1776 [C(ITLB)] = {
1777 [C(OP_READ)] = {
1778 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1779 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1780 },
1781 [C(OP_WRITE)] = {
1782 [C(RESULT_ACCESS)] = -1,
1783 [C(RESULT_MISS)] = -1,
1784 },
1785 [C(OP_PREFETCH)] = {
1786 [C(RESULT_ACCESS)] = -1,
1787 [C(RESULT_MISS)] = -1,
1788 },
1789 },
1790 [C(BPU)] = {
1791 [C(OP_READ)] = {
1792 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1793 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1794 },
1795 [C(OP_WRITE)] = {
1796 [C(RESULT_ACCESS)] = -1,
1797 [C(RESULT_MISS)] = -1,
1798 },
1799 [C(OP_PREFETCH)] = {
1800 [C(RESULT_ACCESS)] = -1,
1801 [C(RESULT_MISS)] = -1,
1802 },
1803 },
1804 };
1805
1806 static __initconst const u64 glp_hw_cache_extra_regs
1807 [PERF_COUNT_HW_CACHE_MAX]
1808 [PERF_COUNT_HW_CACHE_OP_MAX]
1809 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1810 [C(LL)] = {
1811 [C(OP_READ)] = {
1812 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1813 GLM_LLC_ACCESS,
1814 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1815 GLM_LLC_MISS,
1816 },
1817 [C(OP_WRITE)] = {
1818 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1819 GLM_LLC_ACCESS,
1820 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1821 GLM_LLC_MISS,
1822 },
1823 [C(OP_PREFETCH)] = {
1824 [C(RESULT_ACCESS)] = 0x0,
1825 [C(RESULT_MISS)] = 0x0,
1826 },
1827 },
1828 };
1829
1830 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
1831 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
1832 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
1833 #define KNL_MCDRAM_FAR BIT_ULL(22)
1834 #define KNL_DDR_LOCAL BIT_ULL(23)
1835 #define KNL_DDR_FAR BIT_ULL(24)
1836 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1837 KNL_DDR_LOCAL | KNL_DDR_FAR)
1838 #define KNL_L2_READ SLM_DMND_READ
1839 #define KNL_L2_WRITE SLM_DMND_WRITE
1840 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
1841 #define KNL_L2_ACCESS SLM_LLC_ACCESS
1842 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1843 KNL_DRAM_ANY | SNB_SNP_ANY | \
1844 SNB_NON_DRAM)
1845
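/*
 * The KNL_* bits above are OFFCORE_RESPONSE encodings; they feed the
 * LL cache extra-reg table below.
 */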
1846 static __initconst const u64 knl_hw_cache_extra_regs
1847 [PERF_COUNT_HW_CACHE_MAX]
1848 [PERF_COUNT_HW_CACHE_OP_MAX]
1849 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1850 [C(LL)] = {
1851 [C(OP_READ)] = {
1852 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1853 [C(RESULT_MISS)] = 0,
1854 },
1855 [C(OP_WRITE)] = {
1856 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1857 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
1858 },
1859 [C(OP_PREFETCH)] = {
1860 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1861 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
1862 },
1863 },
1864 };
1865
1866 /*
1867 * Used from PMIs where the LBRs are already disabled.
1868 *
1869 * This function may be called consecutively. The PMU must remain in the
1870 * disabled state across such consecutive calls.
1871 *
1872 * During consecutive calls, the same disable value is written to the related
1873 * registers, so the PMU state remains unchanged.
1874 *
1875 * intel_bts events don't coexist with intel PMU's BTS events because of
1876 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1877 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1878 */
1879 static void __intel_pmu_disable_all(void)
1880 {
1881 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1882
1883 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1884
1885 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1886 intel_pmu_disable_bts();
1887
1888 intel_pmu_pebs_disable_all();
1889 }
1890
1891 static void intel_pmu_disable_all(void)
1892 {
1893 __intel_pmu_disable_all();
1894 intel_pmu_lbr_disable_all();
1895 }
1896
1897 static void __intel_pmu_enable_all(int added, bool pmi)
1898 {
1899 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1900
1901 intel_pmu_pebs_enable_all();
1902 intel_pmu_lbr_enable_all(pmi);
1903 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1904 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1905
1906 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1907 struct perf_event *event =
1908 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1909
1910 if (WARN_ON_ONCE(!event))
1911 return;
1912
1913 intel_pmu_enable_bts(event->hw.config);
1914 }
1915 }
1916
1917 static void intel_pmu_enable_all(int added)
1918 {
1919 __intel_pmu_enable_all(added, false);
1920 }
1921
1922 /*
1923 * Workaround for:
1924 * Intel Errata AAK100 (model 26)
1925 * Intel Errata AAP53 (model 30)
1926 * Intel Errata BD53 (model 44)
1927 *
1928 * The official story:
1929 * These chips need to be 'reset' when adding counters by programming the
1930 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1931 * in sequence on the same PMC or on different PMCs.
1932 *
1933 * In practice it appears some of these events do in fact count, and
1934 * we need to program all 4 events.
1935 */
1936 static void intel_pmu_nhm_workaround(void)
1937 {
1938 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1939 static const unsigned long nhm_magic[4] = {
1940 0x4300B5,
1941 0x4300D2,
1942 0x4300B1,
1943 0x4300B1
1944 };
1945 struct perf_event *event;
1946 int i;
1947
1948 /*
1949 * The erratum requires the following steps:
1950 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1951 * 2) Configure 4 PERFEVTSELx with the magic events and clear
1952 * the corresponding PMCx;
1953 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
1954 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1955 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
1956 */
1957
1958 /*
1959 * The real steps we take are a little different from the above:
1960 * A) To reduce MSR operations, we don't run step 1) as those MSRs
1961 * are already cleared before this function is called;
1962 * B) Call x86_perf_event_update to save PMCx before configuring
1963 * PERFEVTSELx with the magic numbers;
1964 * C) With step 5), we clear PERFEVTSELx only when it is not
1965 * currently in use;
1966 * D) Call x86_perf_event_set_period to restore PMCx.
1967 */
1968
1969 /* We always operate 4 pairs of PERF Counters */
1970 for (i = 0; i < 4; i++) {
1971 event = cpuc->events[i];
1972 if (event)
1973 x86_perf_event_update(event);
1974 }
1975
1976 for (i = 0; i < 4; i++) {
1977 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1978 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1979 }
1980
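	/* Briefly enable PMC0-3 so the magic events tick, then stop them again. */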
1981 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1982 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1983
1984 for (i = 0; i < 4; i++) {
1985 event = cpuc->events[i];
1986
1987 if (event) {
1988 x86_perf_event_set_period(event);
1989 __x86_pmu_enable_event(&event->hw,
1990 ARCH_PERFMON_EVENTSEL_ENABLE);
1991 } else
1992 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1993 }
1994 }
1995
1996 static void intel_pmu_nhm_enable_all(int added)
1997 {
1998 if (added)
1999 intel_pmu_nhm_workaround();
2000 intel_pmu_enable_all(added);
2001 }
2002
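/*
 * TSX force abort (TFA): PMC3 is only usable by perf while RTM_FORCE_ABORT is
 * set in MSR_TSX_FORCE_ABORT, which makes all RTM transactions abort.
 */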
2003 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2004 {
2005 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2006
2007 if (cpuc->tfa_shadow != val) {
2008 cpuc->tfa_shadow = val;
2009 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2010 }
2011 }
2012
2013 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2014 {
2015 /*
2016 * We're going to use PMC3, make sure TFA is set before we touch it.
2017 */
2018 if (cntr == 3 && !cpuc->is_fake)
2019 intel_set_tfa(cpuc, true);
2020 }
2021
2022 static void intel_tfa_pmu_enable_all(int added)
2023 {
2024 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2025
2026 /*
2027 * If we find PMC3 is no longer used when we enable the PMU, we can
2028 * clear TFA.
2029 */
2030 if (!test_bit(3, cpuc->active_mask))
2031 intel_set_tfa(cpuc, false);
2032
2033 intel_pmu_enable_all(added);
2034 }
2035
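/*
 * Arch Perfmon v4 counter freezing: with FREEZE_PERFMON_ON_PMI set in
 * IA32_DEBUGCTL, the hardware freezes the counters when a PMI is raised
 * and unfreezes them when the overflow status is acked.
 */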
2036 static void enable_counter_freeze(void)
2037 {
2038 update_debugctlmsr(get_debugctlmsr() |
2039 DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2040 }
2041
2042 static void disable_counter_freeze(void)
2043 {
2044 update_debugctlmsr(get_debugctlmsr() &
2045 ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2046 }
2047
2048 static inline u64 intel_pmu_get_status(void)
2049 {
2050 u64 status;
2051
2052 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2053
2054 return status;
2055 }
2056
2057 static inline void intel_pmu_ack_status(u64 ack)
2058 {
2059 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2060 }
2061
2062 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2063 {
2064 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2065 u64 ctrl_val, mask;
2066
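	/* Clear this counter's 4-bit control field in FIXED_CTR_CTRL. */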
2067 mask = 0xfULL << (idx * 4);
2068
2069 rdmsrl(hwc->config_base, ctrl_val);
2070 ctrl_val &= ~mask;
2071 wrmsrl(hwc->config_base, ctrl_val);
2072 }
2073
2074 static inline bool event_is_checkpointed(struct perf_event *event)
2075 {
2076 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2077 }
2078
2079 static void intel_pmu_disable_event(struct perf_event *event)
2080 {
2081 struct hw_perf_event *hwc = &event->hw;
2082 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2083
2084 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2085 intel_pmu_disable_bts();
2086 intel_pmu_drain_bts_buffer();
2087 return;
2088 }
2089
2090 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2091 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2092 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2093
2094 if (unlikely(event->attr.precise_ip))
2095 intel_pmu_pebs_disable(event);
2096
2097 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2098 intel_pmu_disable_fixed(hwc);
2099 return;
2100 }
2101
2102 x86_pmu_disable_event(event);
2103 }
2104
2105 static void intel_pmu_del_event(struct perf_event *event)
2106 {
2107 if (needs_branch_stack(event))
2108 intel_pmu_lbr_del(event);
2109 if (event->attr.precise_ip)
2110 intel_pmu_pebs_del(event);
2111 }
2112
2113 static void intel_pmu_read_event(struct perf_event *event)
2114 {
2115 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2116 intel_pmu_auto_reload_read(event);
2117 else
2118 x86_perf_event_update(event);
2119 }
2120
2121 static void intel_pmu_enable_fixed(struct perf_event *event)
2122 {
2123 struct hw_perf_event *hwc = &event->hw;
2124 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2125 u64 ctrl_val, mask, bits = 0;
2126
2127 /*
2128 * Enable IRQ generation (0x8), if not PEBS,
2129 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2130 * if requested:
2131 */
2132 if (!event->attr.precise_ip)
2133 bits |= 0x8;
2134 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2135 bits |= 0x2;
2136 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2137 bits |= 0x1;
2138
2139 /*
2140 * ANY bit is supported in v3 and up
2141 */
2142 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2143 bits |= 0x4;
2144
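	/*
	 * Each fixed counter owns one 4-bit nibble of FIXED_CTR_CTRL;
	 * e.g. fixed counter 1 (idx == 1) is controlled by bits 4-7.
	 */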
2145 bits <<= (idx * 4);
2146 mask = 0xfULL << (idx * 4);
2147
2148 rdmsrl(hwc->config_base, ctrl_val);
2149 ctrl_val &= ~mask;
2150 ctrl_val |= bits;
2151 wrmsrl(hwc->config_base, ctrl_val);
2152 }
2153
2154 static void intel_pmu_enable_event(struct perf_event *event)
2155 {
2156 struct hw_perf_event *hwc = &event->hw;
2157 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2158
2159 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2160 if (!__this_cpu_read(cpu_hw_events.enabled))
2161 return;
2162
2163 intel_pmu_enable_bts(hwc->config);
2164 return;
2165 }
2166
2167 if (event->attr.exclude_host)
2168 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2169 if (event->attr.exclude_guest)
2170 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2171
2172 if (unlikely(event_is_checkpointed(event)))
2173 cpuc->intel_cp_status |= (1ull << hwc->idx);
2174
2175 if (unlikely(event->attr.precise_ip))
2176 intel_pmu_pebs_enable(event);
2177
2178 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2179 intel_pmu_enable_fixed(event);
2180 return;
2181 }
2182
2183 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2184 }
2185
2186 static void intel_pmu_add_event(struct perf_event *event)
2187 {
2188 if (event->attr.precise_ip)
2189 intel_pmu_pebs_add(event);
2190 if (needs_branch_stack(event))
2191 intel_pmu_lbr_add(event);
2192 }
2193
2194 /*
2195 * Save and restart an expired event. Called by NMI contexts,
2196 * so it has to be careful about preempting normal event ops:
2197 */
2198 int intel_pmu_save_and_restart(struct perf_event *event)
2199 {
2200 x86_perf_event_update(event);
2201 /*
2202 * For a checkpointed counter always reset back to 0. This
2203 * avoids a situation where the counter overflows, aborts the
2204 * transaction and is then set back to shortly before the
2205 * overflow, and overflows and aborts again.
2206 */
2207 if (unlikely(event_is_checkpointed(event))) {
2208 /* No race with NMIs because the counter should not be armed */
2209 wrmsrl(event->hw.event_base, 0);
2210 local64_set(&event->hw.prev_count, 0);
2211 }
2212 return x86_perf_event_set_period(event);
2213 }
2214
2215 static void intel_pmu_reset(void)
2216 {
2217 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2218 unsigned long flags;
2219 int idx;
2220
2221 if (!x86_pmu.num_counters)
2222 return;
2223
2224 local_irq_save(flags);
2225
2226 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2227
2228 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2229 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2230 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2231 }
2232 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2233 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2234
2235 if (ds)
2236 ds->bts_index = ds->bts_buffer_base;
2237
2238 /* Ack all overflows and disable fixed counters */
2239 if (x86_pmu.version >= 2) {
2240 intel_pmu_ack_status(intel_pmu_get_status());
2241 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2242 }
2243
2244 /* Reset LBRs and LBR freezing */
2245 if (x86_pmu.lbr_nr) {
2246 update_debugctlmsr(get_debugctlmsr() &
2247 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2248 }
2249
2250 local_irq_restore(flags);
2251 }
2252
2253 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2254 {
2255 struct perf_sample_data data;
2256 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2257 int bit;
2258 int handled = 0;
2259
2260 inc_irq_stat(apic_perf_irqs);
2261
2262 /*
2263 * Ignore a range of extra bits in status that do not indicate
2264 * overflow by themselves.
2265 */
2266 status &= ~(GLOBAL_STATUS_COND_CHG |
2267 GLOBAL_STATUS_ASIF |
2268 GLOBAL_STATUS_LBRS_FROZEN);
2269 if (!status)
2270 return 0;
2271 /*
2272 * In case multiple PEBS events are sampled at the same time,
2273 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2274 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2275 * having their bits set in the status register. This is a sign
2276 * that there was at least one PEBS record pending at the time
2277 * of the PMU interrupt. PEBS counters must only be processed
2278 * via the drain_pebs() calls and not via the regular sample
2279 * processing loop coming later in this function; otherwise,
2280 * phony regular samples may be generated in the sampling buffer
2281 * not marked with the EXACT tag. Another possibility is to have
2282 * one PEBS event and at least one non-PEBS event which overflows
2283 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2284 * not be set, yet the overflow status bit for the PEBS counter will
2285 * be set on Skylake.
2286 *
2287 * To avoid this problem, we systematically ignore the PEBS-enabled
2288 * counters from the GLOBAL_STATUS mask and we always process PEBS
2289 * events via drain_pebs().
2290 */
2291 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2292 status &= ~cpuc->pebs_enabled;
2293 else
2294 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2295
2296 /*
2297 * PEBS overflow sets bit 62 in the global status register
2298 */
2299 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2300 handled++;
2301 x86_pmu.drain_pebs(regs);
2302 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2303 }
2304
2305 /*
2306 * Intel PT
2307 */
2308 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2309 handled++;
2310 if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2311 perf_guest_cbs->handle_intel_pt_intr))
2312 perf_guest_cbs->handle_intel_pt_intr();
2313 else
2314 intel_pt_interrupt();
2315 }
2316
2317 /*
2318 * Checkpointed counters can lead to 'spurious' PMIs because the
2319 * rollback caused by the PMI will have cleared the overflow status
2320 * bit. Therefore always force probe these counters.
2321 */
2322 status |= cpuc->intel_cp_status;
2323
2324 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2325 struct perf_event *event = cpuc->events[bit];
2326
2327 handled++;
2328
2329 if (!test_bit(bit, cpuc->active_mask))
2330 continue;
2331
2332 if (!intel_pmu_save_and_restart(event))
2333 continue;
2334
2335 perf_sample_data_init(&data, 0, event->hw.last_period);
2336
2337 if (has_branch_stack(event))
2338 data.br_stack = &cpuc->lbr_stack;
2339
2340 if (perf_event_overflow(event, &data, regs))
2341 x86_pmu_stop(event, 0);
2342 }
2343
2344 return handled;
2345 }
2346
2347 static bool disable_counter_freezing = true;
2348 static int __init intel_perf_counter_freezing_setup(char *s)
2349 {
2350 bool res;
2351
2352 if (kstrtobool(s, &res))
2353 return -EINVAL;
2354
2355 disable_counter_freezing = !res;
2356 return 1;
2357 }
2358 __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
2359
2360 /*
2361 * Simplified handler for Arch Perfmon v4:
2362 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2363 * This is done automatically on PMU ack.
2364 * - Ack the PMU only after the APIC.
2365 */
2366
2367 static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2368 {
2369 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2370 int handled = 0;
2371 bool bts = false;
2372 u64 status;
2373 int pmu_enabled = cpuc->enabled;
2374 int loops = 0;
2375
2376 /* PMU has been disabled because of counter freezing */
2377 cpuc->enabled = 0;
2378 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2379 bts = true;
2380 intel_bts_disable_local();
2381 handled = intel_pmu_drain_bts_buffer();
2382 handled += intel_bts_interrupt();
2383 }
2384 status = intel_pmu_get_status();
2385 if (!status)
2386 goto done;
2387 again:
2388 intel_pmu_lbr_read();
2389 if (++loops > 100) {
2390 static bool warned;
2391
2392 if (!warned) {
2393 WARN(1, "perfevents: irq loop stuck!\n");
2394 perf_event_print_debug();
2395 warned = true;
2396 }
2397 intel_pmu_reset();
2398 goto done;
2399 }
2400
2402 handled += handle_pmi_common(regs, status);
2403 done:
2404 /* Ack the PMI in the APIC */
2405 apic_write(APIC_LVTPC, APIC_DM_NMI);
2406
2407 /*
2408 * The counters start counting again as soon as the status is acked.
2409 * Make it as close as possible to IRET. This avoids bogus
2410 * freezing on Skylake CPUs.
2411 */
2412 if (status) {
2413 intel_pmu_ack_status(status);
2414 } else {
2415 /*
2416 * The CPU may issue two PMIs very close to each other.
2417 * When the PMI handler services the first one, the
2418 * GLOBAL_STATUS is already updated to reflect both.
2419 * When it IRETs, the second PMI is immediately
2420 * handled and sees a clear status. In the meantime,
2421 * there may be a third PMI, because counter freezing
2422 * was released by the ack in the first PMI handler.
2423 * Double check if there is more work to be done.
2424 */
2425 status = intel_pmu_get_status();
2426 if (status)
2427 goto again;
2428 }
2429
2430 if (bts)
2431 intel_bts_enable_local();
2432 cpuc->enabled = pmu_enabled;
2433 return handled;
2434 }
2435
2436 /*
2437 * This handler is triggered by the local APIC, so the APIC IRQ handling
2438 * rules apply:
2439 */
2440 static int intel_pmu_handle_irq(struct pt_regs *regs)
2441 {
2442 struct cpu_hw_events *cpuc;
2443 int loops;
2444 u64 status;
2445 int handled;
2446 int pmu_enabled;
2447
2448 cpuc = this_cpu_ptr(&cpu_hw_events);
2449
2450 /*
2451 * Save the PMU state.
2452 * It needs to be restored when leaving the handler.
2453 */
2454 pmu_enabled = cpuc->enabled;
2455 /*
2456 * There is no known reason not to always do the late ACK,
2457 * but just in case, make it opt-in.
2458 */
2459 if (!x86_pmu.late_ack)
2460 apic_write(APIC_LVTPC, APIC_DM_NMI);
2461 intel_bts_disable_local();
2462 cpuc->enabled = 0;
2463 __intel_pmu_disable_all();
2464 handled = intel_pmu_drain_bts_buffer();
2465 handled += intel_bts_interrupt();
2466 status = intel_pmu_get_status();
2467 if (!status)
2468 goto done;
2469
2470 loops = 0;
2471 again:
2472 intel_pmu_lbr_read();
2473 intel_pmu_ack_status(status);
2474 if (++loops > 100) {
2475 static bool warned;
2476
2477 if (!warned) {
2478 WARN(1, "perfevents: irq loop stuck!\n");
2479 perf_event_print_debug();
2480 warned = true;
2481 }
2482 intel_pmu_reset();
2483 goto done;
2484 }
2485
2486 handled += handle_pmi_common(regs, status);
2487
2488 /*
2489 * Repeat if there is more work to be done:
2490 */
2491 status = intel_pmu_get_status();
2492 if (status)
2493 goto again;
2494
2495 done:
2496 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2497 cpuc->enabled = pmu_enabled;
2498 if (pmu_enabled)
2499 __intel_pmu_enable_all(0, true);
2500 intel_bts_enable_local();
2501
2502 /*
2503 * Only unmask the NMI after the overflow counters
2504 * have been reset. This avoids spurious NMIs on
2505 * Haswell CPUs.
2506 */
2507 if (x86_pmu.late_ack)
2508 apic_write(APIC_LVTPC, APIC_DM_NMI);
2509 return handled;
2510 }
2511
2512 static struct event_constraint *
2513 intel_bts_constraints(struct perf_event *event)
2514 {
2515 if (unlikely(intel_pmu_has_bts(event)))
2516 return &bts_constraint;
2517
2518 return NULL;
2519 }
2520
2521 static int intel_alt_er(int idx, u64 config)
2522 {
2523 int alt_idx = idx;
2524
2525 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2526 return idx;
2527
2528 if (idx == EXTRA_REG_RSP_0)
2529 alt_idx = EXTRA_REG_RSP_1;
2530
2531 if (idx == EXTRA_REG_RSP_1)
2532 alt_idx = EXTRA_REG_RSP_0;
2533
2534 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2535 return idx;
2536
2537 return alt_idx;
2538 }
2539
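/*
 * Rewrite the event to the alternate OFFCORE_RSP encoding/MSR once an
 * alternate extra register has been chosen by intel_alt_er().
 */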
2540 static void intel_fixup_er(struct perf_event *event, int idx)
2541 {
2542 event->hw.extra_reg.idx = idx;
2543
2544 if (idx == EXTRA_REG_RSP_0) {
2545 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2546 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2547 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2548 } else if (idx == EXTRA_REG_RSP_1) {
2549 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2550 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2551 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
2552 }
2553 }
2554
2555 /*
2556 * manage allocation of shared extra msr for certain events
2557 *
2558 * sharing can be:
2559 * per-cpu: to be shared between the various events on a single PMU
2560 * per-core: per-cpu + shared by HT threads
2561 */
2562 static struct event_constraint *
2563 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2564 struct perf_event *event,
2565 struct hw_perf_event_extra *reg)
2566 {
2567 struct event_constraint *c = &emptyconstraint;
2568 struct er_account *era;
2569 unsigned long flags;
2570 int idx = reg->idx;
2571
2572 /*
2573 * reg->alloc can be set due to existing state, so for fake cpuc we
2574 * need to ignore this, otherwise we might fail to allocate proper fake
2575 * state for this extra reg constraint. Also see the comment below.
2576 */
2577 if (reg->alloc && !cpuc->is_fake)
2578 return NULL; /* call x86_get_event_constraint() */
2579
2580 again:
2581 era = &cpuc->shared_regs->regs[idx];
2582 /*
2583 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
2584 * passing a fake cpuc
2585 */
2586 raw_spin_lock_irqsave(&era->lock, flags);
2587
2588 if (!atomic_read(&era->ref) || era->config == reg->config) {
2589
2590 /*
2591 * If it's a fake cpuc -- as per validate_{group,event}() we
2592 * shouldn't touch event state and we can avoid doing so
2593 * since both will only call get_event_constraints() once
2594 * on each event, this avoids the need for reg->alloc.
2595 *
2596 * Not doing the ER fixup will only result in era->reg being
2597 * wrong, but since we won't actually try and program hardware
2598 * this isn't a problem either.
2599 */
2600 if (!cpuc->is_fake) {
2601 if (idx != reg->idx)
2602 intel_fixup_er(event, idx);
2603
2604 /*
2605 * x86_schedule_events() can call get_event_constraints()
2606 * multiple times on events in the case of incremental
2607 * scheduling. reg->alloc ensures we only do the ER
2608 * allocation once.
2609 */
2610 reg->alloc = 1;
2611 }
2612
2613 /* lock in msr value */
2614 era->config = reg->config;
2615 era->reg = reg->reg;
2616
2617 /* one more user */
2618 atomic_inc(&era->ref);
2619
2620 /*
2621 * need to call x86_get_event_constraint()
2622 * to check if associated event has constraints
2623 */
2624 c = NULL;
2625 } else {
2626 idx = intel_alt_er(idx, reg->config);
2627 if (idx != reg->idx) {
2628 raw_spin_unlock_irqrestore(&era->lock, flags);
2629 goto again;
2630 }
2631 }
2632 raw_spin_unlock_irqrestore(&era->lock, flags);
2633
2634 return c;
2635 }
2636
2637 static void
2638 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2639 struct hw_perf_event_extra *reg)
2640 {
2641 struct er_account *era;
2642
2643 /*
2644 * Only put constraint if extra reg was actually allocated. Also takes
2645 * care of events which do not use an extra shared reg.
2646 *
2647 * Also, if this is a fake cpuc we shouldn't touch any event state
2648 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2649 * either since it'll be thrown out.
2650 */
2651 if (!reg->alloc || cpuc->is_fake)
2652 return;
2653
2654 era = &cpuc->shared_regs->regs[reg->idx];
2655
2656 /* one fewer user */
2657 atomic_dec(&era->ref);
2658
2659 /* allocate again next time */
2660 reg->alloc = 0;
2661 }
2662
2663 static struct event_constraint *
2664 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2665 struct perf_event *event)
2666 {
2667 struct event_constraint *c = NULL, *d;
2668 struct hw_perf_event_extra *xreg, *breg;
2669
2670 xreg = &event->hw.extra_reg;
2671 if (xreg->idx != EXTRA_REG_NONE) {
2672 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2673 if (c == &emptyconstraint)
2674 return c;
2675 }
2676 breg = &event->hw.branch_reg;
2677 if (breg->idx != EXTRA_REG_NONE) {
2678 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2679 if (d == &emptyconstraint) {
2680 __intel_shared_reg_put_constraints(cpuc, xreg);
2681 c = d;
2682 }
2683 }
2684 return c;
2685 }
2686
2687 struct event_constraint *
2688 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2689 struct perf_event *event)
2690 {
2691 struct event_constraint *c;
2692
2693 if (x86_pmu.event_constraints) {
2694 for_each_event_constraint(c, x86_pmu.event_constraints) {
2695 if ((event->hw.config & c->cmask) == c->code) {
2696 event->hw.flags |= c->flags;
2697 return c;
2698 }
2699 }
2700 }
2701
2702 return &unconstrained;
2703 }
2704
2705 static struct event_constraint *
2706 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2707 struct perf_event *event)
2708 {
2709 struct event_constraint *c;
2710
2711 c = intel_bts_constraints(event);
2712 if (c)
2713 return c;
2714
2715 c = intel_shared_regs_constraints(cpuc, event);
2716 if (c)
2717 return c;
2718
2719 c = intel_pebs_constraints(event);
2720 if (c)
2721 return c;
2722
2723 return x86_get_event_constraints(cpuc, idx, event);
2724 }
2725
2726 static void
2727 intel_start_scheduling(struct cpu_hw_events *cpuc)
2728 {
2729 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2730 struct intel_excl_states *xl;
2731 int tid = cpuc->excl_thread_id;
2732
2733 /*
2734 * nothing needed if in group validation mode
2735 */
2736 if (cpuc->is_fake || !is_ht_workaround_enabled())
2737 return;
2738
2739 /*
2740 * no exclusion needed
2741 */
2742 if (WARN_ON_ONCE(!excl_cntrs))
2743 return;
2744
2745 xl = &excl_cntrs->states[tid];
2746
2747 xl->sched_started = true;
2748 /*
2749 * Lock the shared state until we are done scheduling; the lock is
2750 * released in intel_stop_scheduling(). This makes scheduling appear
2751 * as a transaction.
2752 */
2753 raw_spin_lock(&excl_cntrs->lock);
2754 }
2755
2756 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2757 {
2758 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2759 struct event_constraint *c = cpuc->event_constraint[idx];
2760 struct intel_excl_states *xl;
2761 int tid = cpuc->excl_thread_id;
2762
2763 if (cpuc->is_fake || !is_ht_workaround_enabled())
2764 return;
2765
2766 if (WARN_ON_ONCE(!excl_cntrs))
2767 return;
2768
2769 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2770 return;
2771
2772 xl = &excl_cntrs->states[tid];
2773
2774 lockdep_assert_held(&excl_cntrs->lock);
2775
2776 if (c->flags & PERF_X86_EVENT_EXCL)
2777 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2778 else
2779 xl->state[cntr] = INTEL_EXCL_SHARED;
2780 }
2781
2782 static void
2783 intel_stop_scheduling(struct cpu_hw_events *cpuc)
2784 {
2785 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2786 struct intel_excl_states *xl;
2787 int tid = cpuc->excl_thread_id;
2788
2789 /*
2790 * nothing needed if in group validation mode
2791 */
2792 if (cpuc->is_fake || !is_ht_workaround_enabled())
2793 return;
2794 /*
2795 * no exclusion needed
2796 */
2797 if (WARN_ON_ONCE(!excl_cntrs))
2798 return;
2799
2800 xl = &excl_cntrs->states[tid];
2801
2802 xl->sched_started = false;
2803 /*
2804 * release shared state lock (acquired in intel_start_scheduling())
2805 */
2806 raw_spin_unlock(&excl_cntrs->lock);
2807 }
2808
2809 static struct event_constraint *
2810 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
2811 {
2812 WARN_ON_ONCE(!cpuc->constraint_list);
2813
2814 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2815 struct event_constraint *cx;
2816
2817 /*
2818 * grab pre-allocated constraint entry
2819 */
2820 cx = &cpuc->constraint_list[idx];
2821
2822 /*
2823 * initialize dynamic constraint
2824 * with static constraint
2825 */
2826 *cx = *c;
2827
2828 /*
2829 * mark constraint as dynamic
2830 */
2831 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2832 c = cx;
2833 }
2834
2835 return c;
2836 }
2837
2838 static struct event_constraint *
2839 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2840 int idx, struct event_constraint *c)
2841 {
2842 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2843 struct intel_excl_states *xlo;
2844 int tid = cpuc->excl_thread_id;
2845 int is_excl, i;
2846
2847 /*
2848 * validating a group does not require
2849 * enforcing cross-thread exclusion
2850 */
2851 if (cpuc->is_fake || !is_ht_workaround_enabled())
2852 return c;
2853
2854 /*
2855 * no exclusion needed
2856 */
2857 if (WARN_ON_ONCE(!excl_cntrs))
2858 return c;
2859
2860 /*
2861 * because we modify the constraint, we need
2862 * to make a copy. Static constraints come
2863 * from static const tables.
2864 *
2865 * only needed when constraint has not yet
2866 * been cloned (marked dynamic)
2867 */
2868 c = dyn_constraint(cpuc, c, idx);
2869
2870 /*
2871 * From here on, the constraint is dynamic.
2872 * Either it was just allocated above, or it
2873 * was allocated during an earlier invocation
2874 * of this function.
2875 */
2876
2877 /*
2878 * state of sibling HT
2879 */
2880 xlo = &excl_cntrs->states[tid ^ 1];
2881
2882 /*
2883 * event requires exclusive counter access
2884 * across HT threads
2885 */
2886 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2887 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2888 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2889 if (!cpuc->n_excl++)
2890 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2891 }
2892
2893 /*
2894 * Modify static constraint with current dynamic
2895 * state of thread
2896 *
2897 * EXCLUSIVE: sibling counter measuring exclusive event
2898 * SHARED : sibling counter measuring non-exclusive event
2899 * UNUSED : sibling counter unused
2900 */
2901 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2902 /*
2903 * exclusive event in sibling counter
2904 * our corresponding counter cannot be used
2905 * regardless of our event
2906 */
2907 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
2908 __clear_bit(i, c->idxmsk);
2909 /*
2910 * if we are measuring an exclusive event and the sibling is
2911 * measuring a non-exclusive one, then the counter cannot
2912 * be used
2913 */
2914 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
2915 __clear_bit(i, c->idxmsk);
2916 }
2917
2918 /*
2919 * recompute actual bit weight for scheduling algorithm
2920 */
2921 c->weight = hweight64(c->idxmsk64);
2922
2923 /*
2924 * if we return an empty mask, then switch
2925 * back to static empty constraint to avoid
2926 * the cost of freeing later on
2927 */
2928 if (c->weight == 0)
2929 c = &emptyconstraint;
2930
2931 return c;
2932 }
2933
2934 static struct event_constraint *
2935 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2936 struct perf_event *event)
2937 {
2938 struct event_constraint *c1 = NULL;
2939 struct event_constraint *c2;
2940
2941 if (idx >= 0) /* fake does < 0 */
2942 c1 = cpuc->event_constraint[idx];
2943
2944 /*
2945 * first time only
2946 * - static constraint: no change across incremental scheduling calls
2947 * - dynamic constraint: handled by intel_get_excl_constraints()
2948 */
2949 c2 = __intel_get_event_constraints(cpuc, idx, event);
2950 if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
2951 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
2952 c1->weight = c2->weight;
2953 c2 = c1;
2954 }
2955
2956 if (cpuc->excl_cntrs)
2957 return intel_get_excl_constraints(cpuc, event, idx, c2);
2958
2959 return c2;
2960 }
2961
2962 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2963 struct perf_event *event)
2964 {
2965 struct hw_perf_event *hwc = &event->hw;
2966 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2967 int tid = cpuc->excl_thread_id;
2968 struct intel_excl_states *xl;
2969
2970 /*
2971 * nothing needed if in group validation mode
2972 */
2973 if (cpuc->is_fake)
2974 return;
2975
2976 if (WARN_ON_ONCE(!excl_cntrs))
2977 return;
2978
2979 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2980 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2981 if (!--cpuc->n_excl)
2982 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2983 }
2984
2985 /*
2986 * If event was actually assigned, then mark the counter state as
2987 * unused now.
2988 */
2989 if (hwc->idx >= 0) {
2990 xl = &excl_cntrs->states[tid];
2991
2992 /*
2993 * put_constraint may be called from x86_schedule_events()
2994 * which already holds the lock, so make locking
2995 * conditional here.
2996 */
2997 if (!xl->sched_started)
2998 raw_spin_lock(&excl_cntrs->lock);
2999
3000 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3001
3002 if (!xl->sched_started)
3003 raw_spin_unlock(&excl_cntrs->lock);
3004 }
3005 }
3006
3007 static void
3008 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3009 struct perf_event *event)
3010 {
3011 struct hw_perf_event_extra *reg;
3012
3013 reg = &event->hw.extra_reg;
3014 if (reg->idx != EXTRA_REG_NONE)
3015 __intel_shared_reg_put_constraints(cpuc, reg);
3016
3017 reg = &event->hw.branch_reg;
3018 if (reg->idx != EXTRA_REG_NONE)
3019 __intel_shared_reg_put_constraints(cpuc, reg);
3020 }
3021
3022 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3023 struct perf_event *event)
3024 {
3025 intel_put_shared_regs_event_constraints(cpuc, event);
3026
3027 /*
3028 * If the PMU has exclusive counter restrictions, then
3029 * all events are subject to them and must call the
3030 * put_excl_constraints() routine
3031 */
3032 if (cpuc->excl_cntrs)
3033 intel_put_excl_constraints(cpuc, event);
3034 }
3035
3036 static void intel_pebs_aliases_core2(struct perf_event *event)
3037 {
3038 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3039 /*
3040 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3041 * (0x003c) so that we can use it with PEBS.
3042 *
3043 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3044 * PEBS capable. However we can use INST_RETIRED.ANY_P
3045 * (0x00c0), which is a PEBS capable event, to get the same
3046 * count.
3047 *
3048 * INST_RETIRED.ANY_P with a counter mask counts the number of cycles
3049 * that retire at least CNTMASK instructions. By setting CNTMASK to a
3050 * value (16) larger than the maximum number of instructions that can be
3051 * retired per cycle (4) and then inverting the condition, we
3052 * count all cycles that retire fewer than 16 instructions, which
3053 * is every cycle.
3054 *
3055 * Thereby we gain a PEBS capable cycle counter.
3056 */
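		/* The alternative below is raw config 0x108000c0: event=0xc0, inv=1, cmask=16. */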
3057 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3058
3059 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3060 event->hw.config = alt_config;
3061 }
3062 }
3063
3064 static void intel_pebs_aliases_snb(struct perf_event *event)
3065 {
3066 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3067 /*
3068 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3069 * (0x003c) so that we can use it with PEBS.
3070 *
3071 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3072 * PEBS capable. However we can use UOPS_RETIRED.ALL
3073 * (0x01c2), which is a PEBS capable event, to get the same
3074 * count.
3075 *
3076 * UOPS_RETIRED.ALL with a counter mask counts the number of cycles
3077 * that retire at least CNTMASK micro-ops. By setting CNTMASK to a
3078 * value (16) larger than the maximum number of micro-ops that can be
3079 * retired per cycle (4) and then inverting the condition, we
3080 * count all cycles that retire fewer than 16 micro-ops, which
3081 * is every cycle.
3082 *
3083 * Thereby we gain a PEBS capable cycle counter.
3084 */
3085 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3086
3087 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3088 event->hw.config = alt_config;
3089 }
3090 }
3091
3092 static void intel_pebs_aliases_precdist(struct perf_event *event)
3093 {
3094 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3095 /*
3096 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3097 * (0x003c) so that we can use it with PEBS.
3098 *
3099 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3100 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3101 * (0x01c0), which is a PEBS capable event, to get the same
3102 * count.
3103 *
3104 * The PREC_DIST event has special support to minimize sample
3105 * shadowing effects. One drawback is that it can only be
3106 * programmed on counter 1, but that seems like an
3107 * acceptable trade off.
3108 */
3109 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3110
3111 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3112 event->hw.config = alt_config;
3113 }
3114 }
3115
3116 static void intel_pebs_aliases_ivb(struct perf_event *event)
3117 {
3118 if (event->attr.precise_ip < 3)
3119 return intel_pebs_aliases_snb(event);
3120 return intel_pebs_aliases_precdist(event);
3121 }
3122
3123 static void intel_pebs_aliases_skl(struct perf_event *event)
3124 {
3125 if (event->attr.precise_ip < 3)
3126 return intel_pebs_aliases_core2(event);
3127 return intel_pebs_aliases_precdist(event);
3128 }
3129
3130 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3131 {
3132 unsigned long flags = x86_pmu.large_pebs_flags;
3133
3134 if (event->attr.use_clockid)
3135 flags &= ~PERF_SAMPLE_TIME;
3136 if (!event->attr.exclude_kernel)
3137 flags &= ~PERF_SAMPLE_REGS_USER;
3138 if (event->attr.sample_regs_user & ~PEBS_REGS)
3139 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3140 return flags;
3141 }
3142
3143 static int intel_pmu_bts_config(struct perf_event *event)
3144 {
3145 struct perf_event_attr *attr = &event->attr;
3146
3147 if (unlikely(intel_pmu_has_bts(event))) {
3148 /* BTS is not supported by this architecture. */
3149 if (!x86_pmu.bts_active)
3150 return -EOPNOTSUPP;
3151
3152 /* BTS is currently only allowed for user-mode. */
3153 if (!attr->exclude_kernel)
3154 return -EOPNOTSUPP;
3155
3156 /* BTS is not allowed for precise events. */
3157 if (attr->precise_ip)
3158 return -EOPNOTSUPP;
3159
3160 /* disallow bts if conflicting events are present */
3161 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3162 return -EBUSY;
3163
3164 event->destroy = hw_perf_lbr_event_destroy;
3165 }
3166
3167 return 0;
3168 }
3169
3170 static int core_pmu_hw_config(struct perf_event *event)
3171 {
3172 int ret = x86_pmu_hw_config(event);
3173
3174 if (ret)
3175 return ret;
3176
3177 return intel_pmu_bts_config(event);
3178 }
3179
3180 static int intel_pmu_hw_config(struct perf_event *event)
3181 {
3182 int ret = x86_pmu_hw_config(event);
3183
3184 if (ret)
3185 return ret;
3186
3187 ret = intel_pmu_bts_config(event);
3188 if (ret)
3189 return ret;
3190
3191 if (event->attr.precise_ip) {
3192 if (!event->attr.freq) {
3193 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3194 if (!(event->attr.sample_type &
3195 ~intel_pmu_large_pebs_flags(event)))
3196 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3197 }
3198 if (x86_pmu.pebs_aliases)
3199 x86_pmu.pebs_aliases(event);
3200
3201 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3202 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3203 }
3204
3205 if (needs_branch_stack(event)) {
3206 ret = intel_pmu_setup_lbr_filter(event);
3207 if (ret)
3208 return ret;
3209
3210 /*
3211 * BTS is set up earlier in this path, so don't account twice
3212 */
3213 if (!unlikely(intel_pmu_has_bts(event))) {
3214 /* disallow lbr if conflicting events are present */
3215 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3216 return -EBUSY;
3217
3218 event->destroy = hw_perf_lbr_event_destroy;
3219 }
3220 }
3221
3222 if (event->attr.type != PERF_TYPE_RAW)
3223 return 0;
3224
3225 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3226 return 0;
3227
3228 if (x86_pmu.version < 3)
3229 return -EINVAL;
3230
3231 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3232 return -EACCES;
3233
3234 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3235
3236 return 0;
3237 }
3238
3239 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3240 {
3241 if (x86_pmu.guest_get_msrs)
3242 return x86_pmu.guest_get_msrs(nr);
3243 *nr = 0;
3244 return NULL;
3245 }
3246 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3247
3248 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3249 {
3250 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3251 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3252
3253 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3254 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3255 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3256 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3257 arr[0].guest &= ~cpuc->pebs_enabled;
3258 else
3259 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3260 *nr = 1;
3261
3262 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3263 /*
3264 * If a PMU counter has PEBS enabled, it is not enough to
3265 * disable the counter on guest entry, since a PEBS memory
3266 * write can overshoot the guest entry and corrupt guest
3267 * memory. Disabling PEBS solves the problem.
3268 *
3269 * Don't do this if the CPU already enforces it.
3270 */
3271 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3272 arr[1].host = cpuc->pebs_enabled;
3273 arr[1].guest = 0;
3274 *nr = 2;
3275 }
3276
3277 return arr;
3278 }
3279
3280 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3281 {
3282 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3283 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3284 int idx;
3285
3286 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3287 struct perf_event *event = cpuc->events[idx];
3288
3289 arr[idx].msr = x86_pmu_config_addr(idx);
3290 arr[idx].host = arr[idx].guest = 0;
3291
3292 if (!test_bit(idx, cpuc->active_mask))
3293 continue;
3294
3295 arr[idx].host = arr[idx].guest =
3296 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3297
3298 if (event->attr.exclude_host)
3299 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3300 else if (event->attr.exclude_guest)
3301 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3302 }
3303
3304 *nr = x86_pmu.num_counters;
3305 return arr;
3306 }
3307
3308 static void core_pmu_enable_event(struct perf_event *event)
3309 {
3310 if (!event->attr.exclude_host)
3311 x86_pmu_enable_event(event);
3312 }
3313
3314 static void core_pmu_enable_all(int added)
3315 {
3316 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3317 int idx;
3318
3319 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3320 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3321
3322 if (!test_bit(idx, cpuc->active_mask) ||
3323 cpuc->events[idx]->attr.exclude_host)
3324 continue;
3325
3326 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3327 }
3328 }
3329
3330 static int hsw_hw_config(struct perf_event *event)
3331 {
3332 int ret = intel_pmu_hw_config(event);
3333
3334 if (ret)
3335 return ret;
3336 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3337 return 0;
3338 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3339
3340 /*
3341 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3342 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3343 * this combination.
3344 */
3345 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3346 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3347 event->attr.precise_ip > 0))
3348 return -EOPNOTSUPP;
3349
3350 if (event_is_checkpointed(event)) {
3351 /*
3352 * Sampling of checkpointed events can cause situations where
3353 * the CPU constantly aborts because of an overflow, which is
3354 * then checkpointed back and ignored. Forbid checkpointing
3355 * for sampling.
3356 *
3357 * But still allow a long sampling period, so that perf stat
3358 * from KVM works.
3359 */
3360 if (event->attr.sample_period > 0 &&
3361 event->attr.sample_period < 0x7fffffff)
3362 return -EOPNOTSUPP;
3363 }
3364 return 0;
3365 }
3366
3367 static struct event_constraint counter0_constraint =
3368 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3369
3370 static struct event_constraint counter2_constraint =
3371 EVENT_CONSTRAINT(0, 0x4, 0);
3372
3373 static struct event_constraint *
3374 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3375 struct perf_event *event)
3376 {
3377 struct event_constraint *c;
3378
3379 c = intel_get_event_constraints(cpuc, idx, event);
3380
3381 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3382 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3383 if (c->idxmsk64 & (1U << 2))
3384 return &counter2_constraint;
3385 return &emptyconstraint;
3386 }
3387
3388 return c;
3389 }
3390
3391 static struct event_constraint *
3392 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3393 struct perf_event *event)
3394 {
3395 struct event_constraint *c;
3396
3397 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3398 if (event->attr.precise_ip == 3)
3399 return &counter0_constraint;
3400
3401 c = intel_get_event_constraints(cpuc, idx, event);
3402
3403 return c;
3404 }
3405
3406 static bool allow_tsx_force_abort = true;
3407
3408 static struct event_constraint *
3409 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3410 struct perf_event *event)
3411 {
3412 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3413
3414 /*
3415 * Without TFA we must not use PMC3.
3416 */
3417 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
3418 c = dyn_constraint(cpuc, c, idx);
3419 c->idxmsk64 &= ~(1ULL << 3);
3420 c->weight--;
3421 }
3422
3423 return c;
3424 }
3425
3426 /*
3427 * Broadwell:
3428 *
3429 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3430 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3431 * the two to enforce a minimum period of 128 (the smallest value that has bits
3432 * 0-5 cleared and >= 100).
3433 *
3434 * Because of how the code in x86_perf_event_set_period() works, the truncation
3435 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3436 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3437 *
3438 * Therefore the effective (average) period matches the requested period,
3439 * despite coarser hardware granularity.
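 *
 * For example, a requested period of 200 is truncated to 192, while a
 * requested period of 100 is first raised to 128 (which already has bits
 * 0-5 clear).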
3440 */
3441 static u64 bdw_limit_period(struct perf_event *event, u64 left)
3442 {
3443 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3444 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3445 if (left < 128)
3446 left = 128;
3447 left &= ~0x3fULL;
3448 }
3449 return left;
3450 }
3451
3452 PMU_FORMAT_ATTR(event, "config:0-7" );
3453 PMU_FORMAT_ATTR(umask, "config:8-15" );
3454 PMU_FORMAT_ATTR(edge, "config:18" );
3455 PMU_FORMAT_ATTR(pc, "config:19" );
3456 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
3457 PMU_FORMAT_ATTR(inv, "config:23" );
3458 PMU_FORMAT_ATTR(cmask, "config:24-31" );
3459 PMU_FORMAT_ATTR(in_tx, "config:32");
3460 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
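/*
 * These format strings are exported under
 * /sys/bus/event_source/devices/cpu/format/, so that e.g.
 * 'perf stat -e cpu/event=0xc0,umask=0x01/' can be assembled into a raw
 * EVENTSEL value by the perf tool.
 */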
3461
3462 static struct attribute *intel_arch_formats_attr[] = {
3463 &format_attr_event.attr,
3464 &format_attr_umask.attr,
3465 &format_attr_edge.attr,
3466 &format_attr_pc.attr,
3467 &format_attr_inv.attr,
3468 &format_attr_cmask.attr,
3469 NULL,
3470 };
3471
3472 ssize_t intel_event_sysfs_show(char *page, u64 config)
3473 {
3474 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3475
3476 return x86_event_sysfs_show(page, config, event);
3477 }
3478
3479 static struct intel_shared_regs *allocate_shared_regs(int cpu)
3480 {
3481 struct intel_shared_regs *regs;
3482 int i;
3483
3484 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3485 GFP_KERNEL, cpu_to_node(cpu));
3486 if (regs) {
3487 /*
3488 * initialize the locks to keep lockdep happy
3489 */
3490 for (i = 0; i < EXTRA_REG_MAX; i++)
3491 raw_spin_lock_init(&regs->regs[i].lock);
3492
3493 regs->core_id = -1;
3494 }
3495 return regs;
3496 }
3497
3498 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3499 {
3500 struct intel_excl_cntrs *c;
3501
3502 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3503 GFP_KERNEL, cpu_to_node(cpu));
3504 if (c) {
3505 raw_spin_lock_init(&c->lock);
3506 c->core_id = -1;
3507 }
3508 return c;
3509 }
3510
3511
3512 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3513 {
3514 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3515 cpuc->shared_regs = allocate_shared_regs(cpu);
3516 if (!cpuc->shared_regs)
3517 goto err;
3518 }
3519
3520 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3521 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3522
3523 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3524 if (!cpuc->constraint_list)
3525 goto err_shared_regs;
3526 }
3527
3528 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3529 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3530 if (!cpuc->excl_cntrs)
3531 goto err_constraint_list;
3532
3533 cpuc->excl_thread_id = 0;
3534 }
3535
3536 return 0;
3537
3538 err_constraint_list:
3539 kfree(cpuc->constraint_list);
3540 cpuc->constraint_list = NULL;
3541
3542 err_shared_regs:
3543 kfree(cpuc->shared_regs);
3544 cpuc->shared_regs = NULL;
3545
3546 err:
3547 return -ENOMEM;
3548 }
3549
3550 static int intel_pmu_cpu_prepare(int cpu)
3551 {
3552 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3553 }
3554
3555 static void flip_smm_bit(void *data)
3556 {
3557 unsigned long set = *(unsigned long *)data;
3558
3559 if (set > 0) {
3560 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3561 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3562 } else {
3563 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3564 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3565 }
3566 }
3567
3568 static void intel_pmu_cpu_starting(int cpu)
3569 {
3570 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3571 int core_id = topology_core_id(cpu);
3572 int i;
3573
3574 init_debug_store_on_cpu(cpu);
3575 /*
3576 * Deal with CPUs that don't clear their LBRs on power-up.
3577 */
3578 intel_pmu_lbr_reset();
3579
3580 cpuc->lbr_sel = NULL;
3581
3582 if (x86_pmu.version > 1)
3583 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3584
3585 if (x86_pmu.counter_freezing)
3586 enable_counter_freeze();
3587
3588 if (!cpuc->shared_regs)
3589 return;
3590
3591 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3592 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3593 struct intel_shared_regs *pc;
3594
3595 pc = per_cpu(cpu_hw_events, i).shared_regs;
3596 if (pc && pc->core_id == core_id) {
3597 cpuc->kfree_on_online[0] = cpuc->shared_regs;
3598 cpuc->shared_regs = pc;
3599 break;
3600 }
3601 }
3602 cpuc->shared_regs->core_id = core_id;
3603 cpuc->shared_regs->refcnt++;
3604 }
3605
3606 if (x86_pmu.lbr_sel_map)
3607 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3608
3609 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3610 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3611 struct cpu_hw_events *sibling;
3612 struct intel_excl_cntrs *c;
3613
3614 sibling = &per_cpu(cpu_hw_events, i);
3615 c = sibling->excl_cntrs;
3616 if (c && c->core_id == core_id) {
3617 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3618 cpuc->excl_cntrs = c;
3619 if (!sibling->excl_thread_id)
3620 cpuc->excl_thread_id = 1;
3621 break;
3622 }
3623 }
3624 cpuc->excl_cntrs->core_id = core_id;
3625 cpuc->excl_cntrs->refcnt++;
3626 }
3627 }
3628
3629 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
3630 {
3631 struct intel_excl_cntrs *c;
3632
3633 c = cpuc->excl_cntrs;
3634 if (c) {
3635 if (c->core_id == -1 || --c->refcnt == 0)
3636 kfree(c);
3637 cpuc->excl_cntrs = NULL;
3638 }
3639
3640 kfree(cpuc->constraint_list);
3641 cpuc->constraint_list = NULL;
3642 }
3643
3644 static void intel_pmu_cpu_dying(int cpu)
3645 {
3646 fini_debug_store_on_cpu(cpu);
3647
3648 if (x86_pmu.counter_freezing)
3649 disable_counter_freeze();
3650 }
3651
3652 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
3653 {
3654 struct intel_shared_regs *pc;
3655
3656 pc = cpuc->shared_regs;
3657 if (pc) {
3658 if (pc->core_id == -1 || --pc->refcnt == 0)
3659 kfree(pc);
3660 cpuc->shared_regs = NULL;
3661 }
3662
3663 free_excl_cntrs(cpuc);
3664 }
3665
3666 static void intel_pmu_cpu_dead(int cpu)
3667 {
3668 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
3669 }
3670
3671 static void intel_pmu_sched_task(struct perf_event_context *ctx,
3672 bool sched_in)
3673 {
3674 intel_pmu_pebs_sched_task(ctx, sched_in);
3675 intel_pmu_lbr_sched_task(ctx, sched_in);
3676 }
3677
3678 static int intel_pmu_check_period(struct perf_event *event, u64 value)
3679 {
3680 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3681 }
3682
3683 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3684
3685 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3686
3687 PMU_FORMAT_ATTR(frontend, "config1:0-23");
3688
3689 static struct attribute *intel_arch3_formats_attr[] = {
3690 &format_attr_event.attr,
3691 &format_attr_umask.attr,
3692 &format_attr_edge.attr,
3693 &format_attr_pc.attr,
3694 &format_attr_any.attr,
3695 &format_attr_inv.attr,
3696 &format_attr_cmask.attr,
3697 NULL,
3698 };
3699
3700 static struct attribute *hsw_format_attr[] = {
3701 &format_attr_in_tx.attr,
3702 &format_attr_in_tx_cp.attr,
3703 &format_attr_offcore_rsp.attr,
3704 &format_attr_ldlat.attr,
3705 NULL
3706 };
3707
3708 static struct attribute *nhm_format_attr[] = {
3709 &format_attr_offcore_rsp.attr,
3710 &format_attr_ldlat.attr,
3711 NULL
3712 };
3713
3714 static struct attribute *slm_format_attr[] = {
3715 &format_attr_offcore_rsp.attr,
3716 NULL
3717 };
3718
3719 static struct attribute *skl_format_attr[] = {
3720 &format_attr_frontend.attr,
3721 NULL,
3722 };
3723
3724 static __initconst const struct x86_pmu core_pmu = {
3725 .name = "core",
3726 .handle_irq = x86_pmu_handle_irq,
3727 .disable_all = x86_pmu_disable_all,
3728 .enable_all = core_pmu_enable_all,
3729 .enable = core_pmu_enable_event,
3730 .disable = x86_pmu_disable_event,
3731 .hw_config = core_pmu_hw_config,
3732 .schedule_events = x86_schedule_events,
3733 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3734 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3735 .event_map = intel_pmu_event_map,
3736 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3737 .apic = 1,
3738 .large_pebs_flags = LARGE_PEBS_FLAGS,
3739
3740 /*
3741 * Intel PMCs cannot be accessed sanely above 32-bit width,
3742 * so we install an artificial 1<<31 period regardless of
3743 * the generic event period:
3744 */
3745 .max_period = (1ULL<<31) - 1,
3746 .get_event_constraints = intel_get_event_constraints,
3747 .put_event_constraints = intel_put_event_constraints,
3748 .event_constraints = intel_core_event_constraints,
3749 .guest_get_msrs = core_guest_get_msrs,
3750 .format_attrs = intel_arch_formats_attr,
3751 .events_sysfs_show = intel_event_sysfs_show,
3752
3753 /*
3754 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
3755 * together with PMU version 1 and thus be using core_pmu with
3756 * shared_regs. We need the following callbacks here to allocate
3757 * it properly.
3758 */
3759 .cpu_prepare = intel_pmu_cpu_prepare,
3760 .cpu_starting = intel_pmu_cpu_starting,
3761 .cpu_dying = intel_pmu_cpu_dying,
3762 .cpu_dead = intel_pmu_cpu_dead,
3763
3764 .check_period = intel_pmu_check_period,
3765 };
3766
3767 static struct attribute *intel_pmu_attrs[];
3768
3769 static __initconst const struct x86_pmu intel_pmu = {
3770 .name = "Intel",
3771 .handle_irq = intel_pmu_handle_irq,
3772 .disable_all = intel_pmu_disable_all,
3773 .enable_all = intel_pmu_enable_all,
3774 .enable = intel_pmu_enable_event,
3775 .disable = intel_pmu_disable_event,
3776 .add = intel_pmu_add_event,
3777 .del = intel_pmu_del_event,
3778 .read = intel_pmu_read_event,
3779 .hw_config = intel_pmu_hw_config,
3780 .schedule_events = x86_schedule_events,
3781 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3782 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3783 .event_map = intel_pmu_event_map,
3784 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3785 .apic = 1,
3786 .large_pebs_flags = LARGE_PEBS_FLAGS,
3787 /*
3788 * Intel PMCs cannot be accessed sanely above 32-bit width,
3789 * so we install an artificial 1<<31 period regardless of
3790 * the generic event period:
3791 */
3792 .max_period = (1ULL << 31) - 1,
3793 .get_event_constraints = intel_get_event_constraints,
3794 .put_event_constraints = intel_put_event_constraints,
3795 .pebs_aliases = intel_pebs_aliases_core2,
3796
3797 .format_attrs = intel_arch3_formats_attr,
3798 .events_sysfs_show = intel_event_sysfs_show,
3799
3800 .attrs = intel_pmu_attrs,
3801
3802 .cpu_prepare = intel_pmu_cpu_prepare,
3803 .cpu_starting = intel_pmu_cpu_starting,
3804 .cpu_dying = intel_pmu_cpu_dying,
3805 .cpu_dead = intel_pmu_cpu_dead,
3806
3807 .guest_get_msrs = intel_guest_get_msrs,
3808 .sched_task = intel_pmu_sched_task,
3809
3810 .check_period = intel_pmu_check_period,
3811 };
3812
3813 static __init void intel_clovertown_quirk(void)
3814 {
3815 /*
3816 * PEBS is unreliable due to:
3817 *
3818 * AJ67 - PEBS may experience CPL leaks
3819 * AJ68 - PEBS PMI may be delayed by one event
3820 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3821 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3822 *
3823 * AJ67 could be worked around by restricting the OS/USR flags.
3824 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3825 *
3826 * AJ106 could possibly be worked around by not allowing LBR
3827 * usage from PEBS, including the fixup.
3828 * AJ68 could possibly be worked around by always programming
3829 * a pebs_event_reset[0] value and coping with the lost events.
3830 *
3831 * But taken together it might just make sense to not enable PEBS on
3832 * these chips.
3833 */
3834 pr_warn("PEBS disabled due to CPU errata\n");
3835 x86_pmu.pebs = 0;
3836 x86_pmu.pebs_constraints = NULL;
3837 }
3838
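/*
 * Minimum microcode revisions, per model/stepping, at which the PEBS
 * isolation erratum is fixed; checked via x86_cpu_has_min_microcode_rev()
 * in intel_check_pebs_isolation() below.
 */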
3839 static const struct x86_cpu_desc isolation_ucodes[] = {
3840 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f),
3841 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e),
3842 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015),
3843 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
3844 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
3845 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023),
3846 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014),
3847 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010),
3848 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009),
3849 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009),
3850 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002),
3851 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
3852 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
3853 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
3854 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c),
3855 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c),
3856 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e),
3857 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e),
3858 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e),
3859 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e),
3860 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e),
3861 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e),
3862 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e),
3863 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e),
3864 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e),
3865 {}
3866 };
3867
3868 static void intel_check_pebs_isolation(void)
3869 {
3870 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
3871 }
3872
3873 static __init void intel_pebs_isolation_quirk(void)
3874 {
3875 WARN_ON_ONCE(x86_pmu.check_microcode);
3876 x86_pmu.check_microcode = intel_check_pebs_isolation;
3877 intel_check_pebs_isolation();
3878 }
3879
3880 static const struct x86_cpu_desc pebs_ucodes[] = {
3881 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
3882 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
3883 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
3884 {}
3885 };
3886
3887 static bool intel_snb_pebs_broken(void)
3888 {
3889 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
3890 }
3891
3892 static void intel_snb_check_microcode(void)
3893 {
3894 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
3895 return;
3896
3897 /*
3898 * Serialized by the microcode lock.
3899 */
3900 if (x86_pmu.pebs_broken) {
3901 pr_info("PEBS enabled due to microcode update\n");
3902 x86_pmu.pebs_broken = 0;
3903 } else {
3904 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
3905 x86_pmu.pebs_broken = 1;
3906 }
3907 }
3908
3909 static bool is_lbr_from(unsigned long msr)
3910 {
3911 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
3912
3913 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
3914 }
3915
3916 /*
3917 * Under certain circumstances, accessing certain MSRs may cause a #GP.
3918 * This function tests whether the input MSR can be safely accessed.
3919 */
3920 static bool check_msr(unsigned long msr, u64 mask)
3921 {
3922 u64 val_old, val_new, val_tmp;
3923
3924 /*
3925 * Read the current value, change it and read it back to see if it
3926 * matches; this is needed to detect certain hardware emulators
3927 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
3928 */
3929 if (rdmsrl_safe(msr, &val_old))
3930 return false;
3931
3932 /*
3933 * Only change the bits which can be updated by wrmsrl.
3934 */
3935 val_tmp = val_old ^ mask;
3936
3937 if (is_lbr_from(msr))
3938 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
3939
3940 if (wrmsrl_safe(msr, val_tmp) ||
3941 rdmsrl_safe(msr, &val_new))
3942 return false;
3943
3944 /*
3945 * The quirk only affects validation in wrmsr(), so wrmsrl()'s value
3946 * should equal rdmsrl()'s even with the quirk.
3947 */
3948 if (val_new != val_tmp)
3949 return false;
3950
3951 if (is_lbr_from(msr))
3952 val_old = lbr_from_signext_quirk_wr(val_old);
3953
3954 /*
3955 * The MSR is known to be safe to access; restore the old value and return.
3956 */
3957 wrmsrl(msr, val_old);
3958
3959 return true;
3960 }
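/*
 * Typical use, as in intel_pmu_init() below: check_msr(x86_pmu.lbr_tos, 0x3UL)
 * XORs the two low bits of the LBR TOS MSR and verifies that the new value
 * reads back, which fails on emulators that silently ignore the write.
 */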
3961
3962 static __init void intel_sandybridge_quirk(void)
3963 {
3964 x86_pmu.check_microcode = intel_snb_check_microcode;
3965 cpus_read_lock();
3966 intel_snb_check_microcode();
3967 cpus_read_unlock();
3968 }
3969
3970 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
3971 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
3972 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
3973 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
3974 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
3975 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
3976 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
3977 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
3978 };
3979
3980 static __init void intel_arch_events_quirk(void)
3981 {
3982 int bit;
3983
3984 /* disable events that CPUID reports as not present */
3985 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
3986 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
3987 pr_warn("CPUID marked event: \'%s\' unavailable\n",
3988 intel_arch_events_map[bit].name);
3989 }
3990 }
3991
3992 static __init void intel_nehalem_quirk(void)
3993 {
3994 union cpuid10_ebx ebx;
3995
3996 ebx.full = x86_pmu.events_maskl;
3997 if (ebx.split.no_branch_misses_retired) {
3998 /*
3999 * Erratum AAJ80 detected; we work around it by using
4000 * the BR_MISP_EXEC.ANY event. This will over-count
4001 * branch-misses, but it's still much better than the
4002 * architectural event which is often completely bogus:
4003 */
4004 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4005 ebx.split.no_branch_misses_retired = 0;
4006 x86_pmu.events_maskl = ebx.full;
4007 pr_info("CPU erratum AAJ80 worked around\n");
4008 }
4009 }
4010
4011 static const struct x86_cpu_desc counter_freezing_ucodes[] = {
4012 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
4013 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
4014 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
4015 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028),
4016 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
4017 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
4018 {}
4019 };
4020
4021 static bool intel_counter_freezing_broken(void)
4022 {
4023 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
4024 }
4025
4026 static __init void intel_counter_freezing_quirk(void)
4027 {
4028 /* Check if it's already disabled */
4029 if (disable_counter_freezing)
4030 return;
4031
4032 /*
4033 * If the system starts with the wrong ucode, leave the
4034 * counter-freezing feature permanently disabled.
4035 */
4036 if (intel_counter_freezing_broken()) {
4037 pr_info("PMU counter freezing disabled due to CPU errata,"
4038 "please upgrade microcode\n");
4039 x86_pmu.counter_freezing = false;
4040 x86_pmu.handle_irq = intel_pmu_handle_irq;
4041 }
4042 }
4043
4044 /*
4045 * enable software workaround for errata:
4046 * SNB: BJ122
4047 * IVB: BV98
4048 * HSW: HSD29
4049 *
4050 * Only needed when HT is enabled. However, detecting whether HT is
4051 * enabled is difficult (model specific). So instead we enable the
4052 * workaround at early boot and verify whether it is needed in a later
4053 * initcall phase, once we have valid topology information to check if
4054 * HT is actually enabled.
4055 */
4056 static __init void intel_ht_bug(void)
4057 {
4058 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4059
4060 x86_pmu.start_scheduling = intel_start_scheduling;
4061 x86_pmu.commit_scheduling = intel_commit_scheduling;
4062 x86_pmu.stop_scheduling = intel_stop_scheduling;
4063 }
4064
4065 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4066 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
4067
4068 /* Haswell special events */
4069 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4070 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4071 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4072 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4073 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4074 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4075 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4076 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4077 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4078 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4079 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4080 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
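/*
 * The event strings above show up as named events in the PMU's sysfs
 * "events" directory, so e.g. "perf stat -e cpu/tx-start/" (illustrative)
 * resolves to event=0xc9,umask=0x1 on CPUs that expose these attributes.
 */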
4081
4082 static struct attribute *hsw_events_attrs[] = {
4083 EVENT_PTR(td_slots_issued),
4084 EVENT_PTR(td_slots_retired),
4085 EVENT_PTR(td_fetch_bubbles),
4086 EVENT_PTR(td_total_slots),
4087 EVENT_PTR(td_total_slots_scale),
4088 EVENT_PTR(td_recovery_bubbles),
4089 EVENT_PTR(td_recovery_bubbles_scale),
4090 NULL
4091 };
4092
4093 static struct attribute *hsw_mem_events_attrs[] = {
4094 EVENT_PTR(mem_ld_hsw),
4095 EVENT_PTR(mem_st_hsw),
4096 NULL,
4097 };
4098
4099 static struct attribute *hsw_tsx_events_attrs[] = {
4100 EVENT_PTR(tx_start),
4101 EVENT_PTR(tx_commit),
4102 EVENT_PTR(tx_abort),
4103 EVENT_PTR(tx_capacity),
4104 EVENT_PTR(tx_conflict),
4105 EVENT_PTR(el_start),
4106 EVENT_PTR(el_commit),
4107 EVENT_PTR(el_abort),
4108 EVENT_PTR(el_capacity),
4109 EVENT_PTR(el_conflict),
4110 EVENT_PTR(cycles_t),
4111 EVENT_PTR(cycles_ct),
4112 NULL
4113 };
4114
4115 static ssize_t freeze_on_smi_show(struct device *cdev,
4116 struct device_attribute *attr,
4117 char *buf)
4118 {
4119 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4120 }
4121
4122 static DEFINE_MUTEX(freeze_on_smi_mutex);
4123
4124 static ssize_t freeze_on_smi_store(struct device *cdev,
4125 struct device_attribute *attr,
4126 const char *buf, size_t count)
4127 {
4128 unsigned long val;
4129 ssize_t ret;
4130
4131 ret = kstrtoul(buf, 0, &val);
4132 if (ret)
4133 return ret;
4134
4135 if (val > 1)
4136 return -EINVAL;
4137
4138 mutex_lock(&freeze_on_smi_mutex);
4139
4140 if (x86_pmu.attr_freeze_on_smi == val)
4141 goto done;
4142
4143 x86_pmu.attr_freeze_on_smi = val;
4144
4145 get_online_cpus();
4146 on_each_cpu(flip_smm_bit, &val, 1);
4147 put_online_cpus();
4148 done:
4149 mutex_unlock(&freeze_on_smi_mutex);
4150
4151 return count;
4152 }
4153
4154 static DEVICE_ATTR_RW(freeze_on_smi);
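/*
 * freeze_on_smi is exported as a read-write PMU device attribute
 * (typically /sys/devices/cpu/freeze_on_smi); writing 0 or 1 updates
 * x86_pmu.attr_freeze_on_smi and runs flip_smm_bit() on every online CPU.
 */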
4155
4156 static ssize_t branches_show(struct device *cdev,
4157 struct device_attribute *attr,
4158 char *buf)
4159 {
4160 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4161 }
4162
4163 static DEVICE_ATTR_RO(branches);
4164
4165 static struct attribute *lbr_attrs[] = {
4166 &dev_attr_branches.attr,
4167 NULL
4168 };
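/*
 * When LBRs are present, lbr_attrs is merged into x86_pmu.caps_attrs in
 * intel_pmu_init(), so the LBR stack depth can be read from a "branches"
 * file in the PMU's caps directory (e.g. /sys/devices/cpu/caps/branches).
 */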
4169
4170 static char pmu_name_str[30];
4171
4172 static ssize_t pmu_name_show(struct device *cdev,
4173 struct device_attribute *attr,
4174 char *buf)
4175 {
4176 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4177 }
4178
4179 static DEVICE_ATTR_RO(pmu_name);
4180
4181 static struct attribute *intel_pmu_caps_attrs[] = {
4182 &dev_attr_pmu_name.attr,
4183 NULL
4184 };
4185
4186 static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
4187
4188 static struct attribute *intel_pmu_attrs[] = {
4189 &dev_attr_freeze_on_smi.attr,
4190 NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
4191 NULL,
4192 };
4193
4194 static __init struct attribute **
4195 get_events_attrs(struct attribute **base,
4196 struct attribute **mem,
4197 struct attribute **tsx)
4198 {
4199 struct attribute **attrs = base;
4200 struct attribute **old;
4201
4202 if (mem && x86_pmu.pebs)
4203 attrs = merge_attr(attrs, mem);
4204
4205 if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
4206 old = attrs;
4207 attrs = merge_attr(attrs, tsx);
4208 if (old != base)
4209 kfree(old);
4210 }
4211
4212 return attrs;
4213 }
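/*
 * Note that get_events_attrs() only appends the mem/tsx event lists when
 * PEBS and RTM are actually available, so sysfs does not advertise events
 * the hardware cannot count.
 */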
4214
4215 __init int intel_pmu_init(void)
4216 {
4217 struct attribute **extra_attr = NULL;
4218 struct attribute **mem_attr = NULL;
4219 struct attribute **tsx_attr = NULL;
4220 struct attribute **to_free = NULL;
4221 union cpuid10_edx edx;
4222 union cpuid10_eax eax;
4223 union cpuid10_ebx ebx;
4224 struct event_constraint *c;
4225 unsigned int unused;
4226 struct extra_reg *er;
4227 int version, i;
4228 char *name;
4229
4230 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4231 switch (boot_cpu_data.x86) {
4232 case 0x6:
4233 return p6_pmu_init();
4234 case 0xb:
4235 return knc_pmu_init();
4236 case 0xf:
4237 return p4_pmu_init();
4238 }
4239 return -ENODEV;
4240 }
4241
4242 /*
4243 * Check whether the Architectural PerfMon supports
4244 * Branch Misses Retired hw_event or not.
4245 */
4246 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4247 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4248 return -ENODEV;
4249
4250 version = eax.split.version_id;
4251 if (version < 2)
4252 x86_pmu = core_pmu;
4253 else
4254 x86_pmu = intel_pmu;
4255
4256 x86_pmu.version = version;
4257 x86_pmu.num_counters = eax.split.num_counters;
4258 x86_pmu.cntval_bits = eax.split.bit_width;
4259 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4260
4261 x86_pmu.events_maskl = ebx.full;
4262 x86_pmu.events_mask_len = eax.split.mask_length;
4263
4264 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4265
4266 /*
4267 * Quirk: v2 perfmon does not report fixed-purpose events, so
4268 * assume at least 3 events, when not running in a hypervisor:
4269 */
4270 if (version > 1) {
4271 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4272
4273 x86_pmu.num_counters_fixed =
4274 max((int)edx.split.num_counters_fixed, assume);
4275 }
4276
4277 if (version >= 4)
4278 x86_pmu.counter_freezing = !disable_counter_freezing;
4279
4280 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4281 u64 capabilities;
4282
4283 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4284 x86_pmu.intel_cap.capabilities = capabilities;
4285 }
4286
4287 intel_ds_init();
4288
4289 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4290
4291 /*
4292 * Install the hw-cache-events table:
4293 */
4294 switch (boot_cpu_data.x86_model) {
4295 case INTEL_FAM6_CORE_YONAH:
4296 pr_cont("Core events, ");
4297 name = "core";
4298 break;
4299
4300 case INTEL_FAM6_CORE2_MEROM:
4301 x86_add_quirk(intel_clovertown_quirk);
4302 /* fall through */
4303
4304 case INTEL_FAM6_CORE2_MEROM_L:
4305 case INTEL_FAM6_CORE2_PENRYN:
4306 case INTEL_FAM6_CORE2_DUNNINGTON:
4307 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4308 sizeof(hw_cache_event_ids));
4309
4310 intel_pmu_lbr_init_core();
4311
4312 x86_pmu.event_constraints = intel_core2_event_constraints;
4313 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4314 pr_cont("Core2 events, ");
4315 name = "core2";
4316 break;
4317
4318 case INTEL_FAM6_NEHALEM:
4319 case INTEL_FAM6_NEHALEM_EP:
4320 case INTEL_FAM6_NEHALEM_EX:
4321 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4322 sizeof(hw_cache_event_ids));
4323 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4324 sizeof(hw_cache_extra_regs));
4325
4326 intel_pmu_lbr_init_nhm();
4327
4328 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4329 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4330 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4331 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4332
4333 mem_attr = nhm_mem_events_attrs;
4334
4335 /* UOPS_ISSUED.STALLED_CYCLES */
4336 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4337 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4338 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4339 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4340 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4341
4342 intel_pmu_pebs_data_source_nhm();
4343 x86_add_quirk(intel_nehalem_quirk);
4344 x86_pmu.pebs_no_tlb = 1;
4345 extra_attr = nhm_format_attr;
4346
4347 pr_cont("Nehalem events, ");
4348 name = "nehalem";
4349 break;
4350
4351 case INTEL_FAM6_ATOM_BONNELL:
4352 case INTEL_FAM6_ATOM_BONNELL_MID:
4353 case INTEL_FAM6_ATOM_SALTWELL:
4354 case INTEL_FAM6_ATOM_SALTWELL_MID:
4355 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4356 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4357 sizeof(hw_cache_event_ids));
4358
4359 intel_pmu_lbr_init_atom();
4360
4361 x86_pmu.event_constraints = intel_gen_event_constraints;
4362 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4363 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4364 pr_cont("Atom events, ");
4365 name = "bonnell";
4366 break;
4367
4368 case INTEL_FAM6_ATOM_SILVERMONT:
4369 case INTEL_FAM6_ATOM_SILVERMONT_X:
4370 case INTEL_FAM6_ATOM_SILVERMONT_MID:
4371 case INTEL_FAM6_ATOM_AIRMONT:
4372 case INTEL_FAM6_ATOM_AIRMONT_MID:
4373 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4374 sizeof(hw_cache_event_ids));
4375 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4376 sizeof(hw_cache_extra_regs));
4377
4378 intel_pmu_lbr_init_slm();
4379
4380 x86_pmu.event_constraints = intel_slm_event_constraints;
4381 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4382 x86_pmu.extra_regs = intel_slm_extra_regs;
4383 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4384 x86_pmu.cpu_events = slm_events_attrs;
4385 extra_attr = slm_format_attr;
4386 pr_cont("Silvermont events, ");
4387 name = "silvermont";
4388 break;
4389
4390 case INTEL_FAM6_ATOM_GOLDMONT:
4391 case INTEL_FAM6_ATOM_GOLDMONT_X:
4392 x86_add_quirk(intel_counter_freezing_quirk);
4393 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
4394 sizeof(hw_cache_event_ids));
4395 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
4396 sizeof(hw_cache_extra_regs));
4397
4398 intel_pmu_lbr_init_skl();
4399
4400 x86_pmu.event_constraints = intel_slm_event_constraints;
4401 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
4402 x86_pmu.extra_regs = intel_glm_extra_regs;
4403 /*
4404 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4405 * for precise cycles.
4406 * :pp is identical to :ppp
4407 */
4408 x86_pmu.pebs_aliases = NULL;
4409 x86_pmu.pebs_prec_dist = true;
4410 x86_pmu.lbr_pt_coexist = true;
4411 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4412 x86_pmu.cpu_events = glm_events_attrs;
4413 extra_attr = slm_format_attr;
4414 pr_cont("Goldmont events, ");
4415 name = "goldmont";
4416 break;
4417
4418 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4419 x86_add_quirk(intel_counter_freezing_quirk);
4420 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4421 sizeof(hw_cache_event_ids));
4422 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
4423 sizeof(hw_cache_extra_regs));
4424
4425 intel_pmu_lbr_init_skl();
4426
4427 x86_pmu.event_constraints = intel_slm_event_constraints;
4428 x86_pmu.extra_regs = intel_glm_extra_regs;
4429 /*
4430 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4431 * for precise cycles.
4432 */
4433 x86_pmu.pebs_aliases = NULL;
4434 x86_pmu.pebs_prec_dist = true;
4435 x86_pmu.lbr_pt_coexist = true;
4436 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4437 x86_pmu.flags |= PMU_FL_PEBS_ALL;
4438 x86_pmu.get_event_constraints = glp_get_event_constraints;
4439 x86_pmu.cpu_events = glm_events_attrs;
4440 /* Goldmont Plus has 4-wide pipeline */
4441 event_attr_td_total_slots_scale_glm.event_str = "4";
4442 extra_attr = slm_format_attr;
4443 pr_cont("Goldmont plus events, ");
4444 name = "goldmont_plus";
4445 break;
4446
4447 case INTEL_FAM6_WESTMERE:
4448 case INTEL_FAM6_WESTMERE_EP:
4449 case INTEL_FAM6_WESTMERE_EX:
4450 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
4451 sizeof(hw_cache_event_ids));
4452 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4453 sizeof(hw_cache_extra_regs));
4454
4455 intel_pmu_lbr_init_nhm();
4456
4457 x86_pmu.event_constraints = intel_westmere_event_constraints;
4458 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4459 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
4460 x86_pmu.extra_regs = intel_westmere_extra_regs;
4461 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4462
4463 mem_attr = nhm_mem_events_attrs;
4464
4465 /* UOPS_ISSUED.STALLED_CYCLES */
4466 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4467 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4468 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4469 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4470 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4471
4472 intel_pmu_pebs_data_source_nhm();
4473 extra_attr = nhm_format_attr;
4474 pr_cont("Westmere events, ");
4475 name = "westmere";
4476 break;
4477
4478 case INTEL_FAM6_SANDYBRIDGE:
4479 case INTEL_FAM6_SANDYBRIDGE_X:
4480 x86_add_quirk(intel_sandybridge_quirk);
4481 x86_add_quirk(intel_ht_bug);
4482 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4483 sizeof(hw_cache_event_ids));
4484 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4485 sizeof(hw_cache_extra_regs));
4486
4487 intel_pmu_lbr_init_snb();
4488
4489 x86_pmu.event_constraints = intel_snb_event_constraints;
4490 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
4491 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
4492 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
4493 x86_pmu.extra_regs = intel_snbep_extra_regs;
4494 else
4495 x86_pmu.extra_regs = intel_snb_extra_regs;
4496
4497
4498 /* all extra regs are per-cpu when HT is on */
4499 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4500 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4501
4502 x86_pmu.cpu_events = snb_events_attrs;
4503 mem_attr = snb_mem_events_attrs;
4504
4505 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4506 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4507 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4508 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
4509 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4510 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
4511
4512 extra_attr = nhm_format_attr;
4513
4514 pr_cont("SandyBridge events, ");
4515 name = "sandybridge";
4516 break;
4517
4518 case INTEL_FAM6_IVYBRIDGE:
4519 case INTEL_FAM6_IVYBRIDGE_X:
4520 x86_add_quirk(intel_ht_bug);
4521 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4522 sizeof(hw_cache_event_ids));
4523 /* dTLB-load-misses on IVB is different than SNB */
4524 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
4525
4526 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4527 sizeof(hw_cache_extra_regs));
4528
4529 intel_pmu_lbr_init_snb();
4530
4531 x86_pmu.event_constraints = intel_ivb_event_constraints;
4532 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
4533 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4534 x86_pmu.pebs_prec_dist = true;
4535 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
4536 x86_pmu.extra_regs = intel_snbep_extra_regs;
4537 else
4538 x86_pmu.extra_regs = intel_snb_extra_regs;
4539 /* all extra regs are per-cpu when HT is on */
4540 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4541 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4542
4543 x86_pmu.cpu_events = snb_events_attrs;
4544 mem_attr = snb_mem_events_attrs;
4545
4546 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4547 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4548 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4549
4550 extra_attr = nhm_format_attr;
4551
4552 pr_cont("IvyBridge events, ");
4553 name = "ivybridge";
4554 break;
4555
4556
4557 case INTEL_FAM6_HASWELL_CORE:
4558 case INTEL_FAM6_HASWELL_X:
4559 case INTEL_FAM6_HASWELL_ULT:
4560 case INTEL_FAM6_HASWELL_GT3E:
4561 x86_add_quirk(intel_ht_bug);
4562 x86_add_quirk(intel_pebs_isolation_quirk);
4563 x86_pmu.late_ack = true;
4564 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4565 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4566
4567 intel_pmu_lbr_init_hsw();
4568
4569 x86_pmu.event_constraints = intel_hsw_event_constraints;
4570 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
4571 x86_pmu.extra_regs = intel_snbep_extra_regs;
4572 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4573 x86_pmu.pebs_prec_dist = true;
4574 /* all extra regs are per-cpu when HT is on */
4575 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4576 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4577
4578 x86_pmu.hw_config = hsw_hw_config;
4579 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4580 x86_pmu.cpu_events = hsw_events_attrs;
4581 x86_pmu.lbr_double_abort = true;
4582 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4583 hsw_format_attr : nhm_format_attr;
4584 mem_attr = hsw_mem_events_attrs;
4585 tsx_attr = hsw_tsx_events_attrs;
4586 pr_cont("Haswell events, ");
4587 name = "haswell";
4588 break;
4589
4590 case INTEL_FAM6_BROADWELL_CORE:
4591 case INTEL_FAM6_BROADWELL_XEON_D:
4592 case INTEL_FAM6_BROADWELL_GT3E:
4593 case INTEL_FAM6_BROADWELL_X:
4594 x86_add_quirk(intel_pebs_isolation_quirk);
4595 x86_pmu.late_ack = true;
4596 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4597 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4598
4599 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
4600 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
4601 BDW_L3_MISS|HSW_SNOOP_DRAM;
4602 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
4603 HSW_SNOOP_DRAM;
4604 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
4605 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4606 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
4607 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4608
4609 intel_pmu_lbr_init_hsw();
4610
4611 x86_pmu.event_constraints = intel_bdw_event_constraints;
4612 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
4613 x86_pmu.extra_regs = intel_snbep_extra_regs;
4614 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4615 x86_pmu.pebs_prec_dist = true;
4616 /* all extra regs are per-cpu when HT is on */
4617 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4618 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4619
4620 x86_pmu.hw_config = hsw_hw_config;
4621 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4622 x86_pmu.cpu_events = hsw_events_attrs;
4623 x86_pmu.limit_period = bdw_limit_period;
4624 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4625 hsw_format_attr : nhm_format_attr;
4626 mem_attr = hsw_mem_events_attrs;
4627 tsx_attr = hsw_tsx_events_attrs;
4628 pr_cont("Broadwell events, ");
4629 name = "broadwell";
4630 break;
4631
4632 case INTEL_FAM6_XEON_PHI_KNL:
4633 case INTEL_FAM6_XEON_PHI_KNM:
4634 memcpy(hw_cache_event_ids,
4635 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4636 memcpy(hw_cache_extra_regs,
4637 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4638 intel_pmu_lbr_init_knl();
4639
4640 x86_pmu.event_constraints = intel_slm_event_constraints;
4641 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4642 x86_pmu.extra_regs = intel_knl_extra_regs;
4643
4644 /* all extra regs are per-cpu when HT is on */
4645 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4646 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4647 extra_attr = slm_format_attr;
4648 pr_cont("Knights Landing/Mill events, ");
4649 name = "knights-landing";
4650 break;
4651
4652 case INTEL_FAM6_SKYLAKE_MOBILE:
4653 case INTEL_FAM6_SKYLAKE_DESKTOP:
4654 case INTEL_FAM6_SKYLAKE_X:
4655 case INTEL_FAM6_KABYLAKE_MOBILE:
4656 case INTEL_FAM6_KABYLAKE_DESKTOP:
4657 x86_add_quirk(intel_pebs_isolation_quirk);
4658 x86_pmu.late_ack = true;
4659 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4660 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4661 intel_pmu_lbr_init_skl();
4662
4663 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
4664 event_attr_td_recovery_bubbles.event_str_noht =
4665 "event=0xd,umask=0x1,cmask=1";
4666 event_attr_td_recovery_bubbles.event_str_ht =
4667 "event=0xd,umask=0x1,cmask=1,any=1";
4668
4669 x86_pmu.event_constraints = intel_skl_event_constraints;
4670 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
4671 x86_pmu.extra_regs = intel_skl_extra_regs;
4672 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
4673 x86_pmu.pebs_prec_dist = true;
4674 /* all extra regs are per-cpu when HT is on */
4675 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4676 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4677
4678 x86_pmu.hw_config = hsw_hw_config;
4679 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4680 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4681 hsw_format_attr : nhm_format_attr;
4682 extra_attr = merge_attr(extra_attr, skl_format_attr);
4683 to_free = extra_attr;
4684 x86_pmu.cpu_events = hsw_events_attrs;
4685 mem_attr = hsw_mem_events_attrs;
4686 tsx_attr = hsw_tsx_events_attrs;
4687 intel_pmu_pebs_data_source_skl(
4688 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
4689
4690 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
4691 x86_pmu.flags |= PMU_FL_TFA;
4692 x86_pmu.get_event_constraints = tfa_get_event_constraints;
4693 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
4694 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
4695 intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
4696 }
4697
4698 pr_cont("Skylake events, ");
4699 name = "skylake";
4700 break;
4701
4702 default:
4703 switch (x86_pmu.version) {
4704 case 1:
4705 x86_pmu.event_constraints = intel_v1_event_constraints;
4706 pr_cont("generic architected perfmon v1, ");
4707 name = "generic_arch_v1";
4708 break;
4709 default:
4710 /*
4711 * default constraints for v2 and up
4712 */
4713 x86_pmu.event_constraints = intel_gen_event_constraints;
4714 pr_cont("generic architected perfmon, ");
4715 name = "generic_arch_v2+";
4716 break;
4717 }
4718 }
4719
4720 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
4721
4722 if (version >= 2 && extra_attr) {
4723 x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
4724 extra_attr);
4725 WARN_ON(!x86_pmu.format_attrs);
4726 }
4727
4728 x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
4729 mem_attr, tsx_attr);
4730
4731 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
4732 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
4733 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
4734 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
4735 }
4736 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
4737
4738 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
4739 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
4740 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
4741 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
4742 }
4743
4744 x86_pmu.intel_ctrl |=
4745 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
4746
4747 if (x86_pmu.event_constraints) {
4748 /*
4749 * event on fixed counter2 (REF_CYCLES) only works on this
4750 * counter, so do not extend mask to generic counters
4751 */
4752 for_each_event_constraint(c, x86_pmu.event_constraints) {
4753 if (c->cmask == FIXED_EVENT_FLAGS
4754 && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
4755 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
4756 }
4757 c->idxmsk64 &=
4758 ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
4759 c->weight = hweight64(c->idxmsk64);
4760 }
4761 }
4762
4763 /*
4764 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
4765 * e.g. KVM doesn't support the LBR MSRs.
4766 * Check all LBR MSRs here and disable LBR access if any of them
4767 * cannot be accessed.
4768 */
4769 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
4770 x86_pmu.lbr_nr = 0;
4771 for (i = 0; i < x86_pmu.lbr_nr; i++) {
4772 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
4773 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
4774 x86_pmu.lbr_nr = 0;
4775 }
4776
4777 x86_pmu.caps_attrs = intel_pmu_caps_attrs;
4778
4779 if (x86_pmu.lbr_nr) {
4780 x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
4781 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
4782 }
4783
4784 /*
4785 * Accessing an extra MSR may cause a #GP under certain circumstances,
4786 * e.g. KVM doesn't support offcore events.
4787 * Check all extra_regs here.
4788 */
4789 if (x86_pmu.extra_regs) {
4790 for (er = x86_pmu.extra_regs; er->msr; er++) {
4791 er->extra_msr_access = check_msr(er->msr, 0x11UL);
4792 /* Disable LBR select mapping */
4793 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
4794 x86_pmu.lbr_sel_map = NULL;
4795 }
4796 }
4797
4798 /* Support full width counters using alternative MSR range */
4799 if (x86_pmu.intel_cap.full_width_write) {
4800 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
4801 x86_pmu.perfctr = MSR_IA32_PMC0;
4802 pr_cont("full-width counters, ");
4803 }
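/*
 * E.g. with 48-bit counters cntval_mask is 0xffffffffffff, so the
 * full-width max_period above becomes 0x7fffffffffff instead of the
 * default (1ULL << 31) - 1.
 */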
4804
4805 /*
4806 * For arch perfmon 4 use counter freezing to avoid
4807 * several MSR accesses in the PMI.
4808 */
4809 if (x86_pmu.counter_freezing)
4810 x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
4811
4812 kfree(to_free);
4813 return 0;
4814 }
4815
4816 /*
4817 * HT bug: phase 2 init
4818 * Called once we have valid topology information to check
4819 * whether or not HT is enabled.
4820 * If HT is off, then we disable the workaround.
4821 */
4822 static __init int fixup_ht_bug(void)
4823 {
4824 int c;
4825 /*
4826 * problem not present on this CPU model, nothing to do
4827 */
4828 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
4829 return 0;
4830
4831 if (topology_max_smt_threads() > 1) {
4832 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
4833 return 0;
4834 }
4835
4836 cpus_read_lock();
4837
4838 hardlockup_detector_perf_stop();
4839
4840 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
4841
4842 x86_pmu.start_scheduling = NULL;
4843 x86_pmu.commit_scheduling = NULL;
4844 x86_pmu.stop_scheduling = NULL;
4845
4846 hardlockup_detector_perf_restart();
4847
4848 for_each_online_cpu(c)
4849 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
4850
4851 cpus_read_unlock();
4852 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
4853 return 0;
4854 }
4855 subsys_initcall(fixup_ht_bug)