/*
 * @file op_model_ppro.c
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Andi Kleen
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

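/*
 * Defaults for a classic P6 (PPro/PII/PIII): two counters, 32 bits wide.
 * Both values may be raised at runtime on architectural perfmon CPUs, in
 * arch_perfmon_setup_counters() and ppro_setup_ctrs() below.
 */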
static int num_counters = 2;
static int counter_width = 32;

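/*
 * Bits that must be preserved in PERFEVTSEL: bit 21 is reserved on P6,
 * and bits 63:32 are treated as reserved (the P6 event-select MSRs are
 * 32 bits wide). ppro_setup_ctrs() masks the current MSR value with this
 * before OR-ing in new control bits, so reserved bits keep whatever
 * value they already had.
 */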
#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))

static u64 reset_value[OP_MAX_COUNTER];

static void ppro_shutdown(struct op_msrs const * const msrs)
{
        int i;

        for (i = 0; i < num_counters; ++i) {
                if (!msrs->counters[i].addr)
                        continue;
                release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
                release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
        }
}

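/*
 * The counter and event-select MSRs are reserved through the shared
 * perfctr allocator (reserve_perfctr_nmi()/reserve_evntsel_nmi() from
 * <asm/nmi.h>), so oprofile does not race with other users of the same
 * MSRs, such as the NMI watchdog. Either both MSRs of a pair end up
 * reserved, or neither does.
 */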
static int ppro_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < num_counters; i++) {
                if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
                        goto fail;
                if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
                        release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
                        goto fail;
                }
                /* both registers must be reserved */
                msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
                msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
                continue;
        fail:
                if (!counter_config[i].enabled)
                        continue;
                op_x86_warn_reserved(i);
                ppro_shutdown(msrs);
                return -EBUSY;
        }

        return 0;
}


static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
                            struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
                union cpuid10_eax eax;
                eax.full = cpuid_eax(0xa);

                /*
                 * For Core2 (family 6, model 15), don't reset the
                 * counter width. Use boot_cpu_data instead of
                 * __this_cpu_read(cpu_info.*): this function runs in
                 * preemptible context, where __this_cpu*() must not be
                 * used, and family/model are the same on all CPUs
                 * anyway.
                 */
                if (!(eax.split.version_id == 0 &&
                      boot_cpu_data.x86 == 6 &&
                      boot_cpu_data.x86_model == 15)) {

                        if (counter_width < eax.split.bit_width)
                                counter_width = eax.split.bit_width;
                }
        }

        /* clear all counters */
        for (i = 0; i < num_counters; ++i) {
                if (!msrs->controls[i].addr)
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                        op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
                /*
                 * avoid a false detection of ctr overflows in NMI
                 * handler
                 */
                wrmsrl(msrs->counters[i].addr, -1LL);
        }

        /* enable active counters */
        for (i = 0; i < num_counters; ++i) {
                if (counter_config[i].enabled && msrs->counters[i].addr) {
                        reset_value[i] = counter_config[i].count;
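                        /*
                         * Program the counter with -count: it counts
                         * up and overflows (raising the NMI) after
                         * exactly "count" events.
                         */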
                        wrmsrl(msrs->counters[i].addr, -reset_value[i]);
                        rdmsrl(msrs->controls[i].addr, val);
                        val &= model->reserved;
                        val |= op_x86_get_ctrl(model, &counter_config[i]);
                        wrmsrl(msrs->controls[i].addr, val);
                } else {
                        reset_value[i] = 0;
                }
        }
}


static int ppro_check_ctrs(struct pt_regs * const regs,
                           struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        for (i = 0; i < num_counters; ++i) {
                if (!reset_value[i])
                        continue;
                rdmsrl(msrs->counters[i].addr, val);
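                /*
                 * If the sign bit (bit counter_width - 1) is still
                 * set, the counter has not yet wrapped past zero,
                 * i.e. it has not overflowed since the last reload.
                 */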
                if (val & (1ULL << (counter_width - 1)))
                        continue;
                oprofile_add_sample(regs, i);
                wrmsrl(msrs->counters[i].addr, -reset_value[i]);
        }

        /*
         * Only P6 based Pentium M needs to re-unmask the APIC vector,
         * but it doesn't hurt other P6 variants.
         */
        apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

        /*
         * We can't work out if we really handled an interrupt. We
         * might have serviced a *second* counter's overflow while
         * handling this NMI; when that counter's own NMI arrives
         * later, no counter appears overflowed, so we would return 0
         * and get dazed + confused. Instead we always assume we found
         * an overflow. This sucks.
         */
        return 1;
}


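/*
 * start/stop only toggle ARCH_PERFMON_EVENTSEL_ENABLE; the rest of the
 * event selection programmed by ppro_setup_ctrs() is left untouched.
 */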
static void ppro_start(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        for (i = 0; i < num_counters; ++i) {
                if (reset_value[i]) {
                        rdmsrl(msrs->controls[i].addr, val);
                        val |= ARCH_PERFMON_EVENTSEL_ENABLE;
                        wrmsrl(msrs->controls[i].addr, val);
                }
        }
}


static void ppro_stop(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        for (i = 0; i < num_counters; ++i) {
                if (!reset_value[i])
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(msrs->controls[i].addr, val);
        }
}

struct op_x86_model_spec op_ppro_spec = {
        .num_counters           = 2,
        .num_controls           = 2,
        .reserved               = MSR_PPRO_EVENTSEL_RESERVED,
        .fill_in_addresses      = &ppro_fill_in_addresses,
        .setup_ctrs             = &ppro_setup_ctrs,
        .check_ctrs             = &ppro_check_ctrs,
        .start                  = &ppro_start,
        .stop                   = &ppro_stop,
        .shutdown               = &ppro_shutdown
};

/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */
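
/*
 * For reference, CPUID 0xA reports in EAX (decoded by union cpuid10_eax
 * in <asm/perf_event.h>):
 *
 *   bits  7:0   version_id   - architectural perfmon version (0 = none)
 *   bits 15:8   num_counters - general-purpose counters per logical CPU
 *   bits 23:16  bit_width    - width of each counter, in bits
 *   bits 31:24  mask_length  - length of the EBX event-availability mask
 */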

static void arch_perfmon_setup_counters(void)
{
        union cpuid10_eax eax;

        eax.full = cpuid_eax(0xa);

        /*
         * Workaround for BIOS bugs on family 6, model 15 (Core 2),
         * where CPUID 0xA reports version_id 0: assume architectural
         * perfmon v2 with two 40-bit counters. Taken from perfmon2.
         */
        if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 15) {
                eax.split.version_id = 2;
                eax.split.num_counters = 2;
                eax.split.bit_width = 40;
        }

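        /* Clamp to OP_MAX_COUNTER; reset_value[] above is sized for it. */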
        num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);

        op_arch_perfmon_spec.num_counters = num_counters;
        op_arch_perfmon_spec.num_controls = num_counters;
}

static int arch_perfmon_init(struct oprofile_operations *ignore)
{
        arch_perfmon_setup_counters();
        return 0;
}

struct op_x86_model_spec op_arch_perfmon_spec = {
        .reserved               = MSR_PPRO_EVENTSEL_RESERVED,
        .init                   = &arch_perfmon_init,
        /* num_counters/num_controls filled in at runtime */
        .fill_in_addresses      = &ppro_fill_in_addresses,
        /* user space does the cpuid check for available events */
        .setup_ctrs             = &ppro_setup_ctrs,
        .check_ctrs             = &ppro_check_ctrs,
        .start                  = &ppro_start,
        .stop                   = &ppro_stop,
        .shutdown               = &ppro_shutdown
};