]>
Commit | Line | Data |
---|---|---|
f81ef4a9 WD |
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | |
3 | * it under the terms of the GNU General Public License version 2 as | |
4 | * published by the Free Software Foundation. | |
5 | * | |
6 | * This program is distributed in the hope that it will be useful, | |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
9 | * GNU General Public License for more details. | |
10 | * | |
11 | * You should have received a copy of the GNU General Public License | |
12 | * along with this program; if not, write to the Free Software | |
13 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
14 | * | |
15 | * Copyright (C) 2009, 2010 ARM Limited | |
16 | * | |
17 | * Author: Will Deacon <will.deacon@arm.com> | |
18 | */ | |
19 | ||
20 | /* | |
21 | * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, | |
22 | * using the CPU's debug registers. | |
23 | */ | |
24 | #define pr_fmt(fmt) "hw-breakpoint: " fmt | |
25 | ||
26 | #include <linux/errno.h> | |
7e202696 | 27 | #include <linux/hardirq.h> |
f81ef4a9 WD |
28 | #include <linux/perf_event.h> |
29 | #include <linux/hw_breakpoint.h> | |
30 | #include <linux/smp.h> | |
31 | ||
32 | #include <asm/cacheflush.h> | |
33 | #include <asm/cputype.h> | |
34 | #include <asm/current.h> | |
35 | #include <asm/hw_breakpoint.h> | |
36 | #include <asm/kdebug.h> | |
f81ef4a9 WD |
37 | #include <asm/traps.h> |
38 | ||
39 | /* Breakpoint currently in use for each BRP. */ | |
40 | static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); | |
41 | ||
42 | /* Watchpoint currently in use for each WRP. */ | |
43 | static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); | |
44 | ||
45 | /* Number of BRP/WRP registers on this CPU. */ | |
46 | static int core_num_brps; | |
47 | static int core_num_wrps; | |
48 | ||
49 | /* Debug architecture version. */ | |
50 | static u8 debug_arch; | |
51 | ||
52 | /* Maximum supported watchpoint length. */ | |
53 | static u8 max_watchpoint_len; | |
54 | ||
f81ef4a9 WD |
/*
 * The coprocessor register (CRm) and opcode (OP2) of a debug register
 * access instruction must be encoded at compile time, so these macros
 * expand a (OP2, register-number) pair into a switch-case label that
 * performs the matching MRC/MCR. The case label packs OP2 into the
 * high nibble and the register number into the low nibble, matching
 * the ARM_BASE_*/ARM_OP2_* index scheme used by read/write_wb_reg().
 */
#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL);	\
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

/* Expand one case per possible register number (0..15) for a bank. */
#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)
100 | ||
101 | static u32 read_wb_reg(int n) | |
102 | { | |
103 | u32 val = 0; | |
104 | ||
105 | switch (n) { | |
106 | GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val); | |
107 | GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val); | |
108 | GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val); | |
109 | GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val); | |
110 | default: | |
111 | pr_warning("attempt to read from unknown breakpoint " | |
112 | "register %d\n", n); | |
113 | } | |
114 | ||
115 | return val; | |
116 | } | |
117 | ||
118 | static void write_wb_reg(int n, u32 val) | |
119 | { | |
120 | switch (n) { | |
121 | GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val); | |
122 | GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val); | |
123 | GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val); | |
124 | GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val); | |
125 | default: | |
126 | pr_warning("attempt to write to unknown breakpoint " | |
127 | "register %d\n", n); | |
128 | } | |
129 | isb(); | |
130 | } | |
131 | ||
0017ff42 WD |
/*
 * Determine debug architecture version from the DIDR.
 * Falls back to v6 when the CPUID scheme predates the extended
 * (0xf) format and the feature registers cannot be trusted.
 */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warn_once("CPUID feature registers not supported. "
			     "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	/* Debug architecture version lives in DIDR bits [19:16]. */
	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}
147 | ||
/* Accessor for the cached debug architecture version (set at init). */
u8 arch_get_debug_arch(void)
{
	return debug_arch;
}
152 | ||
66e1cfe6 WD |
153 | static int debug_arch_supported(void) |
154 | { | |
155 | u8 arch = get_debug_arch(); | |
b5d5b8f9 WD |
156 | |
157 | /* We don't support the memory-mapped interface. */ | |
158 | return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) || | |
159 | arch >= ARM_DEBUG_ARCH_V7_1; | |
66e1cfe6 WD |
160 | } |
161 | ||
bf880114 WD |
/*
 * Can we determine the watchpoint access type from the fsr?
 * This build answers "no": the handler must not trust the FSR's
 * read/write indication and so cannot filter on access type.
 */
static int debug_exception_updates_fsr(void)
{
	return 0;
}
167 | ||
c512de95 WD |
/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	/* DIDR bits [31:28] encode (number of WRPs - 1). */
	return ((didr >> 28) & 0xf) + 1;
}
175 | ||
/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	/* DIDR bits [27:24] encode (number of BRPs - 1). */
	return ((didr >> 24) & 0xf) + 1;
}
183 | ||
184 | /* Does this core support mismatch breakpoints? */ | |
185 | static int core_has_mismatch_brps(void) | |
186 | { | |
187 | return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 && | |
188 | get_num_brp_resources() > 1); | |
189 | } | |
190 | ||
191 | /* Determine number of usable WRPs available. */ | |
192 | static int get_num_wrps(void) | |
193 | { | |
194 | /* | |
c512de95 WD |
195 | * On debug architectures prior to 7.1, when a watchpoint fires, the |
196 | * only way to work out which watchpoint it was is by disassembling | |
197 | * the faulting instruction and working out the address of the memory | |
198 | * access. | |
0017ff42 WD |
199 | * |
200 | * Furthermore, we can only do this if the watchpoint was precise | |
201 | * since imprecise watchpoints prevent us from calculating register | |
202 | * based addresses. | |
203 | * | |
204 | * Providing we have more than 1 breakpoint register, we only report | |
205 | * a single watchpoint register for the time being. This way, we always | |
206 | * know which watchpoint fired. In the future we can either add a | |
207 | * disassembler and address generation emulator, or we can insert a | |
208 | * check to see if the DFAR is set on watchpoint exception entry | |
209 | * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows | |
210 | * that it is set on some implementations]. | |
211 | */ | |
c512de95 WD |
212 | if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1) |
213 | return 1; | |
0017ff42 | 214 | |
c512de95 | 215 | return get_num_wrp_resources(); |
0017ff42 WD |
216 | } |
217 | ||
/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();

	/* One BRP is reserved for mismatch-based stepping if supported. */
	if (core_has_mismatch_brps())
		brps--;

	return brps;
}
224 | ||
f81ef4a9 WD |
/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 *
 * Returns 0 on success, -ENODEV for an unknown debug architecture and
 * -EPERM if the MDBGEN write did not take effect.
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	ARM_DBG_READ(c1, 0, dscr);

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/*
	 * Write to the corresponding DSCR. The register used to set
	 * MDBGEN differs between v6 (DSCR itself) and v7+ (DSCRext).
	 */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
	case ARM_DEBUG_ARCH_V7_1:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		/* Make sure the write is visible before the readback below. */
		isb();
		break;
	default:
		return -ENODEV;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
		"Failed to enable monitor mode on CPU %d.\n",
		smp_processor_id()))
		return -EPERM;

out:
	return 0;
}
265 | ||
8fbf397c WD |
266 | int hw_breakpoint_slots(int type) |
267 | { | |
66e1cfe6 WD |
268 | if (!debug_arch_supported()) |
269 | return 0; | |
270 | ||
8fbf397c WD |
271 | /* |
272 | * We can be called early, so don't rely on | |
273 | * our static variables being initialised. | |
274 | */ | |
275 | switch (type) { | |
276 | case TYPE_INST: | |
277 | return get_num_brps(); | |
278 | case TYPE_DATA: | |
279 | return get_num_wrps(); | |
280 | default: | |
281 | pr_warning("unknown slot type: %d\n", type); | |
282 | return 0; | |
283 | } | |
284 | } | |
285 | ||
f81ef4a9 WD |
/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 *
 * The probe writes an 8-byte length encoding into WCR0 and reads it
 * back: if the hardware kept all the bits, 8-byte watchpoints work.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	/* Pre-v7 debug only supports word-sized byte-address select. */
	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}
311 | ||
/* Accessor for the cached maximum watchpoint length (set at init). */
u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}
316 | ||
f81ef4a9 WD |
/*
 * Install a perf counter breakpoint.
 *
 * Picks the first free per-cpu BRP/WRP slot, then programs the value
 * and control registers for it. Returns 0 on success, -EBUSY when no
 * slot is free, or the error from enable_monitor_mode().
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	/* Bit 0 of the control register is the enable bit. */
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Claim the first free slot for this event. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Override the breakpoint data with the step data.
	 * A stepped watchpoint is emulated with the mismatch BRP, which
	 * lives just past the ordinary BRPs (index core_num_brps).
	 */
	if (info->step_ctrl.enabled) {
		addr = info->trigger & ~0x3;
		ctrl = encode_ctrl_reg(info->step_ctrl);
		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
			i = 0;
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
		}
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;
}
383 | ||
/*
 * Remove a perf counter breakpoint: free its per-cpu slot and clear the
 * corresponding control register (which also disables it).
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return;

	/*
	 * Ensure that we disable the mismatch breakpoint: a stepped
	 * watchpoint actually lives in the mismatch BRP slot.
	 */
	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
	    info->step_ctrl.enabled) {
		i = 0;
		base = ARM_BASE_BCR + core_num_brps;
	}

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}
425 | ||
426 | static int get_hbp_len(u8 hbp_len) | |
427 | { | |
428 | unsigned int len_in_bytes = 0; | |
429 | ||
430 | switch (hbp_len) { | |
431 | case ARM_BREAKPOINT_LEN_1: | |
432 | len_in_bytes = 1; | |
433 | break; | |
434 | case ARM_BREAKPOINT_LEN_2: | |
435 | len_in_bytes = 2; | |
436 | break; | |
437 | case ARM_BREAKPOINT_LEN_4: | |
438 | len_in_bytes = 4; | |
439 | break; | |
440 | case ARM_BREAKPOINT_LEN_8: | |
441 | len_in_bytes = 8; | |
442 | break; | |
443 | } | |
444 | ||
445 | return len_in_bytes; | |
446 | } | |
447 | ||
/*
 * Check whether bp virtual address is in kernel space.
 *
 * NOTE(review): `va + len - 1` could wrap for addresses near the top of
 * the address space; presumably validation elsewhere prevents that —
 * confirm before relying on this for untrusted addresses.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	/* Both the start and the end of the range must be above TASK_SIZE. */
	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
462 | ||
463 | /* | |
464 | * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. | |
465 | * Hopefully this will disappear when ptrace can bypass the conversion | |
466 | * to generic breakpoint descriptions. | |
467 | */ | |
468 | int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, | |
469 | int *gen_len, int *gen_type) | |
470 | { | |
471 | /* Type */ | |
472 | switch (ctrl.type) { | |
473 | case ARM_BREAKPOINT_EXECUTE: | |
474 | *gen_type = HW_BREAKPOINT_X; | |
475 | break; | |
476 | case ARM_BREAKPOINT_LOAD: | |
477 | *gen_type = HW_BREAKPOINT_R; | |
478 | break; | |
479 | case ARM_BREAKPOINT_STORE: | |
480 | *gen_type = HW_BREAKPOINT_W; | |
481 | break; | |
482 | case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE: | |
483 | *gen_type = HW_BREAKPOINT_RW; | |
484 | break; | |
485 | default: | |
486 | return -EINVAL; | |
487 | } | |
488 | ||
489 | /* Len */ | |
490 | switch (ctrl.len) { | |
491 | case ARM_BREAKPOINT_LEN_1: | |
492 | *gen_len = HW_BREAKPOINT_LEN_1; | |
493 | break; | |
494 | case ARM_BREAKPOINT_LEN_2: | |
495 | *gen_len = HW_BREAKPOINT_LEN_2; | |
496 | break; | |
497 | case ARM_BREAKPOINT_LEN_4: | |
498 | *gen_len = HW_BREAKPOINT_LEN_4; | |
499 | break; | |
500 | case ARM_BREAKPOINT_LEN_8: | |
501 | *gen_len = HW_BREAKPOINT_LEN_8; | |
502 | break; | |
503 | default: | |
504 | return -EINVAL; | |
505 | } | |
506 | ||
507 | return 0; | |
508 | } | |
509 | ||
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 * Returns 0 on success, -EINVAL for unsupported type/length combinations.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		/*
		 * 8-byte lengths are only valid for watchpoints, and only
		 * when the hardware supports them; otherwise we deliberately
		 * fall through to the -EINVAL in the default case.
		 */
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
		/* fallthrough */
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}
582 | ||
/*
 * Validate the arch-specific HW Breakpoint register settings.
 * Builds the arch info, checks/normalises alignment and applies extra
 * restrictions to events without an overflow handler (which must be
 * single-stepped via a mismatch breakpoint).
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
		/* fallthrough */
	case 3:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
		/* fallthrough */
	default:
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Align the address down and fold the offset into the
	 * byte-address-select field of the length encoding.
	 */
	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	if (!bp->overflow_handler) {
		/*
		 * Mismatch breakpoints are required for single-stepping
		 * breakpoints.
		 */
		if (!core_has_mismatch_brps())
			return -EINVAL;

		/* We don't allow mismatch breakpoints in kernel space. */
		if (arch_check_bp_in_kernelspace(bp))
			return -EPERM;

		/*
		 * Per-cpu breakpoints are not supported by our stepping
		 * mechanism.
		 */
		if (!bp->hw.bp_target)
			return -EINVAL;

		/*
		 * We only support specific access types if the fsr
		 * reports them.
		 */
		if (!debug_exception_updates_fsr() &&
		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
		     info->ctrl.type == ARM_BREAKPOINT_STORE))
			return -EINVAL;
	}

out:
	return ret;
}
654 | ||
9ebb3cbc WD |
655 | /* |
656 | * Enable/disable single-stepping over the breakpoint bp at address addr. | |
657 | */ | |
658 | static void enable_single_step(struct perf_event *bp, u32 addr) | |
f81ef4a9 | 659 | { |
9ebb3cbc | 660 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
f81ef4a9 | 661 | |
9ebb3cbc WD |
662 | arch_uninstall_hw_breakpoint(bp); |
663 | info->step_ctrl.mismatch = 1; | |
664 | info->step_ctrl.len = ARM_BREAKPOINT_LEN_4; | |
665 | info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE; | |
666 | info->step_ctrl.privilege = info->ctrl.privilege; | |
667 | info->step_ctrl.enabled = 1; | |
668 | info->trigger = addr; | |
669 | arch_install_hw_breakpoint(bp); | |
670 | } | |
f81ef4a9 | 671 | |
9ebb3cbc WD |
672 | static void disable_single_step(struct perf_event *bp) |
673 | { | |
674 | arch_uninstall_hw_breakpoint(bp); | |
675 | counter_arch_bp(bp)->step_ctrl.enabled = 0; | |
676 | arch_install_hw_breakpoint(bp); | |
f81ef4a9 WD |
677 | } |
678 | ||
6f26aa05 WD |
/*
 * Debug exception handler for watchpoint hits. Identifies which WRP
 * fired (when the architecture allows), reports it to perf and, for
 * events with no overflow handler, arms a mismatch breakpoint so the
 * faulting access can be stepped over.
 */
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	int i, access;
	u32 val, ctrl_reg, alignment_mask;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		/* Protects the slot's event against concurrent release. */
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/*
		 * The DFAR is an unknown value on debug architectures prior
		 * to 7.1. Since we only allow a single watchpoint on these
		 * older CPUs, we can set the trigger to the lowest possible
		 * faulting address.
		 */
		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
			BUG_ON(i > 0);
			info->trigger = wp->attr.bp_addr;
		} else {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;

			/* Check if the watchpoint value matches. */
			val = read_wb_reg(ARM_BASE_WVR + i);
			if (val != (addr & ~alignment_mask))
				goto unlock;

			/* Possible match, check the byte address select. */
			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
			decode_ctrl_reg(ctrl_reg, &ctrl);
			if (!((1 << (addr & alignment_mask)) & ctrl.len))
				goto unlock;

			/* Check that the access type matches. */
			if (debug_exception_updates_fsr()) {
				access = (fsr & ARM_FSR_ACCESS_MASK) ?
					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
				if (!(access & hw_breakpoint_type(wp)))
					goto unlock;
			}

			/* We have a winner. */
			info->trigger = addr;
		}

		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

unlock:
		rcu_read_unlock();
	}
}
752 | ||
93a04a34 WD |
/*
 * Called after a breakpoint exception at pc: if any watchpoint on this
 * CPU is in the single-step state and we have moved past its trigger
 * instruction, restore the original watchpoint configuration.
 */
static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}
784 | ||
f81ef4a9 WD |
/*
 * Debug exception handler for breakpoint hits. Matches the faulting PC
 * against each installed BRP, reports hits to perf and manages the
 * single-step state for both breakpoints and stepped watchpoints.
 */
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			/* Step over the breakpoint if nobody else will. */
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}
837 | ||
/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 *
 * Returns 0 if the debug exception was handled here, 1 otherwise.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	/* Keep us on this CPU while we touch its debug registers. */
	preempt_disable();

	if (interrupts_enabled(regs))
		local_irq_enable();

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
		/* fallthrough -- treat it like a synchronous watchpoint */
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, fsr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	preempt_enable();

	return ret;
}
874 | ||
875 | /* | |
876 | * One-time initialisation. | |
877 | */ | |
0d352e3d WD |
878 | static cpumask_t debug_err_mask; |
879 | ||
880 | static int debug_reg_trap(struct pt_regs *regs, unsigned int instr) | |
881 | { | |
882 | int cpu = smp_processor_id(); | |
883 | ||
884 | pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n", | |
885 | instr, cpu); | |
886 | ||
887 | /* Set the error flag for this CPU and skip the faulting instruction. */ | |
888 | cpumask_set_cpu(cpu, &debug_err_mask); | |
889 | instruction_pointer(regs) += 4; | |
890 | return 0; | |
891 | } | |
892 | ||
/* Trap CP14 debug-register accesses that the hardware refuses. */
static struct undef_hook debug_reg_hook = {
	.instr_mask	= 0x0fe80f10,
	.instr_val	= 0x0e000e10,
	.fn		= debug_reg_trap,
};
898 | ||
/*
 * Per-CPU reset of the debug logic, run on the target CPU itself (via
 * on_each_cpu() at init and smp_call_function_single() on hotplug).
 * Unlocks the OS lock where required, clears vector catch and all
 * breakpoint/watchpoint register pairs, then tries to enable monitor
 * mode.  Any failure is recorded in debug_err_mask.
 */
static void reset_ctrl_regs(void *unused)
{
	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
	u32 val;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving the debug
	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
	 * the debug registers out of reset, so we must unlock the OS Lock
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
	switch (debug_arch) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		/* ARMv6 cores clear the registers out of reset. */
		goto out_mdbgen;
	case ARM_DEBUG_ARCH_V7_ECP14:
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		/* c1, c5, 4 = DBGPRSR (power/reset status) — see ARM ARM v7 debug. */
		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (val));
		if ((val & 0x1) == 0)
			err = -EPERM;

		/*
		 * Check whether we implement OS save and restore.
		 */
		/* c1, c1, 4 = DBGOSLSR; no OS save/restore -> no OS lock to clear. */
		asm volatile("mrc p14, 0, %0, c1, c1, 4" : "=r" (val));
		if ((val & 0x9) == 0)
			goto clear_vcr;
		break;
	case ARM_DEBUG_ARCH_V7_1:
		/*
		 * Ensure the OS double lock is clear.
		 */
		/* c1, c3, 4 = DBGOSDLR — set means debug access is locked out. */
		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (val));
		if ((val & 0x1) == 1)
			err = -EPERM;
		break;
	}
	/*
	 * NOTE(review): no default case — an unrecognised debug_arch falls
	 * through with err == 0; callers gate on debug_arch_supported() first.
	 */

	if (err) {
		pr_warning("CPU %d debug is powered down!\n", cpu);
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}

	/*
	 * Unconditionally clear the OS lock by writing a value
	 * other than 0xC5ACCE55 to the access register.
	 */
	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
	isb();

	/*
	 * Clear any configured vector-catch events before
	 * enabling monitor mode.
	 */
clear_vcr:
	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
	isb();

	/* If the accesses above trapped, debug_reg_trap() flagged this CPU. */
	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warning("CPU %d failed to disable vector catch\n", cpu);
		return;
	}

	/*
	 * The control/value register pairs are UNKNOWN out of reset so
	 * clear them to avoid spurious debug events.
	 */
	raw_num_brps = get_num_brp_resources();
	for (i = 0; i < raw_num_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}

	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
		return;
	}

	/*
	 * Have a crack at enabling monitor mode. We don't actually need
	 * it yet, but reporting an error early is useful if it fails.
	 */
out_mdbgen:
	if (enable_monitor_mode())
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}
997 | ||
7d99331e WD |
998 | static int __cpuinit dbg_reset_notify(struct notifier_block *self, |
999 | unsigned long action, void *cpu) | |
1000 | { | |
1001 | if (action == CPU_ONLINE) | |
1002 | smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1); | |
0d352e3d | 1003 | |
7d99331e WD |
1004 | return NOTIFY_OK; |
1005 | } | |
1006 | ||
/* Hotplug notifier registered from arch_hw_breakpoint_init(). */
static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};
1010 | ||
f81ef4a9 WD |
/*
 * One-time boot-time setup: detect the debug architecture, count the
 * available breakpoint/watchpoint registers, reset the debug logic on
 * every CPU, and hook up the debug fault handlers plus the hotplug
 * notifier.  Returns 0 even when hardware debug is unusable, in which
 * case core_num_brps/core_num_wrps are left at 0.
 */
static int __init arch_hw_breakpoint_init(void)
{
	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	/*
	 * We need to tread carefully here because DBGSWENABLE may be
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	/* Must be installed BEFORE any cp14 access in reset_ctrl_regs(). */
	register_undef_hook(&debug_reg_hook);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	on_each_cpu(reset_ctrl_regs, NULL, 1);
	unregister_undef_hook(&debug_reg_hook);
	if (!cpumask_empty(&debug_err_mask)) {
		/* Some CPU faulted or failed to reset: disable the facility. */
		core_num_brps = 0;
		core_num_wrps = 0;
		return 0;
	}

	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
		"", core_num_wrps);

	/* Work out the maximum supported watchpoint length. */
	max_watchpoint_len = get_max_wp_len();
	pr_info("maximum watchpoint size is %u bytes.\n",
		max_watchpoint_len);

	/* Register debug fault handler. */
	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "watchpoint debug exception");
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
1063 | ||
/*
 * perf PMU read callback for breakpoint events: deliberately a no-op,
 * as there is no counter value to update here.
 */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
1067 | ||
/*
 * Dummy function to register with die_notifier.  Always returns
 * NOTIFY_DONE so other consumers on the notifier chain still run.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					unsigned long val, void *data)
{
	return NOTIFY_DONE;
}