/* Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "break-common.h"
#include "common-regcache.h"
#include "nat/linux-nat.h"
#include "aarch64-linux-hw-point.h"

#include <sys/uio.h>
#include <asm/ptrace.h>
#include <sys/ptrace.h>
#include <elf.h>

/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via the ptrace calls
   with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;

/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in the 8-bit byte DR_CONTROL_MASK.  A
   buggy kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time.  */
static bool kernel_supports_any_contiguous_range = true;

/* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL.  */

unsigned int
aarch64_watchpoint_offset (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  for (retval = 0; mask && (mask & 1) == 0; ++retval)
    mask >>= 1;

  return retval;
}

/* Utility function that returns the length in bytes of a watchpoint
   according to the content of a hardware debug control register CTRL.
   Any contiguous range of bytes in CTRL is supported.  The returned
   value can be between 0..8 (inclusive).  */

unsigned int
aarch64_watchpoint_length (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  mask >>= aarch64_watchpoint_offset (ctrl);

  /* Count bottom ones.  */
  for (retval = 0; (mask & 1) != 0; ++retval)
    mask >>= 1;

  if (mask != 0)
    error (_("Unexpected hardware watchpoint length register value 0x%x"),
           DR_CONTROL_MASK (ctrl));

  return retval;
}
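/* A worked example of the two helpers above (illustrative values
   computed by hand, not quoted from any architecture manual): if the
   BAS field extracted by DR_CONTROL_MASK (ctrl) is 0x0c (binary
   00001100), the watchpoint covers bytes 2 and 3 of the aligned
   doubleword, so aarch64_watchpoint_offset returns 2 and
   aarch64_watchpoint_length returns 2.  A BAS of 0xff covers all
   eight bytes: offset 0, length 8.  */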

/* Given the hardware breakpoint or watchpoint type TYPE, the byte
   offset OFFSET of the watched region within its aligned doubleword,
   and its length LEN, return the expected encoding for a hardware
   breakpoint/watchpoint control register.  */

static unsigned int
aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset, int len)
{
  unsigned int ctrl, ttype;

  gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
  gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);

  /* type */
  switch (type)
    {
    case hw_write:
      ttype = 2;
      break;
    case hw_read:
      ttype = 1;
      break;
    case hw_access:
      ttype = 3;
      break;
    case hw_execute:
      ttype = 0;
      break;
    default:
      perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
    }

  ctrl = ttype << 3;

  /* offset and length bitmask */
  ctrl |= ((1 << len) - 1) << (5 + offset);
  /* enabled at el0 */
  ctrl |= (2 << 1) | 1;

  return ctrl;
}
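/* For illustration only (values worked out by hand from the encoding
   above, not taken from kernel or hardware documentation): a 4-byte
   write watchpoint at offset 0 gives ttype = 2, so
   ctrl = (2 << 3) | (0x0f << 5) | (2 << 1) | 1 = 0x1f5, i.e.
   BAS = 0x0f, type = write, enabled at EL0.  On a kernel that accepts
   arbitrary contiguous BAS ranges, a 2-byte watchpoint at offset 3
   would instead set BAS = 0x18, giving ctrl = 0x315.  */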

/* Addresses to be written to the hardware breakpoint and watchpoint
   value registers need to be aligned; the alignment is 4 bytes for
   breakpoints and 8 bytes for watchpoints.  The Linux kernel rejects
   any non-aligned address it receives from the related ptrace call.
   Furthermore, a kernel affected by PR external/20207 only supports
   the following Byte Address Select (BAS) values: 0x1, 0x3, 0xf and
   0xff, which means that for a hardware watchpoint to be accepted by
   such a kernel (via ptrace call), its valid length can only be
   1 byte, 2 bytes, 4 bytes or 8 bytes.  Despite these limitations,
   unaligned watchpoints are supported in this port.

   Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise.  */

static int
aarch64_point_is_aligned (int is_watchpoint, CORE_ADDR addr, int len)
{
  unsigned int alignment = 0;

  if (is_watchpoint)
    alignment = AARCH64_HWP_ALIGNMENT;
  else
    {
      struct regcache *regcache
        = get_thread_regcache_for_ptid (current_lwp_ptid ());

      /* Set alignment to 2 only if the current process is 32-bit,
         since thumb instructions can be 2-byte aligned.  Otherwise, set
         alignment to AARCH64_HBP_ALIGNMENT.  */
      if (regcache_register_size (regcache, 0) == 8)
        alignment = AARCH64_HBP_ALIGNMENT;
      else
        alignment = 2;
    }

  if (addr & (alignment - 1))
    return 0;

  if ((!kernel_supports_any_contiguous_range
       && len != 8 && len != 4 && len != 2 && len != 1)
      || (kernel_supports_any_contiguous_range
          && (len < 1 || len > 8)))
    return 0;

  return 1;
}
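/* For example (illustrative values only): an 8-byte watchpoint at
   address 0x1004 makes this function return 0 because the address is
   not 8-byte aligned, and a 3-byte watchpoint at 0x1000 is accepted
   here only when the kernel supports arbitrary contiguous BAS ranges;
   in either rejected case the request is handled by the
   unaligned-watchpoint path below instead.  */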

/* Given the (potentially unaligned) watchpoint address in ADDR and
   length in LEN, return the aligned address, offset from that base
   address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
   and *ALIGNED_LEN_P, respectively.  The returned values will be
   valid values to write to the hardware watchpoint value and control
   registers.

   The given watchpoint may get truncated if more than one hardware
   register is needed to cover the watched region.  *NEXT_ADDR_P
   and *NEXT_LEN_P, if non-NULL, will return the address and length
   of the remaining part of the watchpoint (which can be processed
   by calling this routine again to generate another aligned address,
   offset and length tuple).

   Essentially, an unaligned watchpoint is implemented by minimally
   enlarging the watched area to meet the alignment requirement, and
   if necessary, splitting the watchpoint over several hardware
   watchpoint registers.

   On kernels that predate the support for Byte Address Select (BAS)
   ranges in the hardware watchpoint control register, the offset from
   the base address is always zero, and so in that case the trade-off
   is that there will be false-positive hits for the read-type or the
   access-type hardware watchpoints; for the write type, which is more
   commonly used, there will be no such issues, as the higher-level
   breakpoint management in gdb always examines the exact watched
   region for any content change, and transparently resumes a thread
   from a watchpoint trap if there is no change to the watched region.

   Another limitation is that, because the watched region is enlarged,
   the watchpoint fault address reported by
   aarch64_stopped_data_address may be outside of the original watched
   region, especially when the triggering instruction is accessing a
   larger region.  When the fault address is not within any known
   range, watchpoints_triggered in gdb will get confused, as the
   higher-level watchpoint management is only aware of the original
   watched regions, and will think that some unknown watchpoint has
   been triggered.  To prevent such a case, the
   aarch64_stopped_data_address implementations in gdb and gdbserver
   try to match the trapped address with a watched region, and return
   an address within the latter.  */

static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
                          int *aligned_offset_p, int *aligned_len_p,
                          CORE_ADDR *next_addr_p, int *next_len_p,
                          CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
         alignment boundary.  */
      aligned_len
        = max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
         accommodate this watchpoint.  */
      static const unsigned char
        aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
        { 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
                     ? len : aligned_len_array[offset + len - 1]);
      addr += len;
      len = 0;
    }

  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  if (next_addr_orig_p)
    *next_addr_orig_p = align_down (*next_addr_orig_p + alignment, alignment);
}
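/* A worked example of the splitting above (hand-picked illustrative
   numbers): watching 6 bytes at the unaligned address 0x1006 gives
   offset 6 and aligned_addr 0x1000.  Since offset + len = 12 exceeds
   the 8-byte register span, the first call returns the tuple
   (0x1000, offset 6, len 2) on a fixed kernel, or (0x1000, offset 0,
   len 8) on a buggy one, and sets *NEXT_ADDR_P/*NEXT_LEN_P to
   0x1008/4, so a second call produces (0x1008, offset 0, len 4).  */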

struct aarch64_dr_update_callback_param
{
  int is_watchpoint;
  unsigned int idx;
};

/* Callback for iterate_over_lwps.  Records the information about the
   change of one hardware breakpoint/watchpoint setting for the
   thread LWP.  The information is passed in via PTR.
   N.B.  The actual updating of hardware debug registers is not
   carried out until the moment the thread is resumed.  */

static int
debug_reg_change_callback (struct lwp_info *lwp, void *ptr)
{
  struct aarch64_dr_update_callback_param *param_p
    = (struct aarch64_dr_update_callback_param *) ptr;
  int tid = ptid_get_lwp (ptid_of_lwp (lwp));
  int idx = param_p->idx;
  int is_watchpoint = param_p->is_watchpoint;
  struct arch_lwp_info *info = lwp_arch_private_info (lwp);
  dr_changed_t *dr_changed_ptr;
  dr_changed_t dr_changed;

  if (info == NULL)
    {
      info = XCNEW (struct arch_lwp_info);
      lwp_set_arch_private_info (lwp, info);
    }

  if (show_debug_regs)
    {
      debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
      debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
                    "dr_changed_wp=0x%s\n", tid,
                    phex (info->dr_changed_bp, 8),
                    phex (info->dr_changed_wp, 8));
    }

  dr_changed_ptr = is_watchpoint ? &info->dr_changed_wp
    : &info->dr_changed_bp;
  dr_changed = *dr_changed_ptr;

  gdb_assert (idx >= 0
              && (idx <= (is_watchpoint ? aarch64_num_wp_regs
                          : aarch64_num_bp_regs)));

  /* The actual update is done later, just before resuming the lwp;
     here we just mark that one register pair needs updating.  */
  DR_MARK_N_CHANGED (dr_changed, idx);
  *dr_changed_ptr = dr_changed;

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  if (show_debug_regs)
    {
      debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
                    "dr_changed_wp=0x%s\n", tid,
                    phex (info->dr_changed_bp, 8),
                    phex (info->dr_changed_wp, 8));
    }

  return 0;
}

/* Notify each thread that its IDXth breakpoint/watchpoint register
   pair needs to be updated.  The message is recorded in each thread's
   arch-specific data area; the actual updating is done when the
   thread is next resumed.  */

static void
aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state *state,
                                 int is_watchpoint, unsigned int idx)
{
  struct aarch64_dr_update_callback_param param;
  ptid_t pid_ptid = ptid_t (current_lwp_ptid ().pid ());

  param.is_watchpoint = is_watchpoint;
  param.idx = idx;

  iterate_over_lwps (pid_ptid, debug_reg_change_callback, (void *) &param);
}

/* Reconfigure STATE to be compatible with Linux kernels that have the
   PR external/20207 bug.  This is called when
   KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false.  Note we
   don't try to support combining watchpoints with matching (and thus
   shared) masks, as it's too late when we get here.  On buggy
   kernels, GDB will first try to set up the perfect matching ranges,
   which will run out of registers before this function can merge
   them.  It doesn't look worth the effort to improve that, given that
   buggy kernels will eventually be phased out.  */

static void
aarch64_downgrade_regs (struct aarch64_debug_reg_state *state)
{
  for (int i = 0; i < aarch64_num_wp_regs; ++i)
    if ((state->dr_ctrl_wp[i] & 1) != 0)
      {
        gdb_assert (state->dr_ref_count_wp[i] != 0);
        uint8_t mask_orig = (state->dr_ctrl_wp[i] >> 5) & 0xff;
        gdb_assert (mask_orig != 0);
        static const uint8_t old_valid[] = { 0x01, 0x03, 0x0f, 0xff };
        uint8_t mask = 0;
        for (const uint8_t old_mask : old_valid)
          if (mask_orig <= old_mask)
            {
              mask = old_mask;
              break;
            }
        gdb_assert (mask != 0);

        /* No update needed for this watchpoint?  */
        if (mask == mask_orig)
          continue;
        state->dr_ctrl_wp[i] |= mask << 5;
        state->dr_addr_wp[i]
          = align_down (state->dr_addr_wp[i], AARCH64_HWP_ALIGNMENT);

        /* Try to match duplicate entries.  */
        for (int j = 0; j < i; ++j)
          if ((state->dr_ctrl_wp[j] & 1) != 0
              && state->dr_addr_wp[j] == state->dr_addr_wp[i]
              && state->dr_addr_orig_wp[j] == state->dr_addr_orig_wp[i]
              && state->dr_ctrl_wp[j] == state->dr_ctrl_wp[i])
            {
              state->dr_ref_count_wp[j] += state->dr_ref_count_wp[i];
              state->dr_ref_count_wp[i] = 0;
              state->dr_addr_wp[i] = 0;
              state->dr_addr_orig_wp[i] = 0;
              state->dr_ctrl_wp[i] &= ~1;
              break;
            }

        aarch64_notify_debug_reg_change (state, 1 /* is_watchpoint */, i);
      }
}
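/* For illustration (hand-worked values, not drawn from a real
   session): a watchpoint whose BAS mask is 0x0c (bytes 2-3 of its
   doubleword) cannot be expressed on a buggy kernel, so the loop
   above widens the mask to the next valid value, 0x0f, and re-aligns
   the stored value-register address down to the 8-byte boundary.
   The watched area grows from 2 to 4 bytes, which may cause extra
   false-positive traps but still covers the originally watched
   bytes.  */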

/* Record the insertion of one breakpoint/watchpoint, as represented
   by ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state *state,
                                   enum target_hw_bp_type type,
                                   CORE_ADDR addr, int offset, int len,
                                   CORE_ADDR addr_orig)
{
  int i, idx, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find an existing or free register in our cache.  */
  idx = -1;
  for (i = 0; i < num_regs; ++i)
    {
      if ((dr_ctrl_p[i] & 1) == 0)
        {
          gdb_assert (dr_ref_count[i] == 0);
          idx = i;
          /* no break; continue hunting for an existing one.  */
        }
      else if (dr_addr_p[i] == addr
               && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
               && dr_ctrl_p[i] == ctrl)
        {
          gdb_assert (dr_ref_count[i] != 0);
          idx = i;
          break;
        }
    }

  /* No space.  */
  if (idx == -1)
    return -1;

  /* Update our cache.  */
  if ((dr_ctrl_p[idx] & 1) == 0)
    {
      /* new entry */
      dr_addr_p[idx] = addr;
      if (dr_addr_orig_p != nullptr)
        dr_addr_orig_p[idx] = addr_orig;
      dr_ctrl_p[idx] = ctrl;
      dr_ref_count[idx] = 1;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, idx);
    }
  else
    {
      /* existing entry */
      dr_ref_count[idx]++;
    }

  return 0;
}
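/* A note on the reference counting above (an observation about this
   code, not additional behaviour): inserting a second point that
   resolves to the same aligned address, original address and control
   value as an existing entry does not consume another debug
   register; the matching slot is found by the search loop and only
   its reference count is incremented.  The register is released, and
   the threads notified, only when aarch64_dr_state_remove_one_point
   below drops the count back to zero.  */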

/* Record the removal of one breakpoint/watchpoint, as represented by
   ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state *state,
                                   enum target_hw_bp_type type,
                                   CORE_ADDR addr, int offset, int len,
                                   CORE_ADDR addr_orig)
{
  int i, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find the entry that matches the ADDR and CTRL.  */
  for (i = 0; i < num_regs; ++i)
    if (dr_addr_p[i] == addr
        && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
        && dr_ctrl_p[i] == ctrl)
      {
        gdb_assert (dr_ref_count[i] != 0);
        break;
      }

  /* Not found.  */
  if (i == num_regs)
    return -1;

  /* Clear our cache.  */
  if (--dr_ref_count[i] == 0)
    {
      /* Clear the enable bit.  */
      ctrl &= ~1;
      dr_addr_p[i] = 0;
      if (dr_addr_orig_p != nullptr)
        dr_addr_orig_p[i] = 0;
      dr_ctrl_p[i] = ctrl;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, i);
    }

  return 0;
}

int
aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
                           int len, int is_insert,
                           struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    {
      /* The hardware breakpoint on AArch64 should always be 4-byte
         aligned, but on AArch32, it can be 2-byte aligned.  Note that
         we only check the alignment when inserting a breakpoint
         because aarch64_point_is_aligned needs the regcache of the
         inferior_ptid inferior to decide whether the inferior is
         32-bit or 64-bit.  However, when GDB follows the parent
         process and detaches breakpoints from the child process,
         inferior_ptid is the child's ptid, but the child inferior
         doesn't exist in GDB's view yet.  */
      if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr, len))
        return -1;

      return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, -1);
    }
  else
    return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, -1);
}

/* This is essentially the same as aarch64_handle_breakpoint, except
   that it handles an aligned watchpoint.  */

static int
aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
                                   CORE_ADDR addr, int len, int is_insert,
                                   struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, addr);
  else
    return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, addr);
}

/* Insert/remove an unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and is
   ready to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a deletion.
   Return 0 on success.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
                                     CORE_ADDR addr, int len, int is_insert,
                                     struct aarch64_debug_reg_state *state)
{
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      CORE_ADDR addr_orig_next = addr_orig;

      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
                                &aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
        ret = aarch64_dr_state_insert_one_point (state, type, aligned_addr,
                                                 aligned_offset,
                                                 aligned_len, addr_orig);
      else
        ret = aarch64_dr_state_remove_one_point (state, type, aligned_addr,
                                                 aligned_offset,
                                                 aligned_len, addr_orig);

      if (show_debug_regs)
        debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
                      "                             "
                      "aligned_addr: %s, aligned_len: %d\n"
                      "                             "
                      "addr_orig: %s\n"
                      "                             "
                      "next_addr: %s, next_len: %d\n"
                      "                             "
                      "addr_orig_next: %s\n",
                      is_insert, core_addr_to_string_nz (aligned_addr),
                      aligned_len, core_addr_to_string_nz (addr_orig),
                      core_addr_to_string_nz (addr), len,
                      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      if (ret != 0)
        return ret;
    }

  return 0;
}

int
aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
                           int len, int is_insert,
                           struct aarch64_debug_reg_state *state)
{
  if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr, len))
    return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert,
                                              state);
  else
    return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
                                                state);
}

/* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
   registers with data from *STATE.  */

void
aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state *state,
                              int tid, int watchpoint)
{
  int i, count;
  struct iovec iov;
  struct user_hwdebug_state regs;
  const CORE_ADDR *addr;
  const unsigned int *ctrl;

  memset (&regs, 0, sizeof (regs));
  iov.iov_base = &regs;
  count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
  if (count == 0)
    return;
  iov.iov_len = (offsetof (struct user_hwdebug_state, dbg_regs)
                 + count * sizeof (regs.dbg_regs[0]));

  for (i = 0; i < count; i++)
    {
      regs.dbg_regs[i].addr = addr[i];
      regs.dbg_regs[i].ctrl = ctrl[i];
    }

  if (ptrace (PTRACE_SETREGSET, tid,
              watchpoint ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
              (void *) &iov))
    {
      /* Handle Linux kernels with the PR external/20207 bug.  */
      if (watchpoint && errno == EINVAL
          && kernel_supports_any_contiguous_range)
        {
          kernel_supports_any_contiguous_range = false;
          aarch64_downgrade_regs (state);
          aarch64_linux_set_debug_regs (state, tid, watchpoint);
          return;
        }
      error (_("Unexpected error setting hardware debug registers"));
    }
}

/* Print the values of the cached breakpoint/watchpoint registers.  */

void
aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
                              const char *func, CORE_ADDR addr,
                              int len, enum target_hw_bp_type type)
{
  int i;

  debug_printf ("%s", func);
  if (addr || len)
    debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
                  (unsigned long) addr, len,
                  type == hw_write ? "hw-write-watchpoint"
                  : (type == hw_read ? "hw-read-watchpoint"
                     : (type == hw_access ? "hw-access-watchpoint"
                        : (type == hw_execute ? "hw-breakpoint"
                           : "??unknown??"))));
  debug_printf (":\n");

  debug_printf ("\tBREAKPOINTs:\n");
  for (i = 0; i < aarch64_num_bp_regs; i++)
    debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
                  i, core_addr_to_string_nz (state->dr_addr_bp[i]),
                  state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);

  debug_printf ("\tWATCHPOINTs:\n");
  for (i = 0; i < aarch64_num_wp_regs; i++)
    debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
                  i, core_addr_to_string_nz (state->dr_addr_wp[i]),
                  core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
                  state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
}

/* Get the hardware debug register capacity information from the
   process represented by TID.  */

void
aarch64_linux_get_debug_reg_capacity (int tid)
{
  struct iovec iov;
  struct user_hwdebug_state dreg_state;

  iov.iov_base = &dreg_state;
  iov.iov_len = sizeof (dreg_state);

  /* Get hardware watchpoint register info.  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_WATCH, &iov) == 0
      && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
          || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
          || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
    {
      aarch64_num_wp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      if (aarch64_num_wp_regs > AARCH64_HWP_MAX_NUM)
        {
          warning (_("Unexpected number of hardware watchpoint registers"
                     " reported by ptrace, got %d, expected %d."),
                   aarch64_num_wp_regs, AARCH64_HWP_MAX_NUM);
          aarch64_num_wp_regs = AARCH64_HWP_MAX_NUM;
        }
    }
  else
    {
      warning (_("Unable to determine the number of hardware watchpoints"
                 " available."));
      aarch64_num_wp_regs = 0;
    }

  /* Get hardware breakpoint register info.  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_BREAK, &iov) == 0
      && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
          || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
          || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
    {
      aarch64_num_bp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      if (aarch64_num_bp_regs > AARCH64_HBP_MAX_NUM)
        {
          warning (_("Unexpected number of hardware breakpoint registers"
                     " reported by ptrace, got %d, expected %d."),
                   aarch64_num_bp_regs, AARCH64_HBP_MAX_NUM);
          aarch64_num_bp_regs = AARCH64_HBP_MAX_NUM;
        }
    }
  else
    {
      warning (_("Unable to determine the number of hardware breakpoints"
                 " available."));
      aarch64_num_bp_regs = 0;
    }
}

/* Return true if we can watch a memory region that starts at address
   ADDR and whose length is LEN bytes.  */

int
aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr, int len)
{
  CORE_ADDR aligned_addr;

  /* Cannot set watchpoints for zero or negative lengths.  */
  if (len <= 0)
    return 0;

  /* Must have hardware watchpoint debug register(s).  */
  if (aarch64_num_wp_regs == 0)
    return 0;

  /* We support unaligned watchpoint addresses and arbitrary lengths,
     as long as the size of the whole watched area after alignment
     doesn't exceed the size of the total area that all watchpoint
     debug registers can watch cooperatively.

     This is a very relaxed rule, but unfortunately there are
     limitations, e.g. false-positive hits, due to limited support of
     hardware debug registers in the kernel.  See the comment above
     aarch64_align_watchpoint for more information.  */

  aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
  if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
      < addr + len)
    return 0;

  /* All tests passed so we are likely to be able to set the watchpoint.
     The reason that it is 'likely' rather than 'must' is because
     we don't check the current usage of the watchpoint registers, and
     there may not be enough registers available for this watchpoint.
     Ideally we should check the cached debug register state, however
     the checking is costly.  */
  return 1;
}
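/* For example (hand-worked figures, assuming the common case of four
   watchpoint registers): a request to watch 26 bytes starting at
   0x1007 aligns down to 0x1000 and must reach 0x1021, but four 8-byte
   registers starting at 0x1000 cover only up to 0x1020, so the
   function returns 0; the same 26-byte request starting at 0x1008
   fits within the covered area and returns 1.  */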