]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/nat/aarch64-linux-hw-point.c
Automatic Copyright Year update after running gdb/copyright.py
[thirdparty/binutils-gdb.git] / gdb / nat / aarch64-linux-hw-point.c
1 /* Copyright (C) 2009-2022 Free Software Foundation, Inc.
2 Contributed by ARM Ltd.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "gdbsupport/common-defs.h"
20 #include "gdbsupport/break-common.h"
21 #include "gdbsupport/common-regcache.h"
22 #include "nat/linux-nat.h"
23 #include "aarch64-linux-hw-point.h"
24
25 #include <sys/uio.h>
26
27 /* The order in which <sys/ptrace.h> and <asm/ptrace.h> are included
28 can be important. <sys/ptrace.h> often declares various PTRACE_*
29 enums. <asm/ptrace.h> often defines preprocessor constants for
30 these very same symbols. When that's the case, build errors will
31 result when <asm/ptrace.h> is included before <sys/ptrace.h>. */
32 #include <sys/ptrace.h>
33 #include <asm/ptrace.h>
34
35 #include <elf.h>
36
/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via the ptrace calls
   with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively.  A value of
   zero means the capacity has not been (or could not be) determined;
   see aarch64_linux_get_debug_reg_capacity.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;
43
/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in 8-bit byte DR_CONTROL_MASK.  A buggy
   kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time.  Once flipped to false (see
   aarch64_linux_set_debug_regs) it is never set back to true.  */
static bool kernel_supports_any_contiguous_range = true;
51
52 /* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL. */
53
54 unsigned int
55 aarch64_watchpoint_offset (unsigned int ctrl)
56 {
57 uint8_t mask = DR_CONTROL_MASK (ctrl);
58 unsigned retval;
59
60 /* Shift out bottom zeros. */
61 for (retval = 0; mask && (mask & 1) == 0; ++retval)
62 mask >>= 1;
63
64 return retval;
65 }
66
67 /* Utility function that returns the length in bytes of a watchpoint
68 according to the content of a hardware debug control register CTRL.
69 Any contiguous range of bytes in CTRL is supported. The returned
70 value can be between 0..8 (inclusive). */
71
72 unsigned int
73 aarch64_watchpoint_length (unsigned int ctrl)
74 {
75 uint8_t mask = DR_CONTROL_MASK (ctrl);
76 unsigned retval;
77
78 /* Shift out bottom zeros. */
79 mask >>= aarch64_watchpoint_offset (ctrl);
80
81 /* Count bottom ones. */
82 for (retval = 0; (mask & 1) != 0; ++retval)
83 mask >>= 1;
84
85 if (mask != 0)
86 error (_("Unexpected hardware watchpoint length register value 0x%x"),
87 DR_CONTROL_MASK (ctrl));
88
89 return retval;
90 }
91
92 /* Given the hardware breakpoint or watchpoint type TYPE and its
93 length LEN, return the expected encoding for a hardware
94 breakpoint/watchpoint control register. */
95
96 static unsigned int
97 aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset, int len)
98 {
99 unsigned int ctrl, ttype;
100
101 gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
102 gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);
103
104 /* type */
105 switch (type)
106 {
107 case hw_write:
108 ttype = 2;
109 break;
110 case hw_read:
111 ttype = 1;
112 break;
113 case hw_access:
114 ttype = 3;
115 break;
116 case hw_execute:
117 ttype = 0;
118 break;
119 default:
120 perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
121 }
122
123 ctrl = ttype << 3;
124
125 /* offset and length bitmask */
126 ctrl |= ((1 << len) - 1) << (5 + offset);
127 /* enabled at el0 */
128 ctrl |= (2 << 1) | 1;
129
130 return ctrl;
131 }
132
133 /* Addresses to be written to the hardware breakpoint and watchpoint
134 value registers need to be aligned; the alignment is 4-byte and
135 8-type respectively. Linux kernel rejects any non-aligned address
136 it receives from the related ptrace call. Furthermore, the kernel
137 currently only supports the following Byte Address Select (BAS)
138 values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
139 watchpoint to be accepted by the kernel (via ptrace call), its
140 valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
141 Despite these limitations, the unaligned watchpoint is supported in
142 this port.
143
144 Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise. */
145
146 static int
147 aarch64_point_is_aligned (int is_watchpoint, CORE_ADDR addr, int len)
148 {
149 unsigned int alignment = 0;
150
151 if (is_watchpoint)
152 alignment = AARCH64_HWP_ALIGNMENT;
153 else
154 {
155 struct regcache *regcache
156 = get_thread_regcache_for_ptid (current_lwp_ptid ());
157
158 /* Set alignment to 2 only if the current process is 32-bit,
159 since thumb instruction can be 2-byte aligned. Otherwise, set
160 alignment to AARCH64_HBP_ALIGNMENT. */
161 if (regcache_register_size (regcache, 0) == 8)
162 alignment = AARCH64_HBP_ALIGNMENT;
163 else
164 alignment = 2;
165 }
166
167 if (addr & (alignment - 1))
168 return 0;
169
170 if ((!kernel_supports_any_contiguous_range
171 && len != 8 && len != 4 && len != 2 && len != 1)
172 || (kernel_supports_any_contiguous_range
173 && (len < 1 || len > 8)))
174 return 0;
175
176 return 1;
177 }
178
179 /* Given the (potentially unaligned) watchpoint address in ADDR and
180 length in LEN, return the aligned address, offset from that base
181 address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
182 and *ALIGNED_LEN_P, respectively. The returned values will be
183 valid values to write to the hardware watchpoint value and control
184 registers.
185
186 The given watchpoint may get truncated if more than one hardware
187 register is needed to cover the watched region. *NEXT_ADDR_P
188 and *NEXT_LEN_P, if non-NULL, will return the address and length
189 of the remaining part of the watchpoint (which can be processed
190 by calling this routine again to generate another aligned address,
191 offset and length tuple.
192
193 Essentially, unaligned watchpoint is achieved by minimally
194 enlarging the watched area to meet the alignment requirement, and
195 if necessary, splitting the watchpoint over several hardware
196 watchpoint registers.
197
198 On kernels that predate the support for Byte Address Select (BAS)
199 in the hardware watchpoint control register, the offset from the
200 base address is always zero, and so in that case the trade-off is
201 that there will be false-positive hits for the read-type or the
202 access-type hardware watchpoints; for the write type, which is more
203 commonly used, there will be no such issues, as the higher-level
204 breakpoint management in gdb always examines the exact watched
205 region for any content change, and transparently resumes a thread
206 from a watchpoint trap if there is no change to the watched region.
207
208 Another limitation is that because the watched region is enlarged,
209 the watchpoint fault address discovered by
210 aarch64_stopped_data_address may be outside of the original watched
211 region, especially when the triggering instruction is accessing a
212 larger region. When the fault address is not within any known
213 range, watchpoints_triggered in gdb will get confused, as the
214 higher-level watchpoint management is only aware of original
215 watched regions, and will think that some unknown watchpoint has
216 been triggered. To prevent such a case,
217 aarch64_stopped_data_address implementations in gdb and gdbserver
218 try to match the trapped address with a watched region, and return
219 an address within the latter. */
220
static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
			  int *aligned_offset_p, int *aligned_len_p,
			  CORE_ADDR *next_addr_p, int *next_len_p,
			  CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  /* Nothing left to align; leave all out-parameters untouched.  */
  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  /* Only fixed kernels (see kernel_supports_any_contiguous_range)
     honor a non-zero BAS offset; on buggy kernels we must watch from
     the aligned base, accepting false positives.  */
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
	 alignment boundary.  The caller re-invokes us with the
	 NEXT_* values to cover the remainder.  */
      aligned_len
	= max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
	 accommodate this watchpoint.  Indexed by (offset + len - 1);
	 only used for buggy kernels, which require 1/2/4/8.  */
      static const unsigned char
	aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
	{ 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
		     ? len : aligned_len_array[offset + len - 1]);
      addr += len;
      len = 0;
    }

  /* All out-parameters are optional.  */
  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  /* Advance the caller's original-address cursor to the next
     alignment boundary.  */
  if (next_addr_orig_p)
    *next_addr_orig_p = align_down (*next_addr_orig_p + alignment, alignment);
}
287
288 /* Helper for aarch64_notify_debug_reg_change. Records the
289 information about the change of one hardware breakpoint/watchpoint
290 setting for the thread LWP.
291 N.B. The actual updating of hardware debug registers is not
292 carried out until the moment the thread is resumed. */
293
294 static int
295 debug_reg_change_callback (struct lwp_info *lwp, int is_watchpoint,
296 unsigned int idx)
297 {
298 int tid = ptid_of_lwp (lwp).lwp ();
299 struct arch_lwp_info *info = lwp_arch_private_info (lwp);
300 dr_changed_t *dr_changed_ptr;
301 dr_changed_t dr_changed;
302
303 if (info == NULL)
304 {
305 info = XCNEW (struct arch_lwp_info);
306 lwp_set_arch_private_info (lwp, info);
307 }
308
309 if (show_debug_regs)
310 {
311 debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
312 debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
313 "dr_changed_wp=0x%s\n", tid,
314 phex (info->dr_changed_bp, 8),
315 phex (info->dr_changed_wp, 8));
316 }
317
318 dr_changed_ptr = is_watchpoint ? &info->dr_changed_wp
319 : &info->dr_changed_bp;
320 dr_changed = *dr_changed_ptr;
321
322 gdb_assert (idx >= 0
323 && (idx <= (is_watchpoint ? aarch64_num_wp_regs
324 : aarch64_num_bp_regs)));
325
326 /* The actual update is done later just before resuming the lwp,
327 we just mark that one register pair needs updating. */
328 DR_MARK_N_CHANGED (dr_changed, idx);
329 *dr_changed_ptr = dr_changed;
330
331 /* If the lwp isn't stopped, force it to momentarily pause, so
332 we can update its debug registers. */
333 if (!lwp_is_stopped (lwp))
334 linux_stop_lwp (lwp);
335
336 if (show_debug_regs)
337 {
338 debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
339 "dr_changed_wp=0x%s\n", tid,
340 phex (info->dr_changed_bp, 8),
341 phex (info->dr_changed_wp, 8));
342 }
343
344 return 0;
345 }
346
347 /* Notify each thread that their IDXth breakpoint/watchpoint register
348 pair needs to be updated. The message will be recorded in each
349 thread's arch-specific data area, the actual updating will be done
350 when the thread is resumed. */
351
352 static void
353 aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state *state,
354 int is_watchpoint, unsigned int idx)
355 {
356 ptid_t pid_ptid = ptid_t (current_lwp_ptid ().pid ());
357
358 iterate_over_lwps (pid_ptid, [=] (struct lwp_info *info)
359 {
360 return debug_reg_change_callback (info,
361 is_watchpoint,
362 idx);
363 });
364 }
365
366 /* Reconfigure STATE to be compatible with Linux kernels with the PR
367 external/20207 bug. This is called when
368 KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false. Note we
369 don't try to support combining watchpoints with matching (and thus
370 shared) masks, as it's too late when we get here. On buggy
371 kernels, GDB will try to first setup the perfect matching ranges,
372 which will run out of registers before this function can merge
373 them. It doesn't look like worth the effort to improve that, given
374 eventually buggy kernels will be phased out. */
375
static void
aarch64_downgrade_regs (struct aarch64_debug_reg_state *state)
{
  /* Walk every enabled watchpoint slot (enable is bit 0 of the
     control register) and widen its BAS mask to one a buggy kernel
     accepts.  */
  for (int i = 0; i < aarch64_num_wp_regs; ++i)
    if ((state->dr_ctrl_wp[i] & 1) != 0)
      {
	gdb_assert (state->dr_ref_count_wp[i] != 0);
	/* The byte address select (BAS) field lives in bits 5-12.  */
	uint8_t mask_orig = (state->dr_ctrl_wp[i] >> 5) & 0xff;
	gdb_assert (mask_orig != 0);
	/* The only BAS values a buggy kernel accepts, smallest
	   first.  */
	static const uint8_t old_valid[] = { 0x01, 0x03, 0x0f, 0xff };
	uint8_t mask = 0;
	/* Pick the smallest accepted mask that numerically covers
	   MASK_ORIG; since the accepted masks are runs of low bits,
	   it is a bit superset of MASK_ORIG.  */
	for (const uint8_t old_mask : old_valid)
	  if (mask_orig <= old_mask)
	    {
	      mask = old_mask;
	      break;
	    }
	gdb_assert (mask != 0);

	/* No update needed for this watchpoint?  */
	if (mask == mask_orig)
	  continue;
	/* OR-ing suffices because MASK is a superset of MASK_ORIG.  */
	state->dr_ctrl_wp[i] |= mask << 5;
	state->dr_addr_wp[i]
	  = align_down (state->dr_addr_wp[i], AARCH64_HWP_ALIGNMENT);

	/* Try to match duplicate entries.  */
	for (int j = 0; j < i; ++j)
	  if ((state->dr_ctrl_wp[j] & 1) != 0
	      && state->dr_addr_wp[j] == state->dr_addr_wp[i]
	      && state->dr_addr_orig_wp[j] == state->dr_addr_orig_wp[i]
	      && state->dr_ctrl_wp[j] == state->dr_ctrl_wp[i])
	    {
	      /* Fold slot I into the identical earlier slot J and
		 disable slot I.  */
	      state->dr_ref_count_wp[j] += state->dr_ref_count_wp[i];
	      state->dr_ref_count_wp[i] = 0;
	      state->dr_addr_wp[i] = 0;
	      state->dr_addr_orig_wp[i] = 0;
	      state->dr_ctrl_wp[i] &= ~1;
	      break;
	    }

	aarch64_notify_debug_reg_change (state, 1 /* is_watchpoint */, i);
      }
}
420
/* Record the insertion of one breakpoint/watchpoint, as represented
   by ADDR and CTRL, in the process' arch-specific data area *STATE.
   TYPE selects breakpoint vs watchpoint encoding, OFFSET/LEN describe
   the watched byte range within the doubleword at ADDR, and ADDR_ORIG
   is the original (possibly unaligned) requested address, used to
   distinguish watchpoints that alias to the same aligned ADDR.
   Return 0 on success, -1 if no hardware register slot is free.  */

static int
aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, idx, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      /* Breakpoints keep no separate original-address bookkeeping.  */
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find an existing or free register in our cache.  */
  idx = -1;
  for (i = 0; i < num_regs; ++i)
    {
      if ((dr_ctrl_p[i] & 1) == 0)
	{
	  gdb_assert (dr_ref_count[i] == 0);
	  idx = i;
	  /* no break; continue hunting for an existing one.  */
	}
      else if (dr_addr_p[i] == addr
	       && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	       && dr_ctrl_p[i] == ctrl)
	{
	  gdb_assert (dr_ref_count[i] != 0);
	  idx = i;
	  break;
	}
    }

  /* No space.  */
  if (idx == -1)
    return -1;

  /* Update our cache.  */
  if ((dr_ctrl_p[idx] & 1) == 0)
    {
      /* new entry */
      dr_addr_p[idx] = addr;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[idx] = addr_orig;
      dr_ctrl_p[idx] = ctrl;
      dr_ref_count[idx] = 1;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, idx);
    }
  else
    {
      /* existing entry */
      dr_ref_count[idx]++;
    }

  return 0;
}
500
/* Record the removal of one breakpoint/watchpoint, as represented by
   ADDR and CTRL, in the process' arch-specific data area *STATE.
   Parameters mirror aarch64_dr_state_insert_one_point; the slot is
   only released when its reference count drops to zero.  Return 0 on
   success, -1 if no matching slot was found.  */

static int
aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      /* Breakpoints keep no separate original-address bookkeeping.  */
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find the entry that matches the ADDR and CTRL.  */
  for (i = 0; i < num_regs; ++i)
    if (dr_addr_p[i] == addr
	&& (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	&& dr_ctrl_p[i] == ctrl)
      {
	gdb_assert (dr_ref_count[i] != 0);
	break;
      }

  /* Not found.  */
  if (i == num_regs)
    return -1;

  /* Clear our cache.  */
  if (--dr_ref_count[i] == 0)
    {
      /* Clear the enable bit.  */
      ctrl &= ~1;
      dr_addr_p[i] = 0;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[i] = 0;
      dr_ctrl_p[i] = ctrl;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, i);
    }

  return 0;
}
564
565 int
566 aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
567 int len, int is_insert,
568 struct aarch64_debug_reg_state *state)
569 {
570 if (is_insert)
571 {
572 /* The hardware breakpoint on AArch64 should always be 4-byte
573 aligned, but on AArch32, it can be 2-byte aligned. Note that
574 we only check the alignment on inserting breakpoint because
575 aarch64_point_is_aligned needs the inferior_ptid inferior's
576 regcache to decide whether the inferior is 32-bit or 64-bit.
577 However when GDB follows the parent process and detach breakpoints
578 from child process, inferior_ptid is the child ptid, but the
579 child inferior doesn't exist in GDB's view yet. */
580 if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr, len))
581 return -1;
582
583 return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, -1);
584 }
585 else
586 return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, -1);
587 }
588
589 /* This is essentially the same as aarch64_handle_breakpoint, apart
590 from that it is an aligned watchpoint to be handled. */
591
592 static int
593 aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
594 CORE_ADDR addr, int len, int is_insert,
595 struct aarch64_debug_reg_state *state)
596 {
597 if (is_insert)
598 return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, addr);
599 else
600 return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, addr);
601 }
602
/* Insert/remove unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and ready
   to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a deletion.
   Return 0 if succeed.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
				     CORE_ADDR addr, int len, int is_insert,
				     struct aarch64_debug_reg_state *state)
{
  /* Cursor over the original (unaligned) requested region, advanced
     one alignment chunk per iteration.  */
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      CORE_ADDR addr_orig_next = addr_orig;

      /* Carve one hardware-register-sized, aligned piece off the
	 front of [ADDR, ADDR+LEN); ADDR/LEN are updated in place to
	 the remainder.  */
      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
				&aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
	ret = aarch64_dr_state_insert_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);
      else
	ret = aarch64_dr_state_remove_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);

      if (show_debug_regs)
	debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
		      "                             "
		      "aligned_addr: %s, aligned_len: %d\n"
		      "                                "
		      "addr_orig: %s\n"
		      "                                "
		      "next_addr: %s,    next_len: %d\n"
		      "                           "
		      "addr_orig_next: %s\n",
		      is_insert, core_addr_to_string_nz (aligned_addr),
		      aligned_len, core_addr_to_string_nz (addr_orig),
		      core_addr_to_string_nz (addr), len,
		      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      /* Stop at the first failure; earlier pieces stay recorded.  */
      if (ret != 0)
	return ret;
    }

  return 0;
}
658
659 int
660 aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
661 int len, int is_insert,
662 struct aarch64_debug_reg_state *state)
663 {
664 if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr, len))
665 return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert,
666 state);
667 else
668 return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
669 state);
670 }
671
/* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
   registers with data from *STATE.  WATCHPOINT non-zero writes the
   watchpoint registers (NT_ARM_HW_WATCH), otherwise the breakpoint
   registers (NT_ARM_HW_BREAK).  */

void
aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state *state,
			      int tid, int watchpoint)
{
  int i, count;
  struct iovec iov;
  struct user_hwdebug_state regs;
  const CORE_ADDR *addr;
  const unsigned int *ctrl;

  memset (&regs, 0, sizeof (regs));
  iov.iov_base = &regs;
  count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
  if (count == 0)
    return;
  /* Transfer only the header plus the register slots in use.  */
  iov.iov_len = (offsetof (struct user_hwdebug_state, dbg_regs)
		 + count * sizeof (regs.dbg_regs[0]));

  for (i = 0; i < count; i++)
    {
      regs.dbg_regs[i].addr = addr[i];
      regs.dbg_regs[i].ctrl = ctrl[i];
    }

  if (ptrace (PTRACE_SETREGSET, tid,
	      watchpoint ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
	      (void *) &iov))
    {
      /* Handle Linux kernels with the PR external/20207 bug: such
	 kernels reject BAS masks other than 0x01/0x03/0x0f/0xff with
	 EINVAL.  Downgrade the cached state to the restricted
	 encodings and retry once (the flag guards against infinite
	 recursion).  */
      if (watchpoint && errno == EINVAL
	  && kernel_supports_any_contiguous_range)
	{
	  kernel_supports_any_contiguous_range = false;
	  aarch64_downgrade_regs (state);
	  aarch64_linux_set_debug_regs (state, tid, watchpoint);
	  return;
	}
      error (_("Unexpected error setting hardware debug registers"));
    }
}
717
718 /* See nat/aarch64-linux-hw-point.h. */
719
720 bool
721 aarch64_linux_any_set_debug_regs_state (aarch64_debug_reg_state *state,
722 bool watchpoint)
723 {
724 int count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
725 if (count == 0)
726 return false;
727
728 const CORE_ADDR *addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
729 const unsigned int *ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
730
731 for (int i = 0; i < count; i++)
732 if (addr[i] != 0 || ctrl[i] != 0)
733 return true;
734
735 return false;
736 }
737
738 /* Print the values of the cached breakpoint/watchpoint registers. */
739
740 void
741 aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
742 const char *func, CORE_ADDR addr,
743 int len, enum target_hw_bp_type type)
744 {
745 int i;
746
747 debug_printf ("%s", func);
748 if (addr || len)
749 debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
750 (unsigned long) addr, len,
751 type == hw_write ? "hw-write-watchpoint"
752 : (type == hw_read ? "hw-read-watchpoint"
753 : (type == hw_access ? "hw-access-watchpoint"
754 : (type == hw_execute ? "hw-breakpoint"
755 : "??unknown??"))));
756 debug_printf (":\n");
757
758 debug_printf ("\tBREAKPOINTs:\n");
759 for (i = 0; i < aarch64_num_bp_regs; i++)
760 debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
761 i, core_addr_to_string_nz (state->dr_addr_bp[i]),
762 state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);
763
764 debug_printf ("\tWATCHPOINTs:\n");
765 for (i = 0; i < aarch64_num_wp_regs; i++)
766 debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
767 i, core_addr_to_string_nz (state->dr_addr_wp[i]),
768 core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
769 state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
770 }
771
772 /* Return true if debug arch level is compatible for hw watchpoints
773 and breakpoints. */
774
775 static bool
776 compatible_debug_arch (unsigned int debug_arch)
777 {
778 if (debug_arch == AARCH64_DEBUG_ARCH_V8)
779 return true;
780 if (debug_arch == AARCH64_DEBUG_ARCH_V8_1)
781 return true;
782 if (debug_arch == AARCH64_DEBUG_ARCH_V8_2)
783 return true;
784 if (debug_arch == AARCH64_DEBUG_ARCH_V8_4)
785 return true;
786
787 return false;
788 }
789
/* Get the hardware debug register capacity information from the
   process represented by TID.  Fills in aarch64_num_wp_regs and
   aarch64_num_bp_regs, clamping to the static maxima and falling back
   to zero (with a warning) when the kernel or debug architecture does
   not cooperate.  */

void
aarch64_linux_get_debug_reg_capacity (int tid)
{
  struct iovec iov;
  struct user_hwdebug_state dreg_state;

  /* The same buffer is reused for both PTRACE_GETREGSET probes.  */
  iov.iov_base = &dreg_state;
  iov.iov_len = sizeof (dreg_state);

  /* Get hardware watchpoint register info.  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_WATCH, &iov) == 0
      && compatible_debug_arch (AARCH64_DEBUG_ARCH (dreg_state.dbg_info)))
    {
      aarch64_num_wp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      /* Clamp to what our per-process state arrays can hold.  */
      if (aarch64_num_wp_regs > AARCH64_HWP_MAX_NUM)
	{
	  warning (_("Unexpected number of hardware watchpoint registers"
		     " reported by ptrace, got %d, expected %d."),
		   aarch64_num_wp_regs, AARCH64_HWP_MAX_NUM);
	  aarch64_num_wp_regs = AARCH64_HWP_MAX_NUM;
	}
    }
  else
    {
      warning (_("Unable to determine the number of hardware watchpoints"
		 " available."));
      aarch64_num_wp_regs = 0;
    }

  /* Get hardware breakpoint register info.  */
  if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_BREAK, &iov) == 0
      && compatible_debug_arch (AARCH64_DEBUG_ARCH (dreg_state.dbg_info)))
    {
      aarch64_num_bp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
      /* Clamp to what our per-process state arrays can hold.  */
      if (aarch64_num_bp_regs > AARCH64_HBP_MAX_NUM)
	{
	  warning (_("Unexpected number of hardware breakpoint registers"
		     " reported by ptrace, got %d, expected %d."),
		   aarch64_num_bp_regs, AARCH64_HBP_MAX_NUM);
	  aarch64_num_bp_regs = AARCH64_HBP_MAX_NUM;
	}
    }
  else
    {
      warning (_("Unable to determine the number of hardware breakpoints"
		 " available."));
      aarch64_num_bp_regs = 0;
    }
}
842
843 /* Return true if we can watch a memory region that starts address
844 ADDR and whose length is LEN in bytes. */
845
846 int
847 aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr, int len)
848 {
849 CORE_ADDR aligned_addr;
850
851 /* Can not set watchpoints for zero or negative lengths. */
852 if (len <= 0)
853 return 0;
854
855 /* Must have hardware watchpoint debug register(s). */
856 if (aarch64_num_wp_regs == 0)
857 return 0;
858
859 /* We support unaligned watchpoint address and arbitrary length,
860 as long as the size of the whole watched area after alignment
861 doesn't exceed size of the total area that all watchpoint debug
862 registers can watch cooperatively.
863
864 This is a very relaxed rule, but unfortunately there are
865 limitations, e.g. false-positive hits, due to limited support of
866 hardware debug registers in the kernel. See comment above
867 aarch64_align_watchpoint for more information. */
868
869 aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
870 if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
871 < addr + len)
872 return 0;
873
874 /* All tests passed so we are likely to be able to set the watchpoint.
875 The reason that it is 'likely' rather than 'must' is because
876 we don't check the current usage of the watchpoint registers, and
877 there may not be enough registers available for this watchpoint.
878 Ideally we should check the cached debug register state, however
879 the checking is costly. */
880 return 1;
881 }