/* Copyright (C) 2009-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "gdbsupport/break-common.h"
#include "gdbsupport/common-regcache.h"
#include "aarch64-hw-point.h"

#ifdef __linux__
/* For kernel_supports_any_contiguous_range.  */
#include "aarch64-linux-hw-point.h"
#else
#define kernel_supports_any_contiguous_range true
#endif

/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via ptrace.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;

/* Return the starting byte (0..7 inclusive) of a watchpoint encoded by
   CTRL.  */

unsigned int
aarch64_watchpoint_offset (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  for (retval = 0; mask && (mask & 1) == 0; ++retval)
    mask >>= 1;

  return retval;
}
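
/* Illustrative example (not part of the upstream source): if
   DR_CONTROL_MASK (ctrl) yields the BAS byte 0b00111100, two zero bits
   are shifted out and the function returns 2, meaning the watched
   bytes start at byte 2 of the doubleword held in the value
   register.  */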

/* Utility function that returns the length in bytes of a watchpoint
   according to the content of a hardware debug control register CTRL.
   Any contiguous range of bytes in CTRL is supported.  The returned
   value can be between 0..8 (inclusive).  */

unsigned int
aarch64_watchpoint_length (unsigned int ctrl)
{
  uint8_t mask = DR_CONTROL_MASK (ctrl);
  unsigned retval;

  /* Shift out bottom zeros.  */
  mask >>= aarch64_watchpoint_offset (ctrl);

  /* Count bottom ones.  */
  for (retval = 0; (mask & 1) != 0; ++retval)
    mask >>= 1;

  if (mask != 0)
    error (_("Unexpected hardware watchpoint length register value 0x%x"),
	   DR_CONTROL_MASK (ctrl));

  return retval;
}
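
/* Illustrative example (not part of the upstream source), continuing
   the one above: for the BAS byte 0b00111100, the offset of 2 is
   shifted out and four consecutive one bits remain, so the length is
   4 bytes.  A non-contiguous BAS byte such as 0b00101100 leaves a
   non-zero remainder after counting the low ones and is rejected with
   the error above.  */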

/* Given the hardware breakpoint or watchpoint type TYPE, the byte
   OFFSET of the watched region within an aligned doubleword and its
   length LEN, return the expected encoding for a hardware
   breakpoint/watchpoint control register.  */

static unsigned int
aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset, int len)
{
  unsigned int ctrl, ttype;

  gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
  gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);

  /* type */
  switch (type)
    {
    case hw_write:
      ttype = 2;
      break;
    case hw_read:
      ttype = 1;
      break;
    case hw_access:
      ttype = 3;
      break;
    case hw_execute:
      ttype = 0;
      break;
    default:
      perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
    }

  ctrl = ttype << 3;

  /* offset and length bitmask */
  ctrl |= ((1 << len) - 1) << (5 + offset);
  /* enabled at el0 */
  ctrl |= (2 << 1) | 1;

  return ctrl;
}
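
/* Illustrative example (not part of the upstream source): for a 4-byte
   write watchpoint at offset 0, the value built above is
     (2 << 3)                = 0x010   (load/store type for hw_write)
     (((1 << 4) - 1) << 5)   = 0x1e0   (BAS bits selecting 4 bytes)
     (2 << 1) | 1            = 0x005   (EL0 privilege and enable bits)
   so the returned ctrl value is 0x1f5.  */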

/* Addresses to be written to the hardware breakpoint and watchpoint
   value registers need to be aligned; the alignment is 4-byte and
   8-byte respectively.  The Linux kernel rejects any non-aligned
   address it receives from the related ptrace call.  Furthermore, the
   kernel currently only supports the following Byte Address Select
   (BAS) values: 0x1, 0x3, 0xf and 0xff, which means that for a
   hardware watchpoint to be accepted by the kernel (via ptrace call),
   its valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
   Despite these limitations, unaligned watchpoints are supported in
   this port.

   Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise.  */

static int
aarch64_point_is_aligned (ptid_t ptid, int is_watchpoint, CORE_ADDR addr,
			  int len)
{
  unsigned int alignment = 0;

  if (is_watchpoint)
    alignment = AARCH64_HWP_ALIGNMENT;
  else
    {
      reg_buffer_common *regcache = get_thread_regcache_for_ptid (ptid);

      /* Set alignment to 2 only if the current process is 32-bit,
	 since thumb instructions can be 2-byte aligned.  Otherwise, set
	 alignment to AARCH64_HBP_ALIGNMENT.  */
      if (regcache_register_size (regcache, 0) == 8)
	alignment = AARCH64_HBP_ALIGNMENT;
      else
	alignment = 2;
    }

  if (addr & (alignment - 1))
    return 0;

  if ((!kernel_supports_any_contiguous_range
       && len != 8 && len != 4 && len != 2 && len != 1)
      || (kernel_supports_any_contiguous_range
	  && (len < 1 || len > 8)))
    return 0;

  return 1;
}
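
/* Illustrative example (not part of the upstream source): a watchpoint
   at addr 0x1000 with len 6 passes the address check, but the length
   check only succeeds when kernel_supports_any_contiguous_range is
   true; on older kernels restricted to BAS values 0x1, 0x3, 0xf and
   0xff, only lengths 1, 2, 4 and 8 are accepted here, and such a
   request falls back to the unaligned handling further below.  */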

/* Given the (potentially unaligned) watchpoint address in ADDR and
   length in LEN, return the aligned address, offset from that base
   address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
   and *ALIGNED_LEN_P, respectively.  The returned values will be
   valid values to write to the hardware watchpoint value and control
   registers.

   The given watchpoint may get truncated if more than one hardware
   register is needed to cover the watched region.  *NEXT_ADDR_P
   and *NEXT_LEN_P, if non-NULL, will return the address and length
   of the remaining part of the watchpoint (which can be processed
   by calling this routine again to generate another aligned address,
   offset and length tuple).

   Essentially, an unaligned watchpoint is achieved by minimally
   enlarging the watched area to meet the alignment requirement, and
   if necessary, splitting the watchpoint over several hardware
   watchpoint registers.

   On kernels that predate the support for Byte Address Select (BAS)
   in the hardware watchpoint control register, the offset from the
   base address is always zero, and so in that case the trade-off is
   that there will be false-positive hits for the read-type or the
   access-type hardware watchpoints; for the write type, which is more
   commonly used, there will be no such issues, as the higher-level
   breakpoint management in gdb always examines the exact watched
   region for any content change, and transparently resumes a thread
   from a watchpoint trap if there is no change to the watched region.

   Another limitation is that because the watched region is enlarged,
   the watchpoint fault address discovered by
   aarch64_stopped_data_address may be outside of the original watched
   region, especially when the triggering instruction is accessing a
   larger region.  When the fault address is not within any known
   range, watchpoints_triggered in gdb will get confused, as the
   higher-level watchpoint management is only aware of the original
   watched regions, and will think that some unknown watchpoint has
   been triggered.  To prevent such a case, the
   aarch64_stopped_data_address implementations in gdb and gdbserver
   try to match the trapped address with a watched region, and return
   an address within the latter.  */

static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
			  int *aligned_offset_p, int *aligned_len_p,
			  CORE_ADDR *next_addr_p, int *next_len_p,
			  CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
	 alignment boundary.  */
      aligned_len
	= max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
	 accommodate this watchpoint.  */
      static const unsigned char
	aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
	{ 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
		     ? len : aligned_len_array[offset + len - 1]);
      addr += len;
      len = 0;
    }

  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  if (next_addr_orig_p)
    *next_addr_orig_p = align_down (*next_addr_orig_p + alignment, alignment);
}
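
/* Worked example (illustrative, not part of the upstream source): a
   watch request for addr 0x100e, len 5 is split over two calls.  First
   call: offset = 6, aligned_addr = 0x1008; since offset + len >= 8 the
   region is truncated, yielding aligned_len 2 (BAS-capable kernel) or
   8 (older kernel), with next_addr 0x1010 and next_len 3.  Second
   call: offset = 0, aligned_addr = 0x1010, aligned_len 3 (BAS-capable)
   or 4 (from aligned_len_array), and next_len becomes 0.  */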

/* Record the insertion of one breakpoint/watchpoint, as represented
   by ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_insert_one_point (ptid_t ptid,
				   struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, idx, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  gdb_assert (aarch64_point_is_aligned (ptid, is_watchpoint, addr, len));
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find an existing or free register in our cache.  */
  idx = -1;
  for (i = 0; i < num_regs; ++i)
    {
      if ((dr_ctrl_p[i] & 1) == 0)
	{
	  gdb_assert (dr_ref_count[i] == 0);
	  idx = i;
	  /* no break; continue hunting for an existing one.  */
	}
      else if (dr_addr_p[i] == addr
	       && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	       && dr_ctrl_p[i] == ctrl)
	{
	  gdb_assert (dr_ref_count[i] != 0);
	  idx = i;
	  break;
	}
    }

  /* No space.  */
  if (idx == -1)
    return -1;

  /* Update our cache.  */
  if ((dr_ctrl_p[idx] & 1) == 0)
    {
      /* new entry */
      dr_addr_p[idx] = addr;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[idx] = addr_orig;
      dr_ctrl_p[idx] = ctrl;
      dr_ref_count[idx] = 1;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (ptid, is_watchpoint, idx);
    }
  else
    {
      /* existing entry */
      dr_ref_count[idx]++;
    }

  return 0;
}
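
/* Illustrative note (not part of the upstream source): the loop above
   deduplicates identical requests.  If the same (addr, addr_orig,
   ctrl) tuple is inserted twice, the second insertion reuses the
   existing slot and only increments dr_ref_count;
   aarch64_notify_debug_reg_change runs only when a slot is first
   populated, and the slot is freed again only when the reference count
   returns to zero in the removal routine below.  */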

/* Record the removal of one breakpoint/watchpoint, as represented by
   ADDR and CTRL, in the process' arch-specific data area *STATE.  */

static int
aarch64_dr_state_remove_one_point (ptid_t ptid,
				   struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find the entry that matches the ADDR and CTRL.  */
  for (i = 0; i < num_regs; ++i)
    if (dr_addr_p[i] == addr
	&& (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	&& dr_ctrl_p[i] == ctrl)
      {
	gdb_assert (dr_ref_count[i] != 0);
	break;
      }

  /* Not found.  */
  if (i == num_regs)
    return -1;

  /* Clear our cache.  */
  if (--dr_ref_count[i] == 0)
    {
      /* Clear the enable bit.  */
      ctrl &= ~1;
      dr_addr_p[i] = 0;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[i] = 0;
      dr_ctrl_p[i] = ctrl;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (ptid, is_watchpoint, i);
    }

  return 0;
}

int
aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
			   int len, int is_insert, ptid_t ptid,
			   struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    {
      /* A hardware breakpoint on AArch64 must always be 4-byte
	 aligned, but on AArch32 it can be 2-byte aligned.  Note that
	 we only check the alignment when inserting a breakpoint
	 because aarch64_point_is_aligned needs the regcache of the
	 inferior_ptid inferior to decide whether the inferior is
	 32-bit or 64-bit.  However, when GDB follows the parent
	 process and detaches breakpoints from the child process,
	 inferior_ptid is the child's ptid, but the child inferior
	 doesn't exist in GDB's view yet.  */
      if (!aarch64_point_is_aligned (ptid, 0 /* is_watchpoint */ , addr, len))
	return -1;

      return aarch64_dr_state_insert_one_point (ptid, state, type, addr, 0, len,
						-1);
    }
  else
    return aarch64_dr_state_remove_one_point (ptid, state, type, addr, 0, len,
					      -1);
}

/* This is essentially the same as aarch64_handle_breakpoint, except
   that it handles an aligned watchpoint.  */

static int
aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
				   CORE_ADDR addr, int len, int is_insert,
				   ptid_t ptid,
				   struct aarch64_debug_reg_state *state)
{
  if (is_insert)
    return aarch64_dr_state_insert_one_point (ptid, state, type, addr, 0, len,
					      addr);
  else
    return aarch64_dr_state_remove_one_point (ptid, state, type, addr, 0, len,
					      addr);
}

/* Insert/remove an unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and is
   ready to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a deletion.
   Return 0 on success.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
				     CORE_ADDR addr, int len, int is_insert,
				     ptid_t ptid,
				     struct aarch64_debug_reg_state *state)
{
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      CORE_ADDR addr_orig_next = addr_orig;

      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
				&aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
	ret = aarch64_dr_state_insert_one_point (ptid, state, type,
						 aligned_addr, aligned_offset,
						 aligned_len, addr_orig);
      else
	ret = aarch64_dr_state_remove_one_point (ptid, state, type,
						 aligned_addr, aligned_offset,
						 aligned_len, addr_orig);

      if (show_debug_regs)
	debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
		      "                             "
		      "aligned_addr: %s, aligned_len: %d\n"
		      "                             "
		      "addr_orig: %s\n"
		      "                             "
		      "next_addr: %s, next_len: %d\n"
		      "                             "
		      "addr_orig_next: %s\n",
		      is_insert, core_addr_to_string_nz (aligned_addr),
		      aligned_len, core_addr_to_string_nz (addr_orig),
		      core_addr_to_string_nz (addr), len,
		      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      if (ret != 0)
	return ret;
    }

  return 0;
}
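
/* Illustrative trace (not part of the upstream source), continuing the
   aarch64_align_watchpoint example above: inserting a watchpoint for
   addr 0x100e, len 5 makes this loop run twice and consume two
   hardware registers: value register 0x1008 with a 2-byte BAS at
   offset 6, then value register 0x1010 with a 3-byte BAS at offset 0
   (or 8-byte and 4-byte regions on kernels without contiguous-range
   support).  */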

int
aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
			   int len, int is_insert, ptid_t ptid,
			   struct aarch64_debug_reg_state *state)
{
  if (aarch64_point_is_aligned (ptid, 1 /* is_watchpoint */ , addr, len))
    return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert, ptid,
					      state);
  else
    return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
						ptid, state);
}

/* See nat/aarch64-hw-point.h.  */

bool
aarch64_any_set_debug_regs_state (aarch64_debug_reg_state *state,
				  bool watchpoint)
{
  int count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  if (count == 0)
    return false;

  const CORE_ADDR *addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  const unsigned int *ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;

  for (int i = 0; i < count; i++)
    if (addr[i] != 0 || ctrl[i] != 0)
      return true;

  return false;
}

/* Print the values of the cached breakpoint/watchpoint registers.  */

void
aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
			      const char *func, CORE_ADDR addr,
			      int len, enum target_hw_bp_type type)
{
  int i;

  debug_printf ("%s", func);
  if (addr || len)
    debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
		  (unsigned long) addr, len,
		  type == hw_write ? "hw-write-watchpoint"
		  : (type == hw_read ? "hw-read-watchpoint"
		     : (type == hw_access ? "hw-access-watchpoint"
			: (type == hw_execute ? "hw-breakpoint"
			   : "??unknown??"))));
  debug_printf (":\n");

  debug_printf ("\tBREAKPOINTs:\n");
  for (i = 0; i < aarch64_num_bp_regs; i++)
    debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
		  i, core_addr_to_string_nz (state->dr_addr_bp[i]),
		  state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);

  debug_printf ("\tWATCHPOINTs:\n");
  for (i = 0; i < aarch64_num_wp_regs; i++)
    debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
		  i, core_addr_to_string_nz (state->dr_addr_wp[i]),
		  core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
		  state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
}

/* Return true if we can watch a memory region that starts at address
   ADDR and whose length is LEN in bytes.  */

int
aarch64_region_ok_for_watchpoint (CORE_ADDR addr, int len)
{
  CORE_ADDR aligned_addr;

  /* Cannot set watchpoints for zero or negative lengths.  */
  if (len <= 0)
    return 0;

  /* Must have hardware watchpoint debug register(s).  */
  if (aarch64_num_wp_regs == 0)
    return 0;

  /* We support unaligned watchpoint addresses and arbitrary lengths,
     as long as the size of the whole watched area after alignment
     doesn't exceed the size of the total area that all watchpoint
     debug registers can watch cooperatively.

     This is a very relaxed rule, but unfortunately there are
     limitations, e.g. false-positive hits, due to limited support of
     hardware debug registers in the kernel.  See the comment above
     aarch64_align_watchpoint for more information.  */

  aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
  if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
      < addr + len)
    return 0;

  /* All tests passed so we are likely to be able to set the watchpoint.
     The reason that it is 'likely' rather than 'must' is because
     we don't check the current usage of the watchpoint registers, and
     there may not be enough registers available for this watchpoint.
     Ideally we should check the cached debug register state, however
     the checking is costly.  */
  return 1;
}
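
/* Worked example (illustrative, not part of the upstream source): with
   aarch64_num_wp_regs == 4, a request for addr 0x1001, len 31 aligns
   down to 0x1000 and needs coverage up to 0x1020; the four registers
   can watch 4 * 8 = 32 bytes starting at 0x1000, i.e. up to 0x1020, so
   the request is considered acceptable.  A request for addr 0x1001,
   len 32 would need coverage up to 0x1021 and is rejected.  */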