/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2025 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "extract-store-integer.h"
#include "cli/cli-cmds.h"
#include "reggroups.h"
#include "arch-utils.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "dwarf2/frame.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "gdbsupport/selftest.h"
#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"
#include "arch/aarch64-mte.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "opcode/aarch64.h"
#include <unordered_map>
/* For inferior_ptid and current_inferior ().  */
#include "inferior.h"

/* For std::sqrt and std::pow.  */
#include <cmath>
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4
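/* For example, struct { float x, y, z; } is an HFA of three floats and is
   eligible for SIMD/FP register passing, while an aggregate of five floats
   exceeds HA_MAX_NUM_FLDS and must be passed by other means.  */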
/* All possible aarch64 target descriptors.  */
static std::unordered_map<aarch64_features, target_desc *> tdesc_aarch64_map;
/* The standard register names, and all the valid aliases for them.
   We're not adding fp here, that name is already taken, see
   _initialize_frame_reg.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* Link register alias for x30.  */
  {"lr", AARCH64_LR_REGNUM},
  /* SP is the canonical name for x31 according to aarch64_r_register_names,
     so we're adding an x31 alias for sp.  */
  {"x31", AARCH64_SP_REGNUM},
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
};
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer, low half/user pointers.  */
  "pauth_dmask",
  /* Authentication mask for code pointer, low half/user pointers.  */
  "pauth_cmask",
  /* Authentication mask for data pointer, high half / kernel pointers.  */
  "pauth_dmask_high",
  /* Authentication mask for code pointer, high half / kernel pointers.  */
  "pauth_cmask_high"
};
static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};
static const char *const aarch64_gcs_register_names[] = {
  /* Guarded Control Stack Pointer Register.  */
  "gcspr"
};
static const char *const aarch64_gcs_linux_register_names[] = {
  /* Field in struct user_gcs.  */
  "gcs_features_enabled",
  /* Field in struct user_gcs.  */
  "gcs_features_locked",
};
static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);
/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};
/* Holds information used to read/write from/to ZA
   pseudo-registers.

   With this information, the read/write code can be simplified so it
   deals only with the required information to map a ZA pseudo-register
   to the exact bytes into the ZA contents buffer.  Otherwise we'd need
   to use a lot of conditionals.  */

struct za_offsets
{
  /* Offset, into ZA, of the starting byte of the pseudo-register.  */
  size_t starting_offset;
  /* The size of the contiguous chunks of the pseudo-register.  */
  size_t chunk_size;
  /* The number of pseudo-register chunks contained in ZA.  */
  size_t chunk_count;
  /* The offset between each contiguous chunk.  */
  size_t stride_size;
};
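/* As a rough illustration of the fields above: a horizontal ZA tile slice
   is a single contiguous run of bytes (one chunk), whereas a vertical tile
   slice is made of many element-sized chunks separated by a stride of one
   full ZA row.  */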
/* Holds data that is helpful to determine the individual fields that make
   up the names of the ZA pseudo-registers.  It is also very helpful to
   determine offsets, stride and sizes for reading ZA tiles and tile
   slices.  */

struct za_pseudo_encoding
{
  /* The slice index (0 ~ svl).  Only used for tile slices.  */
  uint8_t slice_index;
  /* The tile number (0 ~ 15).  */
  uint8_t tile_index;
  /* Direction (horizontal/vertical).  Only used for tile slices.  */
  bool horizontal;
  /* Qualifier index (0 ~ 4).  These map to B, H, S, D and Q.  */
  uint8_t qualifier_index;
};
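/* For example, a horizontal S-qualified slice 2 of tile 3 would be encoded
   as tile_index = 3, horizontal = true, qualifier_index = 2 (B, H, S, D, Q
   order) and slice_index = 2.  */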
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}
namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};
/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len,
		 enum bfd_endian byte_order) override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} /* namespace */
/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
			 const frame_info_ptr &this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->ra_sign_state_regnum))
    {
      /* VA range select (bit 55) tells us whether to use the low half masks
	 or the high half masks.  */
      int cmask_num;
      if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK)
	cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
      else
	cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

      /* By default, we assume TBI and discard the top 8 bits plus the VA range
	 select bit (55).  */
      CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
      mask |= frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = aarch64_remove_top_bits (addr, mask);

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
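/* As an illustration, assuming a 48-bit virtual address space: the cmask
   register covers bits 54:48, so ORing it into AARCH64_TOP_BITS_MASK yields
   a mask of bits 63:48, and clearing those bits recovers the original,
   unsigned return address.  */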
/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (const frame_info_ptr &frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader &reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    regs[rd] = pv_add_constant (regs[rn],
					inst.operands[2].imm.value);
	  else
	    regs[rd] = pv_add_constant (regs[rn],
					-inst.operands[2].imm.value);

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == addsub_ext
	       && strcmp ("sub", inst.opcode->name) == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);

	  regs[rd] = pv_subtract (regs[rn], regs[rm]);
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  unsigned rd = inst.operands[0].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
	  gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[rd] = pv_constant (inst.operands[1].imm.value
				  << inst.operands[1].shifter.amount);
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate).  */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST
namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len,
		 enum bfd_endian byte_order) override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
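    /* As a sanity check on the first encoding: in 0xa9af7bfd (stp x29, x30,
       [sp,#-272]!), the signed 7-bit immediate field (bits 21:15) holds -34,
       which is scaled by the 8-byte element size to give the -272 offset.  */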
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} /* namespace selftests */
#endif /* GDB_SELF_TEST */
/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, func_end_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  bool func_addr_found
    = find_pc_partial_function (pc, NULL, &func_addr, &func_end_addr);

  if (func_addr_found)
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  limit_pc
    = func_end_addr == 0 ? limit_pc : std::min (limit_pc, func_end_addr - 4);

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (const frame_info_ptr &this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (const frame_info_ptr &this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp;
  if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					cache->prev_pc))
    cache->prev_sp += cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (const frame_info_ptr &this_frame,
			     void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (const frame_info_ptr &this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (const frame_info_ptr &this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */

  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          | <- Previous SP
	 +----------+
  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
/* AArch64 prologue unwinder.  */
static const frame_unwind_legacy aarch64_prologue_unwind (
  "aarch64 prologue",
  NORMAL_FRAME,
  FRAME_UNWIND_ARCH,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
);
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (const frame_info_ptr &this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (const frame_info_ptr &this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}
/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     const frame_info_ptr &this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}
/* AArch64 stub unwinder.  */
static const frame_unwind_legacy aarch64_stub_unwind (
  "aarch64 stub",
  NORMAL_FRAME,
  FRAME_UNWIND_ARCH,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
);
/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (const frame_info_ptr &this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}
/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (const frame_info_ptr &this_frame,
			      void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (_("Unexpected register %d"), regnum);
    }
}
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       const frame_info_ptr &this_frame)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->ra_sign_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum >= tdep->pauth_reg_base
	       && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }

  if (tdep->has_gcs () && tdep->fn_prev_gcspr != nullptr
      && regnum == tdep->gcs_reg_base)
    {
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = tdep->fn_prev_gcspr;
    }
}
/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
					     gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
	 choices of such instructions with different immediate values.
	 Different OS' may use a different variation, but they have the
	 same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}
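/* For instance, BRK #0 encodes as 0xd4200000 and BRK #0xf000 (used by some
   environments) as 0xd43e0000; masking either with BRK_INSN_MASK yields
   BRK_INSN_BASE, so both are treated as permanent breakpoints.  */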
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (t->length () > 16)
	return 16;
      else
	return t->length ();
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
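/* For example, a vector of four floats (16 bytes) keeps its natural 16-byte
   alignment, a 32-byte vector is capped at 16 bytes, and returning 0 for
   everything else defers to the generic alignment code.  */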
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (type->field (i).is_static ())
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */
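/* For example, struct { float x; float y[2]; } is an HFA of three floats
   (count == 3, fundamental type float), whereas struct { float f; double d; }
   mixes base types and is rejected.  */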
static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  /* Get the base type.  */
  type = check_typedef (type);

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }

  return false;
}
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = type->length ();
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = arg->contents ().data ();

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);

      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
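/* As a worked example of the big-endian adjustment above: a 3-byte struct
   is shifted left by (8 - 3) * 8 = 40 bits, so its bytes end up in the most
   significant end of the X register rather than the least.  */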
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb::byte_vector reg (register_size (gdbarch, regnum), 0);
      gdb_assert (len <= reg.size ());

      info->argnum++;
      info->nsrn++;

      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg.data (), buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }

  return 0;
}
/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = arg->contents ().data ();
  int len = type->length ();
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
			info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = type->length ();
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
/* Pass a value, which is of type arg_type, in a V register.  Assumes value
   is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will
   have been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			arg->contents ().data ());

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = arg->contents ().data ();
	struct type *target_type = check_typedef (arg_type->target_type ());

	if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, target_type->length (),
			  buf + target_type->length ());
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			  arg->contents ().data ());
      [[fallthrough]];

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (arg_type->field (i).is_static ())
	    continue;

	  struct value *field = arg->primitive_field (0, i, arg_type);
	  struct type *field_type = check_typedef (field->type ());

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
/* Push LR_VALUE to the Guarded Control Stack.  */

static void
aarch64_push_gcs_entry (regcache *regs, CORE_ADDR lr_value)
{
  gdbarch *arch = regs->arch ();
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR gcs_addr;

  register_status status = regs->cooked_read (tdep->gcs_reg_base, &gcs_addr);
  if (status != REG_VALID)
    error (_("Can't read $gcspr."));

  gcs_addr -= 8;
  gdb_byte buf[8];
  store_integer (buf, gdbarch_byte_order (arch), lr_value);
  if (target_write_memory (gcs_addr, buf, sizeof (buf)) != 0)
    error (_("Can't write to Guarded Control Stack."));

  /* Update GCSPR.  */
  regcache_cooked_write_unsigned (regs, tdep->gcs_reg_base, gcs_addr);
}
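/* Note that the Guarded Control Stack grows toward lower addresses: a push
   first decrements GCSPR by 8 and then stores the new entry there, mirroring
   what the hardware does when a BL pushes a return address.  */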
/* Remove the newest entry from the Guarded Control Stack.  */

static void
aarch64_pop_gcs_entry (regcache *regs)
{
  gdbarch *arch = regs->arch ();
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR gcs_addr;

  register_status status = regs->cooked_read (tdep->gcs_reg_base, &gcs_addr);
  if (status != REG_VALID)
    error (_("Can't read $gcspr."));

  /* Update GCSPR.  */
  regcache_cooked_write_unsigned (regs, tdep->gcs_reg_base, gcs_addr + 8);
}
/* Implement the "shadow_stack_push" gdbarch method.  */

static void
aarch64_shadow_stack_push (gdbarch *gdbarch, CORE_ADDR new_addr,
			   regcache *regcache)
{
  aarch64_push_gcs_entry (regcache, new_addr);
}
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (arg->type ());
      len = arg_type->length ();

      /* If arg can be passed in v registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (arg_type->code ())
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4 && !is_fixed_point_type (arg_type))
	    {
	      /* Promote to 32 bit integer.  */
	      if (arg_type->is_unsigned ())
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, arg->contents ().data (), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}
/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}
/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}
/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_bfloat16;
      append_composite_type_field (t, "bf", elem);

      elem = builtin_type (gdbarch)->builtin_half;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}
/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}
/* Return TRUE if REGNUM is a ZA tile slice pseudo-register number.  Return
   FALSE otherwise.  */

static bool
is_sme_tile_slice_pseudo_register (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  if (tdep->sme_tile_slice_pseudo_base <= regnum
      && regnum < tdep->sme_tile_slice_pseudo_base
		  + tdep->sme_tile_slice_pseudo_count)
    return true;

  return false;
}
/* Given REGNUM, a ZA pseudo-register number, return, in ENCODING, the
   decoded fields that make up its name.  */

static void
aarch64_za_decode_pseudos (struct gdbarch *gdbarch, int regnum,
			   struct za_pseudo_encoding &encoding)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    {
      /* Calculate the tile slice pseudo-register offset relative to the
	 other tile slice pseudo-registers.  */
      int offset = regnum - tdep->sme_tile_slice_pseudo_base;

      /* Fetch the qualifier.  We can have 160 to 2560 possible tile slice
	 pseudo-registers.  Each qualifier (we have 5 of them: B, H, S, D
	 and Q) covers 32 * svq pseudo-registers, so we divide the offset
	 by that constant.  */
      size_t qualifier = offset / (tdep->sme_svq * 32);
      encoding.qualifier_index = qualifier;

      /* Prepare to fetch the direction (d), tile number (t) and slice
	 number (s).  */
      int dts = offset % (tdep->sme_svq * 32);

      /* The direction is represented by the even/odd numbers.
	 Even-numbered pseudo-registers are horizontal tile slices and
	 odd-numbered pseudo-registers are vertical tile slices.  */
      encoding.horizontal = !(dts & 1);

      /* Fetch the tile number.  The tile number is closely related to the
	 qualifier.  B has 1 tile, H has 2 tiles, S has 4 tiles, D has 8
	 tiles and Q has 16 tiles.  */
      encoding.tile_index = (dts >> 1) & ((1 << qualifier) - 1);

      /* Fetch the slice number.  The slice number is closely related to
	 the qualifier and the svl.  */
      encoding.slice_index = dts >> (qualifier + 1);
    }
  else
    {
      /* Calculate the tile pseudo-register offset relative to the other
	 tile pseudo-registers.  */
      int offset = regnum - tdep->sme_tile_pseudo_base;

      encoding.qualifier_index = std::floor (std::log2 (offset + 1));
      /* Calculate the tile number.  */
      encoding.tile_index = (offset + 1) - (1 << encoding.qualifier_index);
      /* Direction and slice index don't get used for tiles.  Set them to
	 zero.  */
      encoding.slice_index = 0;
      encoding.horizontal = false;
    }
}
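/* Illustrative example of the tile slice decoding above (assuming
   svq == 2, i.e. a 32-byte streaming vector length): for offset 5 we
   get qualifier = 5 / 64 == 0 (B), dts = 5 % 64 == 5, horizontal =
   !(5 & 1) == false (vertical), tile_index = (5 >> 1) & 0 == 0 and
   slice_index = 5 >> 1 == 2, which corresponds to the pseudo-register
   named "za0vb2".  */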
/* Return the type for a ZA tile slice pseudo-register based on ENCODING.  */

static struct type *
aarch64_za_tile_slice_type (struct gdbarch *gdbarch,
			    const struct za_pseudo_encoding &encoding)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);

  if (tdep->sme_tile_slice_type_q == nullptr)
    {
      /* Q tile slice type.  */
      tdep->sme_tile_slice_type_q
	= init_vector_type (builtin_type (gdbarch)->builtin_uint128,
			    tdep->sme_svq);
      /* D tile slice type.  */
      tdep->sme_tile_slice_type_d
	= init_vector_type (builtin_type (gdbarch)->builtin_uint64,
			    tdep->sme_svq * 2);
      /* S tile slice type.  */
      tdep->sme_tile_slice_type_s
	= init_vector_type (builtin_type (gdbarch)->builtin_uint32,
			    tdep->sme_svq * 4);
      /* H tile slice type.  */
      tdep->sme_tile_slice_type_h
	= init_vector_type (builtin_type (gdbarch)->builtin_uint16,
			    tdep->sme_svq * 8);
      /* B tile slice type.  */
      tdep->sme_tile_slice_type_b
	= init_vector_type (builtin_type (gdbarch)->builtin_uint8,
			    tdep->sme_svq * 16);
    }

  switch (encoding.qualifier_index)
    {
    case 4:
      return tdep->sme_tile_slice_type_q;
    case 3:
      return tdep->sme_tile_slice_type_d;
    case 2:
      return tdep->sme_tile_slice_type_s;
    case 1:
      return tdep->sme_tile_slice_type_h;
    case 0:
      return tdep->sme_tile_slice_type_b;
    default:
      error (_("Invalid qualifier index %s for tile slice pseudo register."),
	     pulongest (encoding.qualifier_index));
    }

  gdb_assert_not_reached ("Unknown qualifier for ZA tile slice register");
}
/* Return the type for a ZA tile pseudo-register based on ENCODING.  */

static struct type *
aarch64_za_tile_type (struct gdbarch *gdbarch,
		      const struct za_pseudo_encoding &encoding)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);

  if (tdep->sme_tile_type_q == nullptr)
    {
      struct type *inner_vectors_type;

      /* Q tile type.  */
      inner_vectors_type
	= init_vector_type (builtin_type (gdbarch)->builtin_uint128,
			    tdep->sme_svq);
      tdep->sme_tile_type_q
	= init_vector_type (inner_vectors_type, tdep->sme_svq);

      /* D tile type.  */
      inner_vectors_type
	= init_vector_type (builtin_type (gdbarch)->builtin_uint64,
			    tdep->sme_svq * 2);
      tdep->sme_tile_type_d
	= init_vector_type (inner_vectors_type, tdep->sme_svq * 2);

      /* S tile type.  */
      inner_vectors_type
	= init_vector_type (builtin_type (gdbarch)->builtin_uint32,
			    tdep->sme_svq * 4);
      tdep->sme_tile_type_s
	= init_vector_type (inner_vectors_type, tdep->sme_svq * 4);

      /* H tile type.  */
      inner_vectors_type
	= init_vector_type (builtin_type (gdbarch)->builtin_uint16,
			    tdep->sme_svq * 8);
      tdep->sme_tile_type_h
	= init_vector_type (inner_vectors_type, tdep->sme_svq * 8);

      /* B tile type.  */
      inner_vectors_type
	= init_vector_type (builtin_type (gdbarch)->builtin_uint8,
			    tdep->sme_svq * 16);
      tdep->sme_tile_type_b
	= init_vector_type (inner_vectors_type, tdep->sme_svq * 16);
    }

  switch (encoding.qualifier_index)
    {
    case 4:
      return tdep->sme_tile_type_q;
    case 3:
      return tdep->sme_tile_type_d;
    case 2:
      return tdep->sme_tile_type_s;
    case 1:
      return tdep->sme_tile_type_h;
    case 0:
      return tdep->sme_tile_type_b;
    default:
      error (_("Invalid qualifier index %s for ZA tile pseudo register."),
	     pulongest (encoding.qualifier_index));
    }

  gdb_assert_not_reached ("unknown qualifier for tile pseudo-register");
}
/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single
	 value slice from the non-pseudo vector registers.  However, NEON
	 V registers are always vector registers, and need constructing as
	 such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch,
					    "__gdb_builtin_type_vnv",
					    TYPE_CODE_UNION);

      struct type *sub = arch_composite_type (gdbarch,
					      "__gdb_builtin_type_vnd",
					      TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "bf",
				   init_vector_type (bt->builtin_bfloat16, 8));
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_half, 8));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg == AARCH64_DWARF_PC)
    return AARCH64_PC_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  if (tdep->has_pauth ())
    {
      if (reg == AARCH64_DWARF_RA_SIGN_STATE)
	return tdep->ra_sign_state_regnum;
    }

  return -1;
}
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb::byte_vector buf (register_size (gdbarch, regno));
	  gdb_assert (len <= buf.size ());

	  aarch64_debug_printf
	    ("read HFA or HVA return value element %d from %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf.data (), len);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straightforward.  Otherwise we have to play around a bit
	 more.  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behavior is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);
  int elements;
  struct type *fundamental_type;

  if (TYPE_HAS_DYNAMIC_LENGTH (type))
    return 1;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* v0-v7 are used to return values and one register is allocated
	 for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (type->length () > 16
      || !language_pass_by_reference (type).trivially_copyable)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
      return 1;
    }

  return 0;
}
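/* Illustrative examples of the rules above (not from the original
   source): struct { double x, y, z; } is an HFA of three doubles, so it
   comes back in v0-v2 and is not returned in memory, while
   struct { char buf[24]; } is 24 bytes, larger than 16, so the caller
   must reserve a result buffer and pass its address in x8.  */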
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb::byte_vector tmpbuf (register_size (gdbarch, regno));
	  gdb_assert (len <= tmpbuf.size ());

	  aarch64_debug_printf
	    ("write HFA or HVA return value element %d to %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  /* Depending on whether the target supports SVE or not, the V
	     registers may report a size > 16 bytes.  In that case, read
	     the original contents of the register before overriding it
	     with a new value that has a potential size <= 16 bytes.  */
	  regs->cooked_read (regno, tmpbuf);
	  memcpy (tmpbuf.data (), valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (type->length () <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in x0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with x0.  This will always
	     be a multiple of the register size.  */
	  int len = type->length ();
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behavior is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      struct value **read_value, const gdb_byte *writebuf)
{
  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  /* From the AAPCS64's Result Return section:

	     "Otherwise, the caller shall reserve a block of memory of
	     sufficient size and alignment to hold the result.  The
	     address of the memory block shall be passed as an additional
	     argument to the function in x8."  */

	  aarch64_debug_printf ("return value in memory");

	  if (read_value != nullptr)
	    {
	      CORE_ADDR addr;

	      regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
	      *read_value = value_at_non_lval (valtype, addr);
	    }

	  return RETURN_VALUE_ABI_RETURNS_ADDRESS;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (read_value)
    {
      *read_value = value::allocate (valtype);
      aarch64_extract_return_value (valtype, regcache,
				    (*read_value)->contents_raw ().data ());
    }

  aarch64_debug_printf ("return value in registers");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (const frame_info_ptr &frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
			    struct agent_expr *ax, struct axs_value *value,
			    CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
/* Return TRUE if REGNUM is a W pseudo-register number.  Return FALSE
   otherwise.  */

static bool
is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->w_pseudo_base <= regnum
      && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
    return true;

  return false;
}
/* Return TRUE if REGNUM is an SME pseudo-register number.  Return FALSE
   otherwise.  */

static bool
is_sme_pseudo_register (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->has_sme () && tdep->sme_pseudo_base <= regnum
      && regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count)
    return true;

  return false;
}
/* Convert ENCODING into a ZA tile slice name.  */

static const std::string
aarch64_za_tile_slice_name (const struct za_pseudo_encoding &encoding)
{
  gdb_assert (encoding.qualifier_index >= 0);
  gdb_assert (encoding.qualifier_index <= 4);
  gdb_assert (encoding.tile_index >= 0);
  gdb_assert (encoding.tile_index <= 15);
  gdb_assert (encoding.slice_index >= 0);
  gdb_assert (encoding.slice_index <= 255);

  const char orientation = encoding.horizontal ? 'h' : 'v';

  const char qualifiers[6] = "bhsdq";
  const char qualifier = qualifiers[encoding.qualifier_index];
  return string_printf ("za%d%c%c%d", encoding.tile_index, orientation,
			qualifier, encoding.slice_index);
}
/* Convert ENCODING into a ZA tile name.  */

static const std::string
aarch64_za_tile_name (const struct za_pseudo_encoding &encoding)
{
  /* Tiles don't use the slice number and the direction fields.  */
  gdb_assert (encoding.qualifier_index >= 0);
  gdb_assert (encoding.qualifier_index <= 4);
  gdb_assert (encoding.tile_index >= 0);
  gdb_assert (encoding.tile_index <= 15);

  const char qualifiers[6] = "bhsdq";
  const char qualifier = qualifiers[encoding.qualifier_index];
  return (string_printf ("za%d%c", encoding.tile_index, qualifier));
}
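/* Illustrative example of the tile encoding used above (not from the
   original source): tile pseudo-register offset 6 decodes as
   qualifier_index = floor (log2 (7)) == 2 (S) and tile_index =
   7 - (1 << 2) == 3, giving the name "za3s", consistent with the S
   qualifier providing the four tiles za0s..za3s.  */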
/* Given an SME pseudo-register REGNUM, return its type.  */

static struct type *
aarch64_sme_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  struct za_pseudo_encoding encoding;

  /* Decode the SME pseudo-register number.  */
  aarch64_za_decode_pseudos (gdbarch, regnum, encoding);

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    return aarch64_za_tile_slice_type (gdbarch, encoding);

  return aarch64_za_tile_type (gdbarch, encoding);
}
/* Return the pseudo register name corresponding to register REGNUM.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* W pseudo-registers.  Bottom halves of the X registers.  */
  static const char *const w_name[] =
    {
      "w0", "w1", "w2", "w3",
      "w4", "w5", "w6", "w7",
      "w8", "w9", "w10", "w11",
      "w12", "w13", "w14", "w15",
      "w16", "w17", "w18", "w19",
      "w20", "w21", "w22", "w23",
      "w24", "w25", "w26", "w27",
      "w28", "w29", "w30",
    };

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  /* W pseudo-registers?  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return w_name[regnum - tdep->w_pseudo_base];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  if (is_sme_pseudo_register (gdbarch, regnum))
    return tdep->sme_pseudo_names[regnum - tdep->sme_pseudo_base].c_str ();

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return "";

  internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
		  regnum);
}
/* Implement the "pseudo_register_type" tdesc_arch_data method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
      && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  /* W pseudo-registers are 32-bit.  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return builtin_type (gdbarch)->builtin_uint32;

  if (is_sme_pseudo_register (gdbarch, regnum))
    return aarch64_sme_pseudo_register_type (gdbarch, regnum);

  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return builtin_type (gdbarch)->builtin_uint64;

  internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
		  regnum);
}
/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
				    const struct reggroup *group)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
	   && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return group == all_reggroup || group == vector_reggroup;
  else if (is_sme_pseudo_register (gdbarch, regnum))
    return group == all_reggroup || group == vector_reggroup;
  /* RA_STATE is used for unwinding only.  Do not assign it to any
     groups.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return 0;

  return group == all_reggroup;
}
/* Helper for aarch64_pseudo_read_value.  */

static value *
aarch64_pseudo_read_value_1 (const frame_info_ptr &next_frame,
			     const int pseudo_reg_num, int raw_regnum_offset)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + raw_regnum_offset;

  return pseudo_from_raw_part (next_frame, pseudo_reg_num, v_regnum, 0);
}
/* Helper function for reading/writing ZA pseudo-registers.  Given REGNUM,
   a ZA pseudo-register number, return the information on positioning of
   the bytes that must be read from/written to.  */

static za_offsets
aarch64_za_offsets_from_regnum (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  struct za_pseudo_encoding encoding;

  /* Decode the ZA pseudo-register number.  */
  aarch64_za_decode_pseudos (gdbarch, regnum, encoding);

  /* Fetch the streaming vector length.  */
  size_t svl = sve_vl_from_vq (tdep->sme_svq);
  za_offsets offsets;

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    {
      if (encoding.horizontal)
	{
	  /* Horizontal tile slices are contiguous ranges of svl bytes.  */

	  /* The starting offset depends on the tile index (to locate the
	     tile in the ZA buffer), the slice index (to locate the slice
	     within the tile) and the qualifier.  */
	  offsets.starting_offset
	    = encoding.tile_index * svl + encoding.slice_index
	      * (svl >> encoding.qualifier_index);
	  /* Horizontal tile slice data is contiguous and thus doesn't
	     have a stride.  */
	  offsets.stride_size = 0;
	  /* Horizontal tile slice data is contiguous and thus only has 1
	     chunk.  */
	  offsets.chunks = 1;
	  /* The chunk size is always svl bytes.  */
	  offsets.chunk_size = svl;
	}
      else
	{
	  /* Vertical tile slices are non-contiguous ranges of
	     (1 << qualifier_index) bytes.  */

	  /* The starting offset depends on the tile number (to locate the
	     tile in the ZA buffer), the slice index (to locate the
	     element within the tile slice) and the qualifier.  */
	  offsets.starting_offset
	    = encoding.tile_index * svl + encoding.slice_index
	      * (1 << encoding.qualifier_index);
	  /* The offset between vertical tile slices depends on the
	     qualifier and svl.  */
	  offsets.stride_size = svl << encoding.qualifier_index;
	  /* The number of chunks depends on svl and the qualifier size.  */
	  offsets.chunks = svl >> encoding.qualifier_index;
	  /* The chunk size depends on the qualifier.  */
	  offsets.chunk_size = 1 << encoding.qualifier_index;
	}
    }
  else
    {
      /* ZA tile pseudo-register.  */

      /* Starting offset depends on the tile index and qualifier.  */
      offsets.starting_offset = encoding.tile_index * svl;
      /* The offset between tile slices depends on the qualifier and
	 svl.  */
      offsets.stride_size = svl << encoding.qualifier_index;
      /* The number of chunks depends on the qualifier and svl.  */
      offsets.chunks = svl >> encoding.qualifier_index;
      /* The chunk size is always svl bytes.  */
      offsets.chunk_size = svl;
    }

  return offsets;
}
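/* A worked example of the offsets computed above (illustrative, with an
   assumed svq == 1, i.e. svl == 16 bytes): the vertical S tile slice
   za1vs0 (qualifier_index == 2, tile_index == 1, slice_index == 0) gets
   starting_offset = 1 * 16 + 0 * (1 << 2) == 16, stride_size = 16 << 2
   == 64, chunks = 16 >> 2 == 4 and chunk_size = 1 << 2 == 4: four
   4-byte accesses, 64 bytes apart, starting 16 bytes into the ZA
   buffer.  */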
/* Given REGNUM, an SME pseudo-register number, return its value.  */

static value *
aarch64_sme_pseudo_register_read (gdbarch *gdbarch,
				  const frame_info_ptr &next_frame,
				  const int pseudo_reg_num)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= pseudo_reg_num);
  gdb_assert (pseudo_reg_num < tdep->sme_pseudo_base
			       + tdep->sme_pseudo_count);

  /* Fetch the offsets that we need in order to read from the correct
     blocks of ZA.  */
  za_offsets offsets
    = aarch64_za_offsets_from_regnum (gdbarch, pseudo_reg_num);

  /* Fetch the contents of ZA.  */
  value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);
  value *result = value::allocate_register (next_frame, pseudo_reg_num);

  /* Copy the requested data.  */
  for (int chunks = 0; chunks < offsets.chunks; chunks++)
    {
      int src_offset = offsets.starting_offset + chunks * offsets.stride_size;
      int dst_offset = chunks * offsets.chunk_size;
      za_value->contents_copy (result, dst_offset, src_offset,
			       offsets.chunk_size);
    }

  return result;
}
/* Implement the "pseudo_register_read_value" gdbarch method.  */

static value *
aarch64_pseudo_read_value (gdbarch *gdbarch, const frame_info_ptr &next_frame,
			   const int pseudo_reg_num)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum
	= AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base);

      /* Read the bottom 4 bytes of X.  */
      return pseudo_from_raw_part (next_frame, pseudo_reg_num, x_regnum,
				   offset);
    }
  else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
    return aarch64_sme_pseudo_register_read (gdbarch, next_frame,
					     pseudo_reg_num);

  /* Offset in the "pseudo-register space".  */
  int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);

  if (pseudo_offset >= AARCH64_Q0_REGNUM
      && pseudo_offset < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
					pseudo_offset - AARCH64_Q0_REGNUM);

  if (pseudo_offset >= AARCH64_D0_REGNUM
      && pseudo_offset < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
					pseudo_offset - AARCH64_D0_REGNUM);

  if (pseudo_offset >= AARCH64_S0_REGNUM
      && pseudo_offset < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
					pseudo_offset - AARCH64_S0_REGNUM);

  if (pseudo_offset >= AARCH64_H0_REGNUM
      && pseudo_offset < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
					pseudo_offset - AARCH64_H0_REGNUM);

  if (pseudo_offset >= AARCH64_B0_REGNUM
      && pseudo_offset < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
					pseudo_offset - AARCH64_B0_REGNUM);

  if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
      && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
					pseudo_offset
					- AARCH64_SVE_V0_REGNUM);

  if (tdep->has_pauth () && pseudo_reg_num == tdep->ra_sign_state_regnum)
    return value::zero (builtin_type (gdbarch)->builtin_uint64,
			lval_register);

  gdb_assert_not_reached ("regnum out of bound");
}
/* Helper for aarch64_pseudo_write.  */

static void
aarch64_pseudo_write_1 (gdbarch *gdbarch, const frame_info_ptr &next_frame,
			int regnum_offset,
			gdb::array_view<const gdb_byte> buf)
{
  unsigned raw_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.

     Ensure the register buffer is zero, we want gdb writes of the
     various 'scalar' pseudo registers to behave like architectural
     writes: register width bytes are written, the remainder are set to
     zero.  */
  gdb::byte_vector raw_buf (register_size (gdbarch, raw_regnum), 0);
  static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  gdb::array_view<gdb_byte> raw_view (raw_buf);
  copy (buf, raw_view.slice (0, buf.size ()));
  put_frame_register (next_frame, raw_regnum, raw_view);
}
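/* For instance (illustrative, not from the original source): with this
   zero-filled buffer, writing 4 bytes to "s0" stores them at the start
   of the underlying z0/v0 raw register and clears the remaining bytes,
   matching the architectural behavior where a write to a scalar SIMD&FP
   register zeroes the rest of the vector register.  */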
/* Given REGNUM, an SME pseudo-register number, store the bytes from DATA
   to the corresponding portion of the ZA register.  */

static void
aarch64_sme_pseudo_register_write (gdbarch *gdbarch,
				   const frame_info_ptr &next_frame,
				   const int regnum,
				   gdb::array_view<const gdb_byte> data)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  /* Fetch the offsets that we need in order to write to the correct
     blocks of ZA.  */
  za_offsets offsets = aarch64_za_offsets_from_regnum (gdbarch, regnum);

  /* Fetch the contents of ZA.  */
  value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);

  {
    /* Create a view only on the portion of za we want to write.  */
    gdb::array_view<gdb_byte> za_view
      = za_value->contents_writeable ().slice (offsets.starting_offset);

    /* Copy the requested data.  */
    for (int chunks = 0; chunks < offsets.chunks; chunks++)
      {
	gdb::array_view<const gdb_byte> src
	  = data.slice (chunks * offsets.chunk_size, offsets.chunk_size);
	gdb::array_view<gdb_byte> dst
	  = za_view.slice (chunks * offsets.stride_size, offsets.chunk_size);
	copy (src, dst);
      }
  }

  /* Write back to ZA.  */
  put_frame_register (next_frame, tdep->sme_za_regnum,
		      za_value->contents_raw ());
}
/* Implement the "pseudo_register_write" gdbarch method.  */

static void
aarch64_pseudo_write (gdbarch *gdbarch, const frame_info_ptr &next_frame,
		      const int pseudo_reg_num,
		      gdb::array_view<const gdb_byte> buf)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to write the data to.  */
      int x_regnum = AARCH64_X0_REGNUM + (pseudo_reg_num
					  - tdep->w_pseudo_base);

      /* First zero-out the contents of X.  */
      gdb_byte bytes[8] {};
      gdb::array_view<gdb_byte> bytes_view (bytes);
      copy (buf, bytes_view.slice (offset, 4));

      /* Write to the bottom 4 bytes of X.  */
      put_frame_register (next_frame, x_regnum, bytes_view);
      return;
    }
  else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
    {
      aarch64_sme_pseudo_register_write (gdbarch, next_frame,
					 pseudo_reg_num, buf);
      return;
    }

  /* Offset in the "pseudo-register space".  */
  int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);

  if (pseudo_offset >= AARCH64_Q0_REGNUM
      && pseudo_offset < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
				   pseudo_offset - AARCH64_Q0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_D0_REGNUM
      && pseudo_offset < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
				   pseudo_offset - AARCH64_D0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_S0_REGNUM
      && pseudo_offset < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
				   pseudo_offset - AARCH64_S0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_H0_REGNUM
      && pseudo_offset < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
				   pseudo_offset - AARCH64_H0_REGNUM, buf);

  if (pseudo_offset >= AARCH64_B0_REGNUM
      && pseudo_offset < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
				   pseudo_offset - AARCH64_B0_REGNUM, buf);

  if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
      && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, next_frame,
				   pseudo_offset - AARCH64_SVE_V0_REGNUM,
				   buf);

  gdb_assert_not_reached ("regnum out of bound");
}
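/* Illustrative note on the endian handling above: the 8-byte X buffer
   is zeroed first, and the 4 bytes of W data are copied at offset 0 on
   little-endian targets or offset 4 on big-endian targets, i.e.
   wherever the low 32 bits of the 64-bit X register live in memory
   order.  */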
/* Callback function for user_reg_add.  */

static struct value *
value_of_aarch64_user_reg (const frame_info_ptr &frame, const void *baton)
{
  const int *reg_p = (const int *) baton;

  return value_of_register (*reg_p, get_next_frame_sentinel_okay (frame));
}
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (loc, insn_size,
					  byte_order_for_code,
					  &insn_from_memory))
    {
      /* Assume we don't have an atomic sequence, as we couldn't read the
	 instruction in this location.  */
      return {};
    }

  uint32_t insn = insn_from_memory;
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;

      if (!safe_read_memory_unsigned_integer (loc, insn_size,
					      byte_order_for_code,
					      &insn_from_memory))
	{
	  /* Assume we don't have an atomic sequence, as we couldn't read
	     the instruction in this location.  */
	  return {};
	}

      insn = insn_from_memory;
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
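/* An example of the kind of sequence handled above (illustrative
   assembly, not from the original source):

     retry:
       ldaxr  w1, [x0]        ; load exclusive opens the sequence
       cmp    w1, w2
       b.ne   out             ; conditional branch: 2nd breakpoint at 'out'
       stlxr  w3, w4, [x0]    ; store exclusive closes the sequence
       cbnz   w3, retry       ; 1st breakpoint placed here, after the close
     out:

   Breakpoints go after the store exclusive (and at the conditional
   branch target, if any) rather than inside the sequence, because
   trapping between the load and store exclusive would clear the
   exclusive monitor and the sequence could retry forever.  */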
struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* True when a conditional instruction, such as B.COND or TBZ, is being
     displaced stepped.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;

  /* True if it's a branch instruction that saves the link register.  */
  bool linked_branch = false;
};

/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  aarch64_displaced_step_copy_insn_closure *dsc;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
      dsd->dsc->linked_branch = true;
      bool gcs_is_enabled;
      gdbarch_get_shadow_stack_pointer (dsd->regs->arch (), dsd->regs,
					gcs_is_enabled);
      if (gcs_is_enabled)
	aarch64_push_gcs_entry (dsd->regs, data->insn_addr + 4);
    }
}
3693 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
3694 struct aarch64_insn_data
*data
)
3696 struct aarch64_displaced_step_data
*dsd
3697 = (struct aarch64_displaced_step_data
*) data
;
3699 /* GDB has to fix up PC after displaced step this instruction
3700 differently according to the condition is true or false. Instead
3701 of checking COND against conditional flags, we can use
3702 the following instructions, and GDB can tell how to fix up PC
3703 according to the PC value.
3705 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3711 emit_bcond (dsd
->insn_buf
, cond
, 8);
3712 dsd
->dsc
->cond
= true;
3713 dsd
->dsc
->pc_adjust
= offset
;
3714 dsd
->insn_count
= 1;
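/* Concretely (illustrative note): the scratch pad then holds a single
   B.COND that skips 8 bytes when the condition holds.  After the step,
   PC - TO == 8 means the condition was true (the original branch offset
   is applied), while PC - TO == 4 means it was false (the PC advances
   past the branch); aarch64_displaced_step_fixup below distinguishes
   exactly these two distances.  */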
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

     CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
     INSN1           ;
     TAKEN:
     INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction.  We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1               ;
     TAKEN:
     INSN2
  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
			    const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address & ~0xfff);
    }
  else
    regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				    address);

  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
  if (masked_insn == BLR)
    {
      /* Emit a BR to the same register and then update LR to the original
	 address (similar to aarch64_displaced_step_b).  */
      aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
      dsd->dsc->linked_branch = true;
      bool gcs_is_enabled;
      gdbarch_get_shadow_stack_pointer (dsd->regs->arch (), dsd->regs,
					gcs_is_enabled);
      if (gcs_is_enabled)
	aarch64_push_gcs_entry (dsd->regs, data->insn_addr + 4);
    }
  else
    aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

  if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
    dsd->dsc->pc_adjust = 0;
  else
    dsd->dsc->pc_adjust = 4;
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
/* Implement the "displaced_step_copy_insn" gdbarch method.  */

static displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;
  ULONGEST insn_from_memory;

  if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
					  &insn_from_memory))
    return nullptr;

  uint32_t insn = insn_from_memory;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return nullptr;

  /* Look for a Load Exclusive instruction which begins the sequence,
     or for a MOPS instruction.  */
  if ((inst.opcode->iclass == ldstexcl && bit (insn, 22))
      || AARCH64_CPU_HAS_FEATURE (*inst.opcode->avariant, MOPS))
    {
      /* We can't displaced step atomic sequences nor MOPS
	 instructions.  */
      return nullptr;
    }

  std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
    (new aarch64_displaced_step_copy_insn_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  displaced_debug_printf ("writing insn %.8x at %s",
				  dsd.insn_buf[i],
				  paddress (gdbarch, to + i * 4));

	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    dsc = NULL;

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
/* Implement the "displaced_step_fixup" gdbarch method.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs, bool completed_p)
{
  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;
  CORE_ADDR pc = regcache_read_pc (regs);

  /* If the displaced instruction didn't complete successfully then we need
     to restore the program counter, and perhaps the Guarded Control Stack.  */
  if (!completed_p)
    {
      bool gcs_is_enabled;
      gdbarch_get_shadow_stack_pointer (gdbarch, regs, gcs_is_enabled);
      if (dsc->linked_branch && gcs_is_enabled)
	aarch64_pop_gcs_entry (regs);

      pc = from + (pc - to);
      regcache_write_pc (regs, pc);
      return;
    }

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move.  Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
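/* Worked example for the conditional case above: a relocated B.cond is
   emitted as "B.cond +8", so after the step PC == TO + 8 means the
   condition held (pc_adjust keeps the original branch offset), while
   PC == TO + 4 means it did not (pc_adjust becomes 4, i.e. fall through
   to the instruction after FROM).  */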
/* Implement the "displaced_step_hw_singlestep" gdbarch method.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
/* Get the correct target description for the given VQ value.
   If VQ is zero then it is assumed SVE is not supported.
   (It is not possible to set VQ to zero on an SVE system).

   MTE_P indicates the presence of the Memory Tagging Extension feature.

   TLS_P indicates the presence of the Thread Local Storage feature.  */

const target_desc *
aarch64_read_description (const aarch64_features &features)
{
  if (features.vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
	   AARCH64_MAX_SVE_VQ);

  struct target_desc *tdesc = tdesc_aarch64_map[features];

  if (tdesc == NULL)
    {
      tdesc = aarch64_create_target_description (features);
      tdesc_aarch64_map[features] = tdesc;
    }

  return tdesc;
}
/* Return the VQ used when creating the target description TDESC.  */

static uint64_t
aarch64_get_tdesc_vq (const struct target_desc *tdesc)
{
  const struct tdesc_feature *feature_sve;

  if (!tdesc_has_registers (tdesc))
    return 0;

  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");

  if (feature_sve == nullptr)
    return 0;

  uint64_t vl = tdesc_register_bitsize (feature_sve,
					aarch64_sve_register_names[0]) / 8;
  return sve_vq_from_vl (vl);
}
/* Return the svq (streaming vector quotient) used when creating the target
   description TDESC.  */

static uint64_t
aarch64_get_tdesc_svq (const struct target_desc *tdesc)
{
  const struct tdesc_feature *feature_sme;

  if (!tdesc_has_registers (tdesc))
    return 0;

  feature_sme = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");

  if (feature_sme == nullptr)
    return 0;

  size_t svl_squared = tdesc_register_bitsize (feature_sme, "za");

  /* We have the total size of the ZA matrix, in bits.  Figure out the svl
     value.  */
  size_t svl = std::sqrt (svl_squared / 8);

  /* Now extract svq.  */
  return sve_vq_from_vl (svl);
}
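/* Worked example for aarch64_get_tdesc_svq: with a streaming vector length
   (svl) of 32 bytes, the ZA matrix is 32 x 32 == 1024 bytes == 8192 bits,
   so std::sqrt (8192 / 8) == 32 recovers svl, and sve_vq_from_vl (32)
   yields svq == 2 (a vq counts 16-byte quadwords).  */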
/* Get the AArch64 features present in the given target description.  */

aarch64_features
aarch64_features_from_target_desc (const struct target_desc *tdesc)
{
  aarch64_features features;

  if (tdesc == nullptr)
    return features;

  features.vq = aarch64_get_tdesc_vq (tdesc);

  /* We need to look for a couple pauth feature name variations.  */
  features.pauth
    = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);

  if (!features.pauth)
    features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2")
		      != nullptr);

  features.mte
    = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);

  const struct tdesc_feature *tls_feature
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");

  if (tls_feature != nullptr)
    {
      /* We have TLS registers.  Find out how many.  */
      if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
	features.tls = 2;
      else
	features.tls = 1;
    }

  features.svq = aarch64_get_tdesc_svq (tdesc);

  /* Check for the SME2 feature.  */
  features.sme2 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2")
		   != nullptr);

  /* Check for the GCS feature.  */
  features.gcs = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs")
		  != nullptr);

  /* Check for the GCS Linux feature.  */
  features.gcs_linux
    = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs.linux")
       != nullptr);

  return features;
}
/* Implement the "cannot_store_register" gdbarch method.  */

static int
aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (!tdep->has_pauth ())
    return 0;

  /* Pointer authentication registers are read-only.  */
  return (regnum >= tdep->pauth_reg_base
	  && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count);
}
/* Implement the stack_frame_destroyed_p gdbarch method.  */

static int
aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_start, func_end;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
					  &insn_from_memory))
    return 0;

  uint32_t insn = insn_from_memory;

  aarch64_inst inst;
  if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
    return 0;

  return streq (inst.opcode->name, "ret");
}
/* Helper to get the allocation tag from a 64-bit ADDRESS.

   Return the allocation tag if successful and nullopt otherwise.  */

std::optional<CORE_ADDR>
aarch64_mte_get_atag (CORE_ADDR address)
{
  gdb::byte_vector tags;

  /* Attempt to fetch the allocation tag.  */
  if (!target_fetch_memtags (address, 1, tags,
			     static_cast<int> (memtag_type::allocation)))
    return {};

  /* Only one tag should've been returned.  Make sure we got exactly that.  */
  if (tags.size () != 1)
    error (_("Target returned an unexpected number of tags."));

  /* Although our tags are 4 bits in size, they are stored in a
     byte.  */
  return tags[0];
}
/* Implement the memtag_matches_p gdbarch method.  */

static bool
aarch64_memtag_matches_p (struct gdbarch *gdbarch,
			  struct value *address)
{
  gdb_assert (address != nullptr);

  CORE_ADDR addr = value_as_address (address);

  /* Fetch the allocation tag for ADDRESS.  */
  std::optional<CORE_ADDR> atag
    = aarch64_mte_get_atag (aarch64_remove_non_address_bits (gdbarch, addr));

  if (!atag.has_value ())
    return true;

  /* Fetch the logical tag for ADDRESS.  */
  gdb_byte ltag = aarch64_mte_get_ltag (addr);

  /* Are the tags the same?  */
  return ltag == *atag;
}
/* Implement the set_memtags gdbarch method.  */

static bool
aarch64_set_memtags (struct gdbarch *gdbarch, struct value *address,
		     size_t length, const gdb::byte_vector &tags,
		     memtag_type tag_type)
{
  gdb_assert (!tags.empty ());
  gdb_assert (address != nullptr);

  CORE_ADDR addr = value_as_address (address);

  /* Set the logical tag or the allocation tag.  */
  if (tag_type == memtag_type::logical)
    {
      /* When setting logical tags, we don't care about the length, since
	 we are only setting a single logical tag.  */
      addr = aarch64_mte_set_ltag (addr, tags[0]);

      /* Update the value's content with the tag.  */
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      gdb_byte *srcbuf = address->contents_raw ().data ();
      store_unsigned_integer (srcbuf, sizeof (addr), byte_order, addr);
    }
  else
    {
      /* Remove the top byte.  */
      addr = aarch64_remove_non_address_bits (gdbarch, addr);

      /* With G being the number of tag granules and N the number of tags
	 passed in, we can have the following cases:

	 1 - G == N: Store all the N tags to memory.

	 2 - G < N : Warn about having more tags than granules, but write G
		     tags.

	 3 - G > N : This is a "fill tags" operation.  We should use the tags
		     as a pattern to fill the granules repeatedly until we have
		     written G tags to memory.  */

      size_t g = aarch64_mte_get_tag_granules (addr, length,
					       AARCH64_MTE_GRANULE_SIZE);
      size_t n = tags.size ();

      if (g < n)
	warning (_("Got more tags than memory granules.  Tags will be "
		   "truncated."));
      else if (g > n)
	warning (_("Using tag pattern to fill memory range."));

      if (!target_store_memtags (addr, length, tags,
				 static_cast<int> (memtag_type::allocation)))
	return false;
    }
  return true;
}
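/* Worked example for the granule cases in aarch64_set_memtags above, with
   the 16-byte MTE granule: a LENGTH of 32 bytes starting on a granule
   boundary covers G == 2 granules.  Passing N == 2 tags stores both;
   N == 3 warns and truncates to 2; N == 1 is a fill, repeating the single
   tag across both granules.  */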
/* Implement the get_memtag gdbarch method.  */

static struct value *
aarch64_get_memtag (struct gdbarch *gdbarch, struct value *address,
		    memtag_type tag_type)
{
  gdb_assert (address != nullptr);

  CORE_ADDR addr = value_as_address (address);
  CORE_ADDR tag = 0;

  /* Get the logical tag or the allocation tag.  */
  if (tag_type == memtag_type::logical)
    tag = aarch64_mte_get_ltag (addr);
  else
    {
      /* Remove the top byte.  */
      addr = aarch64_remove_non_address_bits (gdbarch, addr);
      std::optional<CORE_ADDR> atag = aarch64_mte_get_atag (addr);

      if (!atag.has_value ())
	return nullptr;

      tag = *atag;
    }

  /* Convert the tag to a value.  */
  return value_from_ulongest (builtin_type (gdbarch)->builtin_unsigned_int,
			      tag);
}
/* Implement the memtag_to_string gdbarch method.  */

static std::string
aarch64_memtag_to_string (struct gdbarch *gdbarch, struct value *tag_value)
{
  if (tag_value == nullptr)
    return "";

  CORE_ADDR tag = value_as_address (tag_value);

  return string_printf ("0x%s", phex_nz (tag));
}
/* See aarch64-tdep.h.  */

CORE_ADDR
aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer)
{
  /* By default, we assume TBI and discard the top 8 bits plus the VA range
     select bit (55).  Below we try to fetch information about pointer
     authentication masks in order to make non-address removal more
     precise.  */
  CORE_ADDR mask = AARCH64_TOP_BITS_MASK;

  /* Check if we have an inferior first.  If not, just use the default
     mask.

     We use the inferior_ptid here because the pointer authentication masks
     should be the same across threads of a process.  Since we may not have
     access to the current thread (gdb may have switched to no inferiors
     momentarily), we use the inferior ptid.  */
  if (inferior_ptid != null_ptid)
    {
      /* If we do have an inferior, attempt to fetch its thread's thread_info
	 struct.  */
      thread_info *thread = current_inferior ()->find_thread (inferior_ptid);

      /* If the thread is running, we will not be able to fetch the mask
	 registers.  */
      if (thread != nullptr && thread->state != THREAD_RUNNING)
	{
	  /* Otherwise, fetch the register cache and the masks.  */
	  struct regcache *regs
	    = get_thread_regcache (current_inferior ()->process_target (),
				   inferior_ptid);

	  /* Use the gdbarch from the register cache to check for pointer
	     authentication support, as it matches the features found in
	     that particular thread.  */
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ());

	  /* Is there pointer authentication support?  */
	  if (tdep->has_pauth ())
	    {
	      CORE_ADDR cmask, dmask;
	      int dmask_regnum
		= AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base);
	      int cmask_regnum
		= AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

	      /* If we have a kernel address and we have kernel-mode address
		 mask registers, use those instead.  */
	      if (tdep->pauth_reg_count > 2
		  && pointer & VA_RANGE_SELECT_BIT_MASK)
		{
		  dmask_regnum
		    = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		  cmask_regnum
		    = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		}

	      /* We have both a code mask and a data mask.  For now they are
		 the same, but this may change in the future.  */
	      if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID)
		dmask = mask;

	      if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID)
		cmask = mask;

	      mask |= aarch64_mask_from_pac_registers (cmask, dmask);
	    }
	}
    }

  return aarch64_remove_top_bits (pointer, mask);
}
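/* Example of the default mask (illustrative): AARCH64_TOP_BITS_MASK covers
   bits 63-55 (the TBI byte plus the VA range select bit), so a tagged user
   pointer such as 0x8a00ffffdeadbeef (bit 55 clear) is reduced to
   0x0000ffffdeadbeef before being used for memory access.  */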
/* Given NAMES, a vector of strings, initialize it with all the SME
   pseudo-register names for the current streaming vector length.  */

static void
aarch64_initialize_sme_pseudo_names (struct gdbarch *gdbarch,
				     std::vector<std::string> &names)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_tile_slice_pseudo_base > 0);
  gdb_assert (tdep->sme_tile_pseudo_base > 0);

  for (int i = 0; i < tdep->sme_tile_slice_pseudo_count; i++)
    {
      int regnum = tdep->sme_tile_slice_pseudo_base + i;
      struct za_pseudo_encoding encoding;
      aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
      names.push_back (aarch64_za_tile_slice_name (encoding));
    }

  for (int i = 0; i < AARCH64_ZA_TILES_NUM; i++)
    {
      int regnum = tdep->sme_tile_pseudo_base + i;
      struct za_pseudo_encoding encoding;
      aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
      names.push_back (aarch64_za_tile_name (encoding));
    }
}
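/* For instance (illustrative), with svq == 1 this generates 160 tile slice
   names following the za<t><d><q><s> pattern, such as "za0hb0" (tile 0,
   horizontal, b qualifier, slice 0), followed by the 31 tile names
   ("za0b", "za0h", "za1h", ...); the encoding scheme is documented inside
   aarch64_gdbarch_init below.  */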
/* Initialize the current architecture based on INFO.  If possible,
   reuse an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
  const struct tdesc_feature *feature_pauth;
  bool valid_p = true;
  int i, num_regs = 0, num_pseudo_regs = 0;
  int first_pauth_regnum = -1, ra_sign_state_offset = -1;
  int first_mte_regnum = -1, first_tls_regnum = -1;
  uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
  uint64_t svq = aarch64_get_tdesc_svq (info.target_desc);

  if (vq > AARCH64_MAX_SVE_VQ)
    internal_error (_("VQ out of bounds: %s (max %d)"),
		    pulongest (vq), AARCH64_MAX_SVE_VQ);

  if (svq > AARCH64_MAX_SVE_VQ)
    internal_error (_("Streaming vector quotient (svq) out of bounds: %s"
		      " (max %d)"),
		    pulongest (svq), AARCH64_MAX_SVE_VQ);

  /* If there is already a candidate, use it.  */
  for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != nullptr;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
      if (tdep && tdep->vq == vq && tdep->sme_svq == svq)
	return best_arch->gdbarch;
    }
  /* Ensure we always have a target descriptor, and that it is for the given VQ
     value.  */
  const struct target_desc *tdesc = info.target_desc;
  if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc)
      || svq != aarch64_get_tdesc_svq (tdesc))
    {
      aarch64_features features;
      features.vq = vq;
      features.svq = svq;
      tdesc = aarch64_read_description (features);
    }

  feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
  feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
  const struct tdesc_feature *feature_mte
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
  const struct tdesc_feature *feature_tls
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");

  if (feature_core == nullptr)
    return nullptr;

  tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();

  /* Validate the description provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
					AARCH64_X0_REGNUM + i,
					aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Add the V registers.  */
  if (feature_fpu != nullptr)
    {
      if (feature_sve != nullptr)
	error (_("Program contains both fpu and SVE features."));

      /* Validate the description provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
					    AARCH64_V0_REGNUM + i,
					    aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;
    }

  /* Add the SVE registers.  */
  if (feature_sve != nullptr)
    {
      /* Validate the description provides the mandatory SVE registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
					    AARCH64_SVE_Z0_REGNUM + i,
					    aarch64_sve_register_names[i]);

      num_regs = AARCH64_SVE_Z0_REGNUM + i;
      num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
    }

  if (feature_fpu != nullptr || feature_sve != nullptr)
    {
      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }
= -1;
4554 int first_sme2_regnum
= -1;
4555 int first_sme_pseudo_regnum
= -1;
4556 const struct tdesc_feature
*feature_sme
4557 = tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sme");
4558 if (feature_sme
!= nullptr)
4560 /* Record the first SME register. */
4561 first_sme_regnum
= num_regs
;
4563 valid_p
&= tdesc_numbered_register (feature_sme
, tdesc_data
.get (),
4566 valid_p
&= tdesc_numbered_register (feature_sme
, tdesc_data
.get (),
4567 num_regs
++, "svcr");
4569 valid_p
&= tdesc_numbered_register (feature_sme
, tdesc_data
.get (),
4572 /* Record the first SME pseudo register. */
4573 first_sme_pseudo_regnum
= num_pseudo_regs
;
4575 /* Add the ZA tile slice pseudo registers. The number of tile slice
4576 pseudo-registers depend on the svl, and is always a multiple of 5. */
4577 num_pseudo_regs
+= (svq
<< 5) * 5;
4579 /* Add the ZA tile pseudo registers. */
4580 num_pseudo_regs
+= AARCH64_ZA_TILES_NUM
;
4582 /* Now check for the SME2 feature. SME2 is only available if SME is
4584 const struct tdesc_feature
*feature_sme2
4585 = tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sme2");
4586 if (feature_sme2
!= nullptr)
4588 /* Record the first SME2 register. */
4589 first_sme2_regnum
= num_regs
;
4591 valid_p
&= tdesc_numbered_register (feature_sme2
, tdesc_data
.get (),
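  /* Note: (svq << 5) * 5 == svq * 160, so the ZA tile slice pseudo-register
     count ranges from 160 (svq == 1) to 2560 (svq == 16), matching the
     "160 to 2560" figure in the ZA layout comment further down.  */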
  /* Add the TLS register.  */
  int tls_register_count = 0;
  if (feature_tls != nullptr)
    {
      first_tls_regnum = num_regs;

      /* Look for the TLS registers.  tpidr is required, but tpidr2 is
	 optional.  */
      valid_p
	= tdesc_numbered_register (feature_tls, tdesc_data.get (),
				   first_tls_regnum, "tpidr");

      if (valid_p)
	{
	  tls_register_count++;

	  bool has_tpidr2
	    = tdesc_numbered_register (feature_tls, tdesc_data.get (),
				       first_tls_regnum + tls_register_count,
				       "tpidr2");

	  /* Figure out how many TLS registers we have.  */
	  if (has_tpidr2)
	    tls_register_count++;

	  num_regs += tls_register_count;
	}
      else
	{
	  warning (_("Provided TLS register feature doesn't contain "
		     "required tpidr register."));
	  return nullptr;
	}
    }
  /* We have two versions of the pauth target description due to a past bug
     where GDB would crash when seeing the first version of the pauth target
     description.  */
  feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
  if (feature_pauth == nullptr)
    feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2");

  /* Add the pauth registers.  */
  int pauth_masks = 0;
  if (feature_pauth != NULL)
    {
      first_pauth_regnum = num_regs;
      ra_sign_state_offset = num_pseudo_regs;

      /* Size of the expected register set with all 4 masks.  */
      int set_size = ARRAY_SIZE (aarch64_pauth_register_names);

      /* QEMU exposes a couple additional masks for the high half of the
	 address.  We should either have 2 registers or 4 registers.  */
      if (tdesc_unnumbered_register (feature_pauth,
				     "pauth_dmask_high") == 0)
	{
	  /* We did not find pauth_dmask_high, assume we only have
	     2 masks.  We are not dealing with QEMU/Emulators then.  */
	  set_size -= 2;
	}

      /* Validate the descriptor provides the mandatory PAUTH registers and
	 allocate their numbers.  */
      for (i = 0; i < set_size; i++)
	valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
					    first_pauth_regnum + i,
					    aarch64_pauth_register_names[i]);

      num_regs += i;
      num_pseudo_regs += 1;	/* Count RA_STATE pseudo register.  */
      pauth_masks = set_size;
    }
  /* Add the MTE registers.  */
  if (feature_mte != NULL)
    {
      first_mte_regnum = num_regs;
      /* Validate the descriptor provides the mandatory MTE registers and
	 allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
					    first_mte_regnum + i,
					    aarch64_mte_register_names[i]);

      num_regs += i;
    }

  /* W pseudo-registers */
  int first_w_regnum = num_pseudo_regs;
  num_pseudo_regs += 31;
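  /* Note: the 31 W pseudo-registers (w0 ... w30) expose the low 32 bits of
     x0 ... x30.  Their final register numbers are only known after
     tdesc_use_registers has run; see where w_pseudo_base is set below.  */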
  const tdesc_feature *feature_gcs
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs");
  int first_gcs_regnum = -1;
  /* Add the GCS registers.  */
  if (feature_gcs != nullptr)
    {
      first_gcs_regnum = num_regs;
      /* Validate the descriptor provides the mandatory GCS registers and
	 allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_gcs_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_gcs, tdesc_data.get (),
					    first_gcs_regnum + i,
					    aarch64_gcs_register_names[i]);

      num_regs += i;
    }

  const tdesc_feature *feature_gcs_linux
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs.linux");
  int first_gcs_linux_regnum = -1;
  /* Add the GCS Linux registers.  */
  if (feature_gcs_linux != nullptr && feature_gcs == nullptr)
    {
      /* This feature depends on the GCS feature.  */
      return nullptr;
    }
  else if (feature_gcs_linux != nullptr)
    {
      first_gcs_linux_regnum = num_regs;
      /* Validate the descriptor provides the mandatory GCS Linux registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_gcs_linux_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_gcs_linux,
					    tdesc_data.get (),
					    first_gcs_linux_regnum + i,
					    aarch64_gcs_linux_register_names[i]);

      num_regs += i;
    }

  if (!valid_p)
    return nullptr;
  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  struct gdbarch *gdbarch
    = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;	/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;
  tdep->vq = vq;
  tdep->pauth_reg_base = first_pauth_regnum;
  tdep->pauth_reg_count = pauth_masks;
  tdep->ra_sign_state_regnum = -1;
  tdep->mte_reg_base = first_mte_regnum;
  tdep->tls_regnum_base = first_tls_regnum;
  tdep->tls_register_count = tls_register_count;
  tdep->gcs_reg_base = first_gcs_regnum;
  tdep->gcs_linux_reg_base = first_gcs_linux_regnum;

  /* Set the SME register set details.  The pseudo-registers will be adjusted
     later.  */
  tdep->sme_reg_base = first_sme_regnum;
  tdep->sme_svg_regnum = first_sme_regnum;
  tdep->sme_svcr_regnum = first_sme_regnum + 1;
  tdep->sme_za_regnum = first_sme_regnum + 2;
  tdep->sme_svq = svq;

  /* Set the SME2 register set details.  */
  tdep->sme2_zt0_regnum = first_sme2_regnum;
  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_get_next_pcs (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);
  set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);

  /* Set the allocation tag granule size to 16 bytes.  */
  set_gdbarch_memtag_granule_size (gdbarch, AARCH64_MTE_GRANULE_SIZE);

  /* Register a hook for checking if there is a memory tag match.  */
  set_gdbarch_memtag_matches_p (gdbarch, aarch64_memtag_matches_p);

  /* Register a hook for setting the logical/allocation tags for
     a range of addresses.  */
  set_gdbarch_set_memtags (gdbarch, aarch64_set_memtags);

  /* Register a hook for extracting the logical/allocation tag from an
     address.  */
  set_gdbarch_get_memtag (gdbarch, aarch64_get_memtag);

  /* Register a hook for converting a memory tag to a string.  */
  set_gdbarch_memtag_to_string (gdbarch, aarch64_memtag_to_string);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_wchar_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
  set_gdbarch_type_align (gdbarch, aarch64_type_align);

  /* Detect whether PC is at a point where the stack has been destroyed.  */
  set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdesc_data = tdesc_data.get ();
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
  /* Register DWARF CFA vendor handler.  */
  set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
					   aarch64_execute_dwarf_cfa_vendor_op);

  /* Permanent/Program breakpoint handling.  */
  set_gdbarch_program_breakpoint_here_p (gdbarch,
					 aarch64_program_breakpoint_here_p);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);

  if (tdep->has_gcs ())
    set_gdbarch_shadow_stack_push (gdbarch, aarch64_shadow_stack_push);

  tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));

  /* Fetch the updated number of registers after we're done adding all
     entries from features we don't explicitly care about.  This is the case
     for bare metal debugging stubs that include a lot of system registers.  */
  num_regs = gdbarch_num_regs (gdbarch);

  /* With the number of real registers updated, setup the pseudo-registers and
     record their numbers.  */
  /* Setup W pseudo-register numbers.  */
  tdep->w_pseudo_base = first_w_regnum + num_regs;
  tdep->w_pseudo_count = 31;

  /* Pointer authentication pseudo-registers.  */
  if (tdep->has_pauth ())
    tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;

  /* Architecture hook to remove bits of a pointer that are not part of the
     address, like memory tags (MTE) and pointer authentication signatures.
     Configure address adjustment for watchpoints, breakpoints and memory
     transfers.  */
  set_gdbarch_remove_non_address_bits_watchpoint
    (gdbarch, aarch64_remove_non_address_bits);
  set_gdbarch_remove_non_address_bits_breakpoint
    (gdbarch, aarch64_remove_non_address_bits);
  set_gdbarch_remove_non_address_bits_memory
    (gdbarch, aarch64_remove_non_address_bits);

  /* SME pseudo-registers.  */
  if (tdep->has_sme ())
    {
      tdep->sme_pseudo_base = num_regs + first_sme_pseudo_regnum;
      tdep->sme_tile_slice_pseudo_base = tdep->sme_pseudo_base;
      tdep->sme_tile_slice_pseudo_count = (svq * 32) * 5;
      tdep->sme_tile_pseudo_base
	= tdep->sme_pseudo_base + tdep->sme_tile_slice_pseudo_count;
      tdep->sme_pseudo_count
	= tdep->sme_tile_slice_pseudo_count + AARCH64_ZA_TILES_NUM;
      /* The SME ZA pseudo-registers are a set of 160 to 2560 pseudo-registers
	 depending on the value of svl.

	 The tile pseudo-registers are organized around their qualifiers
	 (b, h, s, d and q).  Their numbers are distributed as follows:

	 b tiles: 0
	 h tiles: 1~2
	 s tiles: 3~6
	 d tiles: 7~14
	 q tiles: 15~30

	 The naming of the tile pseudo-registers follows the pattern za<t><q>,
	 where:

	 <t> is the tile number, with the following possible values based on
	 the qualifiers:

	 Qualifier - Allocated indexes

	 b - 0
	 h - 0~1
	 s - 0~3
	 d - 0~7
	 q - 0~15

	 <q> is the qualifier: b, h, s, d and q.

	 The tile slice pseudo-registers are organized around their
	 qualifiers as well (b, h, s, d and q), but also around their
	 direction (h - horizontal and v - vertical).

	 Even-numbered tile slice pseudo-registers are horizontally-oriented
	 and odd-numbered tile slice pseudo-registers are vertically-oriented.

	 Their numbers are distributed as follows:

	 Qualifier - Allocated indexes

	 b tile slices - 0~511
	 h tile slices - 512~1023
	 s tile slices - 1024~1535
	 d tile slices - 1536~2047
	 q tile slices - 2048~2559

	 The naming of the tile slice pseudo-registers follows the pattern
	 za<t><d><q><s>, where:

	 <t> is the tile number as described for the tile pseudo-registers.
	 <d> is the direction of the tile slice (h or v)
	 <q> is the qualifier of the tile slice (b, h, s, d or q)
	 <s> is the slice number, defined as follows:

	 Qualifier - Allocated indexes

	 b - 0~(svl - 1)
	 h - 0~(svl/2 - 1)
	 s - 0~(svl/4 - 1)
	 d - 0~(svl/8 - 1)
	 q - 0~(svl/16 - 1)

	 We have helper functions to translate to/from register index from/to
	 the set of fields that make the pseudo-register names.  */
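      /* Worked example: with svq == 1 (svl == 16 bytes),
	 sme_tile_slice_pseudo_count == 1 * 32 * 5 == 160, and with
	 AARCH64_ZA_TILES_NUM == 31 (1 + 2 + 4 + 8 + 16 tiles),
	 sme_pseudo_count == 160 + 31 == 191.  */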
      /* Build the array of pseudo-register names available for this
	 particular gdbarch configuration.  */
      aarch64_initialize_sme_pseudo_names (gdbarch, tdep->sme_pseudo_names);
    }

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  register_aarch64_ravenscar_ops (gdbarch);

  return gdbarch;
}
static void
aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep == nullptr)
    return;

  gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
	      paddress (gdbarch, tdep->lowest_pc));

  /* SME fields.  */
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_q = %s\n"),
	      host_address_to_string (tdep->sme_tile_type_q));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_d = %s\n"),
	      host_address_to_string (tdep->sme_tile_type_d));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_s = %s\n"),
	      host_address_to_string (tdep->sme_tile_type_s));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_h = %s\n"),
	      host_address_to_string (tdep->sme_tile_type_h));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_b = %s\n"),
	      host_address_to_string (tdep->sme_tile_type_b));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_q = %s\n"),
	      host_address_to_string (tdep->sme_tile_slice_type_q));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_d = %s\n"),
	      host_address_to_string (tdep->sme_tile_slice_type_d));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_s = %s\n"),
	      host_address_to_string (tdep->sme_tile_slice_type_s));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_h = %s\n"),
	      host_address_to_string (tdep->sme_tile_slice_type_h));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_b = %s\n"),
	      host_address_to_string (tdep->sme_tile_slice_type_b));
  gdb_printf (file, _("aarch64_dump_tdep: sme_reg_base = %s\n"),
	      pulongest (tdep->sme_reg_base));
  gdb_printf (file, _("aarch64_dump_tdep: sme_svg_regnum = %s\n"),
	      pulongest (tdep->sme_svg_regnum));
  gdb_printf (file, _("aarch64_dump_tdep: sme_svcr_regnum = %s\n"),
	      pulongest (tdep->sme_svcr_regnum));
  gdb_printf (file, _("aarch64_dump_tdep: sme_za_regnum = %s\n"),
	      pulongest (tdep->sme_za_regnum));
  gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_base = %s\n"),
	      pulongest (tdep->sme_pseudo_base));
  gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_count = %s\n"),
	      pulongest (tdep->sme_pseudo_count));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_base = %s\n"),
	      pulongest (tdep->sme_tile_slice_pseudo_base));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_count = %s\n"),
	      pulongest (tdep->sme_tile_slice_pseudo_count));
  gdb_printf (file, _("aarch64_dump_tdep: sme_tile_pseudo_base = %s\n"),
	      pulongest (tdep->sme_tile_pseudo_base));
  gdb_printf (file, _("aarch64_dump_tdep: sme_svq = %s\n"),
	      pulongest (tdep->sme_svq));

  gdb_printf (file, _("aarch64_dump_tdep: gcs_reg_base = %d\n"),
	      tdep->gcs_reg_base);
  gdb_printf (file, _("aarch64_dump_tdep: gcs_linux_reg_base = %d\n"),
	      tdep->gcs_linux_reg_base);
}
#if GDB_SELF_TEST
namespace selftests
{
static void aarch64_process_record_test (void);
}
#endif

INIT_GDB_FILE (aarch64_tdep)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
/* AArch64 process record-replay related structures, defines etc.  */

#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
	      } \
	  } \
	while (0)

#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
		memcpy(MEMS, &RECORD_BUF[0], \
		       sizeof(struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)
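/* Usage sketch: a record handler fills a local record_buf (and possibly a
   record_buf_mem), sets reg_rec_count / mem_rec_count, and then calls,
   e.g., REG_ALLOC (r->aarch64_regs, r->reg_rec_count, record_buf).  The
   allocated buffers are later released by deallocate_reg_mem below.  */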
/* AArch64 record/replay structures and enumerations.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

struct aarch64_insn_decode_record
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
};
/* Record handler for data processing - register instructions.  */

static unsigned int
aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
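/* Example: "adds x0, x1, x2" (add/subtract with bit 29 set) records both
   x0 and the CPSR, while a plain "add x0, x1, x2" records only x0.  */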
/* Record handler for data processing - immediate instructions.  */

static unsigned int
aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
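/* Example: "subs x0, x1, #4" (add/subtract immediate with bit 29 set)
   records x0 and the CPSR; "movz x0, #1" (move wide immediate) records
   only x0.  */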
/* Record handler for branch, exception generation and system instructions.  */

static unsigned int
aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
{

  aarch64_gdbarch_tdep *tdep
    = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
	{
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
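/* Example: "bl <target>" (bit 31 set) records both the PC and the LR,
   while a plain "b <target>" or "b.eq <target>" records only the PC.  */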
/* Record handler for advanced SIMD load and store instructions.  */

static unsigned int
aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  std::vector<uint64_t> record_buf_mem;
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
	      bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem.push_back (esize / 8);
		  record_buf_mem.push_back (address + addr_offset);
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem.push_back (esize / 8);
		    record_buf_mem.push_back (address + addr_offset);
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = record_buf_mem.size () / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem.data ());
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
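/* Example: "ld4 {v0.2d-v3.2d}, [x0]" decodes as opcode_bits == 0 (rpt 1,
   selem 4) with esize == 64 and elements == 2, so the loops above record
   1 * 2 * 4 == 8 register writes cycling through v0-v3.  */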
/* Record handler for Memory Copy and Memory Set instructions.  */

static unsigned int
aarch64_record_memcopy_memset (aarch64_insn_decode_record *aarch64_insn_r)
{
  if (record_debug)
    debug_printf ("Process record: memory copy and memory set\n");

  uint8_t op1 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
  uint8_t op2 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  uint32_t reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  uint32_t reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  uint32_t record_buf[3];
  uint64_t record_buf_mem[4];

  if (op1 == 3 && op2 > 11)
    /* Unallocated instructions.  */
    return AARCH64_RECORD_UNKNOWN;

  /* Set instructions have two registers and one memory region to be
     recorded.  */
  record_buf[0] = reg_rd;
  record_buf[1] = reg_rn;
  aarch64_insn_r->reg_rec_count = 2;

  ULONGEST dest_addr;
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rd, &dest_addr);

  LONGEST length;
  regcache_raw_read_signed (aarch64_insn_r->regcache, reg_rn, &length);

  /* In one of the algorithm options a processor can implement, the length
     in Rn has an inverted sign.  */
  if (length < 0)
    length *= -1;

  record_buf_mem[0] = length;
  record_buf_mem[1] = dest_addr;
  aarch64_insn_r->mem_rec_count = 1;

  if (op1 != 3)
    {
      /* Copy instructions have an additional register and an additional
	 memory region to be recorded.  */
      uint32_t reg_rs = bits (aarch64_insn_r->aarch64_insn, 16, 20);

      record_buf[2] = reg_rs;
      aarch64_insn_r->reg_rec_count++;

      ULONGEST source_addr;
      regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rs,
				  &source_addr);

      record_buf_mem[2] = length;
      record_buf_mem[3] = source_addr;
      aarch64_insn_r->mem_rec_count++;
    }

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for load and store instructions.  */

static unsigned int
aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;

	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
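  /* Example of the imm7 decoding above: for "stp x0, x1, [sp, #-16]!",
     imm7_off == 0x7e (two's complement -2) and the shift amount is
     2 + size_bits == 3, so offset == ((~0x7e & 0x7f) + 1) << 3 == 16 and,
     bit 6 being set, the address is decremented by 16.  */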
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate) */
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate) */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits != 0x03)
	    ld_flag = 0x01;
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits != 0x03)
	    ld_flag = 0x01;
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}

      if (!ld_flag)
	{
	  uint16_t imm9_off;

	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
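  /* Example of the imm9 decoding above: for "stur x0, [x1, #-8]",
     imm9_off == 0x1f8 (two's complement -8), so
     offset == ((~0x1f8 & 0x1ff) + 1) == 8 and, bit 8 being set, the
     address is decremented by 8.  */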
  /* Memory Copy and Memory Set instructions.  */
  else if ((insn_bits24_27 & 1) == 1 && insn_bits28_29 == 1
	   && insn_bits10_11 == 1 && !insn_bit21)
    return aarch64_record_memcopy_memset (aarch64_insn_r);
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for data processing SIMD and floating point
   instructions.  */

static unsigned
aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
        {
          if (record_debug)
            debug_printf ("FP - fixed point conversion");

          if ((opcode >> 1) == 0x0 && rmode == 0x03)
            record_buf[0] = reg_rd;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
        {
          if (record_debug)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
        }
      /* Floating point - data processing (2-source) and
         conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
        {
          if (record_debug)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else if (insn_bits10_11 == 0x00)
        {
          /* Floating point - immediate instructions.  */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
            {
              if (record_debug)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
            }
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversions instructions.  */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  /* Record the V/X register.  */
  aarch64_insn_r->reg_rec_count++;

  /* Some of these instructions may set bits in the FPSR, so record it
     too.  */
  record_buf[1] = AARCH64_FPSR_REGNUM;
  aarch64_insn_r->reg_rec_count++;

  gdb_assert (aarch64_insn_r->reg_rec_count == 2);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
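
/* Illustrative walk-through (editorial, not from the original file),
   assuming the standard A64 encodings: "fcvtzs x0, d1" (0x9e780020) has
   insn_bits24_27 == 0x0e, insn_bit21 set, insn_bits10_11 == 0x00,
   insn_bits12_15 == 0x00 and opcode >> 1 == 0, so it takes the
   "float to int conversion" branch above and records X0; the FPSR entry
   appended at the end then brings reg_rec_count to 2.  */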
/* Decodes insns type and invokes its record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}
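
/* Editorial summary of the dispatch above, written against the A64
   top-level encoding table (op0 = instruction bits 28..25):

     100x  ->  data processing (immediate)
     101x  ->  branch, exception generation, system
     x1x0  ->  loads and stores
     x101  ->  data processing (register)
     x111  ->  data processing (SIMD and floating point)

   which is exactly the sequence of bit tests performed by
   aarch64_record_decode_insn_handler.  */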
/* Cleans up local record registers and memory allocations.  */

static void
deallocate_reg_mem (aarch64_insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}
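
/* Editorial note: the buffers freed here are the ones set up by the
   REG_ALLOC and MEM_ALLOC macros in the record handlers, so every
   successful call into aarch64_record_decode_insn_handler is expected to
   be paired with a deallocate_reg_mem call, as aarch64_process_record
   below does on all paths.  */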
#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} /* namespace selftests */
#endif /* GDB_SELF_TEST */
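
#if GDB_SELF_TEST
/* An illustrative sketch (editorial, not part of the original file) of how
   the harness above could also cover a data processing (register)
   instruction.  0x8b020020 encodes "add x0, x1, x2"; assuming the register
   handler records only Rd when the S bit is clear, exactly one register
   entry is expected.  The function name is hypothetical, and the test
   would still need to be registered via selftests::register_test to run
   under "maintenance selftest".  */

namespace selftests {

static void
aarch64_process_record_add_test (void)
{
  struct gdbarch_info info;
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record aarch64_record;
  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 02 8b	add	x0, x1, x2 */
  aarch64_record.aarch64_insn = 0x8b020020;
  uint32_t ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 1);

  deallocate_reg_mem (&aarch64_record);
}

} /* namespace selftests */
#endif /* GDB_SELF_TEST */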
/* Parse the current instruction and record the values of the registers and
   memory that will be changed by the current instruction into
   record_arch_list.  Return -1 if something goes wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
			CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  const uint8_t insn_size = 4;
  int ret = 0;
  gdb_byte buf[insn_size];
  aarch64_insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
					   insn_size,
					   gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      gdb_printf (gdb_stderr,
		  _("Process record does not support instruction "
		    "0x%0x at address %s.\n"),
		  aarch64_record.aarch64_insn,
		  paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (0 == ret)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
				     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
				     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
	for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
	  if (record_full_arch_list_add_reg (aarch64_record.regcache,
					     aarch64_record.aarch64_regs[rec_no]))
	    ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
	for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
	  if (record_full_arch_list_add_mem
	      ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
	       aarch64_record.aarch64_mems[rec_no].len))
	    ret = -1;

      if (record_full_arch_list_add_end ())
	ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);