]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/aarch64-tdep.c
[gdb/testsuite] Add missing wait in gdb.python/tui-window-disabled.exp
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
213516ef 3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
83b6e1f1 24#include "language.h"
07b287a0
MS
25#include "gdbcmd.h"
26#include "gdbcore.h"
4de283e4 27#include "dis-asm.h"
d55e5aa6
TT
28#include "regcache.h"
29#include "reggroups.h"
4de283e4
TT
30#include "value.h"
31#include "arch-utils.h"
32#include "osabi.h"
33#include "frame-unwind.h"
34#include "frame-base.h"
d55e5aa6 35#include "trad-frame.h"
4de283e4
TT
36#include "objfiles.h"
37#include "dwarf2.h"
82ca8957 38#include "dwarf2/frame.h"
4de283e4
TT
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
07b287a0 42#include "user-regs.h"
4de283e4 43#include "ax-gdb.h"
268a13a5 44#include "gdbsupport/selftest.h"
4de283e4
TT
45
46#include "aarch64-tdep.h"
47#include "aarch64-ravenscar-thread.h"
48
4de283e4
TT
49#include "record.h"
50#include "record-full.h"
51#include "arch/aarch64-insn.h"
0d12e84c 52#include "gdbarch.h"
4de283e4
TT
53
54#include "opcode/aarch64.h"
55#include <algorithm>
0ee6b1c5 56#include <unordered_map>
f77ee802 57
ef139898
LM
58/* For inferior_ptid and current_inferior (). */
59#include "inferior.h"
60
ea92689a
AH
61/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
62 four members. */
63#define HA_MAX_NUM_FLDS 4
64
95228a0d 65/* All possible aarch64 target descriptors. */
0ee6b1c5 66static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
95228a0d 67
ea2f6fad
TV
68/* The standard register names, and all the valid aliases for them.
69 We're not adding fp here, that name is already taken, see
70 _initialize_frame_reg. */
07b287a0
MS
71static const struct
72{
73 const char *const name;
74 int regnum;
75} aarch64_register_aliases[] =
76{
ea2f6fad 77 /* Link register alias for x30. */
07b287a0 78 {"lr", AARCH64_LR_REGNUM},
ea2f6fad
TV
79 /* SP is the canonical name for x31 according to aarch64_r_register_names,
80 so we're adding an x31 alias for sp. */
81 {"x31", AARCH64_SP_REGNUM},
07b287a0
MS
82 /* specials */
83 {"ip0", AARCH64_X0_REGNUM + 16},
84 {"ip1", AARCH64_X0_REGNUM + 17}
85};
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
/* The pointer-authentication mask pseudo registers.  These must stay in
   this order: the low-half (user) masks first, then the high-half
   (kernel) masks, to match the AARCH64_PAUTH_*_REGNUM offset macros.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer, low half/user pointers.  */
  "pauth_dmask",
  /* Authentication mask for code pointer, low half/user pointers.  */
  "pauth_cmask",
  /* Authentication mask for data pointer, high half / kernel pointers.  */
  "pauth_dmask_high",
  /* Authentication mask for code pointer, high half / kernel pointers.  */
  "pauth_cmask_high"
};
/* The MTE (Memory Tagging Extension) registers.  */
static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};
158
29e09a42
TV
159static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);
160
07b287a0
MS
161/* AArch64 prologue cache structure. */
162struct aarch64_prologue_cache
163{
db634143
PL
164 /* The program counter at the start of the function. It is used to
165 identify this frame as a prologue frame. */
166 CORE_ADDR func;
167
168 /* The program counter at the time this frame was created; i.e. where
169 this function was called from. It is used to identify this frame as a
170 stub frame. */
171 CORE_ADDR prev_pc;
172
07b287a0
MS
173 /* The stack pointer at the time this frame was created; i.e. the
174 caller's stack pointer when this function was called. It is used
175 to identify this frame. */
176 CORE_ADDR prev_sp;
177
7dfa3edc
PL
178 /* Is the target available to read from? */
179 int available_p;
180
07b287a0
MS
181 /* The frame base for this frame is just prev_sp - frame size.
182 FRAMESIZE is the distance from the frame pointer to the
183 initial stack pointer. */
184 int framesize;
185
186 /* The register used to hold the frame pointer for this frame. */
187 int framereg;
188
189 /* Saved register offsets. */
098caef4 190 trad_frame_saved_reg *saved_regs;
07b287a0
MS
191};
192
07b287a0
MS
193static void
194show_aarch64_debug (struct ui_file *file, int from_tty,
dda83cd7 195 struct cmd_list_element *c, const char *value)
07b287a0 196{
6cb06a8c 197 gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
07b287a0
MS
198}
199
ffdbe864
YQ
200namespace {
201
4d9a9006
YQ
202/* Abstract instruction reader. */
203
204class abstract_instruction_reader
205{
206public:
207 /* Read in one instruction. */
208 virtual ULONGEST read (CORE_ADDR memaddr, int len,
209 enum bfd_endian byte_order) = 0;
210};
211
212/* Instruction reader from real target. */
213
214class instruction_reader : public abstract_instruction_reader
215{
216 public:
217 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
632e107b 218 override
4d9a9006 219 {
fc2f703e 220 return read_code_unsigned_integer (memaddr, len, byte_order);
4d9a9006
YQ
221 }
222};
223
ffdbe864
YQ
224} // namespace
225
3d31bc39
AH
226/* If address signing is enabled, mask off the signature bits from the link
227 register, which is passed by value in ADDR, using the register values in
228 THIS_FRAME. */
11e1b75f
AH
229
230static CORE_ADDR
345bd07c 231aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
bd2b40ac 232 frame_info_ptr this_frame, CORE_ADDR addr)
11e1b75f
AH
233{
234 if (tdep->has_pauth ()
235 && frame_unwind_register_unsigned (this_frame,
c9cd8ca4 236 tdep->ra_sign_state_regnum))
11e1b75f 237 {
6d002087
LM
238 /* VA range select (bit 55) tells us whether to use the low half masks
239 or the high half masks. */
240 int cmask_num;
241 if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK)
242 cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
243 else
244 cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
245
246 /* By default, we assume TBI and discard the top 8 bits plus the VA range
247 select bit (55). */
248 CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
249 mask |= frame_unwind_register_unsigned (this_frame, cmask_num);
250 addr = aarch64_remove_top_bits (addr, mask);
3d31bc39
AH
251
252 /* Record in the frame that the link register required unmasking. */
253 set_frame_previous_pc_masked (this_frame);
11e1b75f
AH
254 }
255
256 return addr;
257}
258
aa7ca1bb
AH
259/* Implement the "get_pc_address_flags" gdbarch method. */
260
261static std::string
bd2b40ac 262aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
aa7ca1bb
AH
263{
264 if (pc != 0 && get_frame_pc_masked (frame))
265 return "PAC";
266
267 return "";
268}
269
07b287a0
MS
270/* Analyze a prologue, looking for a recognizable stack frame
271 and frame pointer. Scan until we encounter a store that could
272 clobber the stack frame unexpectedly, or an unknown instruction. */
273
274static CORE_ADDR
275aarch64_analyze_prologue (struct gdbarch *gdbarch,
276 CORE_ADDR start, CORE_ADDR limit,
4d9a9006
YQ
277 struct aarch64_prologue_cache *cache,
278 abstract_instruction_reader& reader)
07b287a0
MS
279{
280 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
281 int i;
f8e3fe0d
LM
282
283 /* Whether the stack has been set. This should be true when we notice a SP
284 to FP move or if we are using the SP as the base register for storing
33b5899f 285 data, in case the FP is omitted. */
f8e3fe0d
LM
286 bool seen_stack_set = false;
287
187f5d00
YQ
288 /* Track X registers and D registers in prologue. */
289 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
07b287a0 290
187f5d00 291 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
07b287a0 292 regs[i] = pv_register (i, 0);
f7b7ed97 293 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
07b287a0
MS
294
295 for (; start < limit; start += 4)
296 {
297 uint32_t insn;
d9ebcbce 298 aarch64_inst inst;
07b287a0 299
4d9a9006 300 insn = reader.read (start, 4, byte_order_for_code);
07b287a0 301
561a72d4 302 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
d9ebcbce
YQ
303 break;
304
305 if (inst.opcode->iclass == addsub_imm
306 && (inst.opcode->op == OP_ADD
307 || strcmp ("sub", inst.opcode->name) == 0))
07b287a0 308 {
d9ebcbce
YQ
309 unsigned rd = inst.operands[0].reg.regno;
310 unsigned rn = inst.operands[1].reg.regno;
311
312 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
313 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
314 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
315 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
316
317 if (inst.opcode->op == OP_ADD)
318 {
319 regs[rd] = pv_add_constant (regs[rn],
320 inst.operands[2].imm.value);
321 }
322 else
323 {
324 regs[rd] = pv_add_constant (regs[rn],
325 -inst.operands[2].imm.value);
326 }
f8e3fe0d
LM
327
328 /* Did we move SP to FP? */
329 if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
330 seen_stack_set = true;
d9ebcbce 331 }
60adf22c
TV
332 else if (inst.opcode->iclass == addsub_ext
333 && strcmp ("sub", inst.opcode->name) == 0)
334 {
335 unsigned rd = inst.operands[0].reg.regno;
336 unsigned rn = inst.operands[1].reg.regno;
337 unsigned rm = inst.operands[2].reg.regno;
338
339 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
340 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
341 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
342 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);
343
344 regs[rd] = pv_subtract (regs[rn], regs[rm]);
345 }
d9ebcbce 346 else if (inst.opcode->iclass == branch_imm)
07b287a0
MS
347 {
348 /* Stop analysis on branch. */
349 break;
350 }
d9ebcbce 351 else if (inst.opcode->iclass == condbranch)
07b287a0
MS
352 {
353 /* Stop analysis on branch. */
354 break;
355 }
d9ebcbce 356 else if (inst.opcode->iclass == branch_reg)
07b287a0
MS
357 {
358 /* Stop analysis on branch. */
359 break;
360 }
d9ebcbce 361 else if (inst.opcode->iclass == compbranch)
07b287a0
MS
362 {
363 /* Stop analysis on branch. */
364 break;
365 }
d9ebcbce
YQ
366 else if (inst.opcode->op == OP_MOVZ)
367 {
60adf22c
TV
368 unsigned rd = inst.operands[0].reg.regno;
369
370 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
d9ebcbce 371 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
60adf22c
TV
372 gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
373 gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);
f8e3fe0d
LM
374
375 /* If this shows up before we set the stack, keep going. Otherwise
376 stop the analysis. */
377 if (seen_stack_set)
378 break;
379
60adf22c
TV
380 regs[rd] = pv_constant (inst.operands[1].imm.value
381 << inst.operands[1].shifter.amount);
d9ebcbce
YQ
382 }
383 else if (inst.opcode->iclass == log_shift
384 && strcmp (inst.opcode->name, "orr") == 0)
07b287a0 385 {
d9ebcbce
YQ
386 unsigned rd = inst.operands[0].reg.regno;
387 unsigned rn = inst.operands[1].reg.regno;
388 unsigned rm = inst.operands[2].reg.regno;
389
390 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
391 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
392 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
393
394 if (inst.operands[2].shifter.amount == 0
395 && rn == AARCH64_SP_REGNUM)
07b287a0
MS
396 regs[rd] = regs[rm];
397 else
398 {
c6185dce
SM
399 aarch64_debug_printf ("prologue analysis gave up "
400 "addr=%s opcode=0x%x (orr x register)",
401 core_addr_to_string_nz (start), insn);
402
07b287a0
MS
403 break;
404 }
405 }
d9ebcbce 406 else if (inst.opcode->op == OP_STUR)
07b287a0 407 {
d9ebcbce
YQ
408 unsigned rt = inst.operands[0].reg.regno;
409 unsigned rn = inst.operands[1].addr.base_regno;
75faf5c4 410 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
d9ebcbce
YQ
411
412 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
413 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
414 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
415 gdb_assert (!inst.operands[1].addr.offset.is_reg);
416
75faf5c4
AH
417 stack.store
418 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
419 size, regs[rt]);
f8e3fe0d
LM
420
421 /* Are we storing with SP as a base? */
422 if (rn == AARCH64_SP_REGNUM)
423 seen_stack_set = true;
07b287a0 424 }
d9ebcbce 425 else if ((inst.opcode->iclass == ldstpair_off
03bcd739
YQ
426 || (inst.opcode->iclass == ldstpair_indexed
427 && inst.operands[2].addr.preind))
d9ebcbce 428 && strcmp ("stp", inst.opcode->name) == 0)
07b287a0 429 {
03bcd739 430 /* STP with addressing mode Pre-indexed and Base register. */
187f5d00
YQ
431 unsigned rt1;
432 unsigned rt2;
d9ebcbce
YQ
433 unsigned rn = inst.operands[2].addr.base_regno;
434 int32_t imm = inst.operands[2].addr.offset.imm;
75faf5c4 435 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
d9ebcbce 436
187f5d00
YQ
437 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
438 || inst.operands[0].type == AARCH64_OPND_Ft);
439 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
440 || inst.operands[1].type == AARCH64_OPND_Ft2);
d9ebcbce
YQ
441 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
442 gdb_assert (!inst.operands[2].addr.offset.is_reg);
443
07b287a0
MS
444 /* If recording this store would invalidate the store area
445 (perhaps because rn is not known) then we should abandon
446 further prologue analysis. */
f7b7ed97 447 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
07b287a0
MS
448 break;
449
f7b7ed97 450 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
07b287a0
MS
451 break;
452
187f5d00
YQ
453 rt1 = inst.operands[0].reg.regno;
454 rt2 = inst.operands[1].reg.regno;
455 if (inst.operands[0].type == AARCH64_OPND_Ft)
456 {
187f5d00
YQ
457 rt1 += AARCH64_X_REGISTER_COUNT;
458 rt2 += AARCH64_X_REGISTER_COUNT;
459 }
460
75faf5c4
AH
461 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
462 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
14ac654f 463
d9ebcbce 464 if (inst.operands[2].addr.writeback)
93d96012 465 regs[rn] = pv_add_constant (regs[rn], imm);
07b287a0 466
f8e3fe0d
LM
467 /* Ignore the instruction that allocates stack space and sets
468 the SP. */
469 if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
470 seen_stack_set = true;
07b287a0 471 }
432ec081
YQ
472 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
473 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
474 && (inst.opcode->op == OP_STR_POS
475 || inst.opcode->op == OP_STRF_POS)))
476 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
477 && strcmp ("str", inst.opcode->name) == 0)
478 {
479 /* STR (immediate) */
480 unsigned int rt = inst.operands[0].reg.regno;
481 int32_t imm = inst.operands[1].addr.offset.imm;
482 unsigned int rn = inst.operands[1].addr.base_regno;
75faf5c4 483 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
432ec081
YQ
484 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
485 || inst.operands[0].type == AARCH64_OPND_Ft);
486
487 if (inst.operands[0].type == AARCH64_OPND_Ft)
75faf5c4 488 rt += AARCH64_X_REGISTER_COUNT;
432ec081 489
75faf5c4 490 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
432ec081
YQ
491 if (inst.operands[1].addr.writeback)
492 regs[rn] = pv_add_constant (regs[rn], imm);
f8e3fe0d
LM
493
494 /* Are we storing with SP as a base? */
495 if (rn == AARCH64_SP_REGNUM)
496 seen_stack_set = true;
432ec081 497 }
d9ebcbce 498 else if (inst.opcode->iclass == testbranch)
07b287a0
MS
499 {
500 /* Stop analysis on branch. */
501 break;
502 }
17e116a7
AH
503 else if (inst.opcode->iclass == ic_system)
504 {
345bd07c 505 aarch64_gdbarch_tdep *tdep
08106042 506 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
17e116a7
AH
507 int ra_state_val = 0;
508
509 if (insn == 0xd503233f /* paciasp. */
510 || insn == 0xd503237f /* pacibsp. */)
511 {
512 /* Return addresses are mangled. */
513 ra_state_val = 1;
514 }
515 else if (insn == 0xd50323bf /* autiasp. */
516 || insn == 0xd50323ff /* autibsp. */)
517 {
518 /* Return addresses are not mangled. */
519 ra_state_val = 0;
520 }
37989733
LM
521 else if (IS_BTI (insn))
522 /* We don't need to do anything special for a BTI instruction. */
523 continue;
17e116a7
AH
524 else
525 {
c6185dce
SM
526 aarch64_debug_printf ("prologue analysis gave up addr=%s"
527 " opcode=0x%x (iclass)",
528 core_addr_to_string_nz (start), insn);
17e116a7
AH
529 break;
530 }
531
532 if (tdep->has_pauth () && cache != nullptr)
a9a87d35 533 {
c9cd8ca4 534 int regnum = tdep->ra_sign_state_regnum;
a9a87d35
LM
535 cache->saved_regs[regnum].set_value (ra_state_val);
536 }
17e116a7 537 }
07b287a0
MS
538 else
539 {
c6185dce
SM
540 aarch64_debug_printf ("prologue analysis gave up addr=%s"
541 " opcode=0x%x",
542 core_addr_to_string_nz (start), insn);
543
07b287a0
MS
544 break;
545 }
546 }
547
548 if (cache == NULL)
f7b7ed97 549 return start;
07b287a0
MS
550
551 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
552 {
553 /* Frame pointer is fp. Frame size is constant. */
554 cache->framereg = AARCH64_FP_REGNUM;
555 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
556 }
557 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
558 {
559 /* Try the stack pointer. */
560 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
561 cache->framereg = AARCH64_SP_REGNUM;
562 }
563 else
564 {
565 /* We're just out of luck. We don't know where the frame is. */
566 cache->framereg = -1;
567 cache->framesize = 0;
568 }
569
570 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
571 {
572 CORE_ADDR offset;
573
f7b7ed97 574 if (stack.find_reg (gdbarch, i, &offset))
098caef4 575 cache->saved_regs[i].set_addr (offset);
07b287a0
MS
576 }
577
187f5d00
YQ
578 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
579 {
580 int regnum = gdbarch_num_regs (gdbarch);
581 CORE_ADDR offset;
582
f7b7ed97
TT
583 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
584 &offset))
098caef4 585 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
187f5d00
YQ
586 }
587
07b287a0
MS
588 return start;
589}
590
4d9a9006
YQ
591static CORE_ADDR
592aarch64_analyze_prologue (struct gdbarch *gdbarch,
593 CORE_ADDR start, CORE_ADDR limit,
594 struct aarch64_prologue_cache *cache)
595{
596 instruction_reader reader;
597
598 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
599 reader);
600}
601
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Unit tests for aarch64_analyze_prologue, driven by canned
   instruction sequences.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
914
07b287a0
MS
915/* Implement the "skip_prologue" gdbarch method. */
916
917static CORE_ADDR
918aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
919{
07b287a0 920 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
921
922 /* See if we can determine the end of the prologue via the symbol
923 table. If so, then return either PC, or the PC after the
924 prologue, whichever is greater. */
925 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
926 {
927 CORE_ADDR post_prologue_pc
928 = skip_prologue_using_sal (gdbarch, func_addr);
929
930 if (post_prologue_pc != 0)
325fac50 931 return std::max (pc, post_prologue_pc);
07b287a0
MS
932 }
933
934 /* Can't determine prologue from the symbol table, need to examine
935 instructions. */
936
937 /* Find an upper limit on the function prologue using the debug
938 information. If the debug information could not be used to
939 provide that bound, then use an arbitrary large number as the
940 upper bound. */
941 limit_pc = skip_prologue_using_sal (gdbarch, pc);
942 if (limit_pc == 0)
943 limit_pc = pc + 128; /* Magic. */
944
945 /* Try disassembling prologue. */
946 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
947}
948
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (frame_info_ptr this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      /* Never scan past the point this frame has actually reached.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      /* No symbol information: assume a conventional frame record of
	 saved FP (x29) and LR (x30) at the frame pointer.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}
1001
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* No frame was found by the prologue scan; leave the cache
     unavailable.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp;
  /* If the frame looks already destroyed at PREV_PC, the frame register
     value is used as-is; otherwise add the frame size to recover the
     caller's SP.  */
  if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					cache->prev_pc))
    cache->prev_sp += cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
1038
1039/* Allocate and fill in *THIS_CACHE with information about the prologue of
1040 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1041 Return a pointer to the current aarch64_prologue_cache in
1042 *THIS_CACHE. */
1043
1044static struct aarch64_prologue_cache *
bd2b40ac 1045aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
7dfa3edc
PL
1046{
1047 struct aarch64_prologue_cache *cache;
1048
1049 if (*this_cache != NULL)
9a3c8263 1050 return (struct aarch64_prologue_cache *) *this_cache;
7dfa3edc
PL
1051
1052 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1053 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1054 *this_cache = cache;
1055
a70b8144 1056 try
7dfa3edc
PL
1057 {
1058 aarch64_make_prologue_cache_1 (this_frame, cache);
1059 }
230d2906 1060 catch (const gdb_exception_error &ex)
7dfa3edc
PL
1061 {
1062 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1063 throw;
7dfa3edc 1064 }
7dfa3edc 1065
07b287a0
MS
1066 return cache;
1067}
1068
7dfa3edc
PL
1069/* Implement the "stop_reason" frame_unwind method. */
1070
1071static enum unwind_stop_reason
bd2b40ac 1072aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
7dfa3edc
PL
1073 void **this_cache)
1074{
1075 struct aarch64_prologue_cache *cache
1076 = aarch64_make_prologue_cache (this_frame, this_cache);
1077
1078 if (!cache->available_p)
1079 return UNWIND_UNAVAILABLE;
1080
1081 /* Halt the backtrace at "_start". */
345bd07c 1082 gdbarch *arch = get_frame_arch (this_frame);
08106042 1083 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
345bd07c 1084 if (cache->prev_pc <= tdep->lowest_pc)
7dfa3edc
PL
1085 return UNWIND_OUTERMOST;
1086
1087 /* We've hit a wall, stop. */
1088 if (cache->prev_sp == 0)
1089 return UNWIND_OUTERMOST;
1090
1091 return UNWIND_NO_REASON;
1092}
1093
07b287a0
MS
1094/* Our frame ID for a normal frame is the current function's starting
1095 PC and the caller's SP when we were called. */
1096
1097static void
bd2b40ac 1098aarch64_prologue_this_id (frame_info_ptr this_frame,
07b287a0
MS
1099 void **this_cache, struct frame_id *this_id)
1100{
7c8edfae
PL
1101 struct aarch64_prologue_cache *cache
1102 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 1103
7dfa3edc
PL
1104 if (!cache->available_p)
1105 *this_id = frame_id_build_unavailable_stack (cache->func);
1106 else
1107 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
1108}
1109
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (frame_info_ptr this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      /* If pointer authentication is in effect and the RA_STATE pseudo
	 register holds a value for this frame, strip the pointer
	 signature from the unwound LR.  */
      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else comes from the save locations recorded during the
     prologue scan.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
1160
/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,		/* No extra unwind data.  */
  default_frame_sniffer
};
1172
8b61f75d
PL
1173/* Allocate and fill in *THIS_CACHE with information about the prologue of
1174 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1175 Return a pointer to the current aarch64_prologue_cache in
1176 *THIS_CACHE. */
07b287a0
MS
1177
1178static struct aarch64_prologue_cache *
bd2b40ac 1179aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
07b287a0 1180{
07b287a0 1181 struct aarch64_prologue_cache *cache;
8b61f75d
PL
1182
1183 if (*this_cache != NULL)
9a3c8263 1184 return (struct aarch64_prologue_cache *) *this_cache;
07b287a0
MS
1185
1186 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1187 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
8b61f75d 1188 *this_cache = cache;
07b287a0 1189
a70b8144 1190 try
02a2a705
PL
1191 {
1192 cache->prev_sp = get_frame_register_unsigned (this_frame,
1193 AARCH64_SP_REGNUM);
1194 cache->prev_pc = get_frame_pc (this_frame);
1195 cache->available_p = 1;
1196 }
230d2906 1197 catch (const gdb_exception_error &ex)
02a2a705
PL
1198 {
1199 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1200 throw;
02a2a705 1201 }
07b287a0
MS
1202
1203 return cache;
1204}
1205
02a2a705
PL
1206/* Implement the "stop_reason" frame_unwind method. */
1207
1208static enum unwind_stop_reason
bd2b40ac 1209aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
02a2a705
PL
1210 void **this_cache)
1211{
1212 struct aarch64_prologue_cache *cache
1213 = aarch64_make_stub_cache (this_frame, this_cache);
1214
1215 if (!cache->available_p)
1216 return UNWIND_UNAVAILABLE;
1217
1218 return UNWIND_NO_REASON;
1219}
1220
07b287a0
MS
1221/* Our frame ID for a stub frame is the current SP and LR. */
1222
1223static void
bd2b40ac 1224aarch64_stub_this_id (frame_info_ptr this_frame,
07b287a0
MS
1225 void **this_cache, struct frame_id *this_id)
1226{
8b61f75d
PL
1227 struct aarch64_prologue_cache *cache
1228 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1229
02a2a705
PL
1230 if (cache->available_p)
1231 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1232 else
1233 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1234}
1235
1236/* Implement the "sniffer" frame_unwind method. */
1237
1238static int
1239aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
bd2b40ac 1240 frame_info_ptr this_frame,
07b287a0
MS
1241 void **this_prologue_cache)
1242{
1243 CORE_ADDR addr_in_block;
1244 gdb_byte dummy[4];
1245
1246 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1247 if (in_plt_section (addr_in_block)
07b287a0
MS
1248 /* We also use the stub winder if the target memory is unreadable
1249 to avoid having the prologue unwinder trying to read it. */
1250 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1251 return 1;
1252
1253 return 0;
1254}
1255
/* AArch64 stub unwinder.  Shares the prologue unwinder's prev_register
   implementation; only the cache construction and sniffer differ.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,		/* No extra unwind data.  */
  aarch64_stub_unwind_sniffer
};
1267
1268/* Return the frame base address of *THIS_FRAME. */
1269
1270static CORE_ADDR
bd2b40ac 1271aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
07b287a0 1272{
7c8edfae
PL
1273 struct aarch64_prologue_cache *cache
1274 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1275
1276 return cache->prev_sp - cache->framesize;
1277}
1278
/* AArch64 default frame base information.  All three base callbacks
   resolve to the same address.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
1287
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  Only the PC is handled here; see
   aarch64_dwarf2_frame_init_reg, which installs this as the handler for
   AARCH64_PC_REGNUM only.  */

static struct value *
aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
			      void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The previous PC is the unwound LR, stripped of any pointer
	 authentication signature bits.  */
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (_("Unexpected register %d"), regnum);
    }
}
1310
/* One-byte DWARF expressions used as the saved-value expression for the
   RA_STATE pseudo register (see aarch64_dwarf2_frame_init_reg and
   aarch64_execute_dwarf_cfa_vendor_op below).  */
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       frame_info_ptr this_frame)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The previous PC is recovered from the LR by a callback.  */
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      /* The previous SP is the CFA by definition.  */
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->ra_sign_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum >= tdep->pauth_reg_base
	       && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
	{
	  /* The pauth key registers are not modified by calls.  */
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}
1354
/* Implement the execute_dwarf_cfa_vendor_op method.  Handles the
   AArch64 vendor CFA opcode DW_CFA_AARCH64_negate_ra_state; returns
   false for any other opcode so generic handling applies.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  The state is
	 represented as a one-byte DWARF expression pointing at either
	 op_lit0 or op_lit1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
1390
/* Used for matching BRK instructions for AArch64.  The mask keeps
   bits 31..21 and 4..0 and ignores bits 20..5 (the 16-bit immediate),
   so every BRK #imm16 encoding matches the base pattern.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;

1395/* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1396
1397static bool
1398aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1399{
1400 const uint32_t insn_len = 4;
1401 gdb_byte target_mem[4];
1402
1403 /* Enable the automatic memory restoration from breakpoints while
1404 we read the memory. Otherwise we may find temporary breakpoints, ones
1405 inserted by GDB, and flag them as permanent breakpoints. */
1406 scoped_restore restore_memory
1407 = make_scoped_restore_show_memory_breakpoints (0);
1408
1409 if (target_read_memory (address, target_mem, insn_len) == 0)
1410 {
1411 uint32_t insn =
1412 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
1413 gdbarch_byte_order_for_code (gdbarch));
1414
1415 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1416 of such instructions with different immediate values. Different OS'
1417 may use a different variation, but they have the same outcome. */
1418 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1419 }
1420
1421 return false;
1422}
1423
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
07b287a0 1436
b907456c
AB
1437/* Implement the gdbarch type alignment method, overrides the generic
1438 alignment algorithm for anything that is aarch64 specific. */
07b287a0 1439
b907456c
AB
1440static ULONGEST
1441aarch64_type_align (gdbarch *gdbarch, struct type *t)
07b287a0 1442{
07b287a0 1443 t = check_typedef (t);
bd63c870 1444 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
07b287a0 1445 {
b907456c
AB
1446 /* Use the natural alignment for vector types (the same for
1447 scalar type), but the maximum alignment is 128-bit. */
df86565b 1448 if (t->length () > 16)
b907456c 1449 return 16;
238f2452 1450 else
df86565b 1451 return t->length ();
07b287a0 1452 }
b907456c
AB
1453
1454 /* Allow the common code to calculate the alignment. */
1455 return 0;
07b287a0
MS
1456}
1457
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      /* Base case: a floating-point scalar, at most 128 bits.  */
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex is two base elements of its component type.  */
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    /* Short vectors must be exactly 64 or 128 bits.  */
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    /* Ordinary array: each element contributes its own count.  */
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (type->field (i).is_static ())
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs)  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
1568
1569/* Return true if an argument, whose type is described by TYPE, can be passed or
1570 returned in simd/fp registers, providing enough parameter passing registers
1571 are available. This is as described in the AAPCS64.
1572
1573 Upon successful return, *COUNT returns the number of needed registers,
1574 *FUNDAMENTAL_TYPE contains the type of those registers.
1575
1576 Candidate as per the AAPCS64 5.4.2.C is either a:
1577 - float.
1578 - short-vector.
1579 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1580 all the members are floats and has at most 4 members.
1581 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1582 all the members are short vectors and has at most 4 members.
1583 - Complex (7.1.1)
1584
1585 Note that HFAs and HVAs can include nested structures and arrays. */
1586
0e745c60 1587static bool
ea92689a
AH
1588aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1589 struct type **fundamental_type)
1590{
1591 if (type == nullptr)
1592 return false;
1593
1594 *fundamental_type = nullptr;
1595
1596 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1597 fundamental_type);
1598
1599 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1600 {
1601 *count = ag_count;
1602 return true;
1603 }
1604 else
1605 return false;
1606}
1607
/* AArch64 function call information structure.  Tracks the allocation
   state while marshalling arguments for a dummy call.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};
1629
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = type->length ();
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = arg->contents ().data ();

  info->argnum++;

  /* Copy the value 8 bytes at a time into successive X registers.  */
  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
1669
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  /* All V registers exhausted (PCS: once one argument overflows, no
     later argument may use the remaining V registers).  */
  info->nsrn = 8;
  return 0;
}
1705
1706/* Marshall an argument onto the stack. */
1707
1708static void
1709pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1710 struct value *arg)
07b287a0 1711{
efaf1ae0 1712 const bfd_byte *buf = arg->contents ().data ();
df86565b 1713 int len = type->length ();
07b287a0
MS
1714 int align;
1715 stack_item_t item;
1716
1717 info->argnum++;
1718
b907456c 1719 align = type_align (type);
07b287a0
MS
1720
1721 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1722 Natural alignment of the argument's type. */
1723 align = align_up (align, 8);
1724
1725 /* The AArch64 PCS requires at most doubleword alignment. */
1726 if (align > 16)
1727 align = 16;
1728
c6185dce
SM
1729 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1730 info->nsaa);
07b287a0
MS
1731
1732 item.len = len;
1733 item.data = buf;
89055eaa 1734 info->si.push_back (item);
07b287a0
MS
1735
1736 info->nsaa += len;
1737 if (info->nsaa & (align - 1))
1738 {
1739 /* Push stack alignment padding. */
1740 int pad = align - (info->nsaa & (align - 1));
1741
1742 item.len = pad;
c3c87445 1743 item.data = NULL;
07b287a0 1744
89055eaa 1745 info->si.push_back (item);
07b287a0
MS
1746 info->nsaa += pad;
1747 }
1748}
1749
1750/* Marshall an argument into a sequence of one or more consecutive X
1751 registers or, if insufficient X registers are available then onto
1752 the stack. */
1753
1754static void
1755pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1756 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1757 struct value *arg)
07b287a0 1758{
df86565b 1759 int len = type->length ();
07b287a0
MS
1760 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1761
1762 /* PCS C.13 - Pass in registers if we have enough spare */
1763 if (info->ngrn + nregs <= 8)
1764 {
8e80f9d1 1765 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1766 info->ngrn += nregs;
1767 }
1768 else
1769 {
1770 info->ngrn = 8;
8e80f9d1 1771 pass_on_stack (info, type, arg);
07b287a0
MS
1772 }
1773}
1774
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      /* A scalar float occupies one V register.  */
      return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			arg->contents ().data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
	/* Real and imaginary parts each take a V register.  */
	const bfd_byte *buf = arg->contents ().data ();
	struct type *target_type = check_typedef (arg_type->target_type ());

	if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, target_type->length (),
			  buf + target_type->length ());
      }

    case TYPE_CODE_ARRAY:
      /* A short vector goes in a single V register; a plain array is
	 handled field-wise like a struct below.  */
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			  arg->contents ().data ());
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* HFA/HVA: recurse into each member.  */
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (arg_type->field (i).is_static ())
	    continue;

	  struct value *field = arg->primitive_field (0, i, arg_type);
	  struct type *field_type = check_typedef (field->type ());

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
1832
1833/* Implement the "push_dummy_call" gdbarch method. */
1834
1835static CORE_ADDR
1836aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1837 struct regcache *regcache, CORE_ADDR bp_addr,
1838 int nargs,
cf84fa6b
AH
1839 struct value **args, CORE_ADDR sp,
1840 function_call_return_method return_method,
07b287a0
MS
1841 CORE_ADDR struct_addr)
1842{
07b287a0 1843 int argnum;
07b287a0 1844 struct aarch64_call_info info;
07b287a0 1845
07b287a0
MS
1846 /* We need to know what the type of the called function is in order
1847 to determine the number of named/anonymous arguments for the
1848 actual argument placement, and the return type in order to handle
1849 return value correctly.
1850
1851 The generic code above us views the decision of return in memory
1852 or return in registers as a two stage processes. The language
1853 handler is consulted first and may decide to return in memory (eg
1854 class with copy constructor returned by value), this will cause
1855 the generic code to allocate space AND insert an initial leading
1856 argument.
1857
1858 If the language code does not decide to pass in memory then the
1859 target code is consulted.
1860
1861 If the language code decides to pass in memory we want to move
1862 the pointer inserted as the initial argument from the argument
1863 list and into X8, the conventional AArch64 struct return pointer
38a72da0 1864 register. */
07b287a0
MS
1865
1866 /* Set the return address. For the AArch64, the return breakpoint
1867 is always at BP_ADDR. */
1868 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1869
38a72da0
AH
1870 /* If we were given an initial argument for the return slot, lose it. */
1871 if (return_method == return_method_hidden_param)
07b287a0
MS
1872 {
1873 args++;
1874 nargs--;
1875 }
1876
1877 /* The struct_return pointer occupies X8. */
38a72da0 1878 if (return_method != return_method_normal)
07b287a0 1879 {
c6185dce
SM
1880 aarch64_debug_printf ("struct return in %s = 0x%s",
1881 gdbarch_register_name
1882 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1883 paddress (gdbarch, struct_addr));
1884
07b287a0
MS
1885 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1886 struct_addr);
1887 }
1888
1889 for (argnum = 0; argnum < nargs; argnum++)
1890 {
1891 struct value *arg = args[argnum];
0e745c60
AH
1892 struct type *arg_type, *fundamental_type;
1893 int len, elements;
07b287a0 1894
d0c97917 1895 arg_type = check_typedef (arg->type ());
df86565b 1896 len = arg_type->length ();
07b287a0 1897
0e745c60
AH
1898 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1899 if there are enough spare registers. */
1900 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1901 &fundamental_type))
1902 {
1903 if (info.nsrn + elements <= 8)
1904 {
1905 /* We know that we have sufficient registers available therefore
1906 this will never need to fallback to the stack. */
1907 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1908 arg))
1909 gdb_assert_not_reached ("Failed to push args");
1910 }
1911 else
1912 {
1913 info.nsrn = 8;
1914 pass_on_stack (&info, arg_type, arg);
1915 }
1916 continue;
1917 }
1918
78134374 1919 switch (arg_type->code ())
07b287a0
MS
1920 {
1921 case TYPE_CODE_INT:
1922 case TYPE_CODE_BOOL:
1923 case TYPE_CODE_CHAR:
1924 case TYPE_CODE_RANGE:
1925 case TYPE_CODE_ENUM:
28397ae7 1926 if (len < 4 && !is_fixed_point_type (arg_type))
07b287a0
MS
1927 {
1928 /* Promote to 32 bit integer. */
c6d940a9 1929 if (arg_type->is_unsigned ())
07b287a0
MS
1930 arg_type = builtin_type (gdbarch)->builtin_uint32;
1931 else
1932 arg_type = builtin_type (gdbarch)->builtin_int32;
1933 arg = value_cast (arg_type, arg);
1934 }
8e80f9d1 1935 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1936 break;
1937
07b287a0
MS
1938 case TYPE_CODE_STRUCT:
1939 case TYPE_CODE_ARRAY:
1940 case TYPE_CODE_UNION:
0e745c60 1941 if (len > 16)
07b287a0
MS
1942 {
1943 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1944 invisible reference. */
1945
1946 /* Allocate aligned storage. */
1947 sp = align_down (sp - len, 16);
1948
1949 /* Write the real data into the stack. */
efaf1ae0 1950 write_memory (sp, arg->contents ().data (), len);
07b287a0
MS
1951
1952 /* Construct the indirection. */
1953 arg_type = lookup_pointer_type (arg_type);
1954 arg = value_from_pointer (arg_type, sp);
8e80f9d1 1955 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1956 }
1957 else
1958 /* PCS C.15 / C.18 multiple values pass. */
8e80f9d1 1959 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1960 break;
1961
1962 default:
8e80f9d1 1963 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1964 break;
1965 }
1966 }
1967
1968 /* Make sure stack retains 16 byte alignment. */
1969 if (info.nsaa & 15)
1970 sp -= 16 - (info.nsaa & 15);
1971
89055eaa 1972 while (!info.si.empty ())
07b287a0 1973 {
89055eaa 1974 const stack_item_t &si = info.si.back ();
07b287a0 1975
89055eaa
TT
1976 sp -= si.len;
1977 if (si.data != NULL)
1978 write_memory (sp, si.data, si.len);
1979 info.si.pop_back ();
07b287a0
MS
1980 }
1981
07b287a0
MS
1982 /* Finally, update the SP register. */
1983 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1984
1985 return sp;
1986}
1987
1988/* Implement the "frame_align" gdbarch method. */
1989
1990static CORE_ADDR
1991aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1992{
1993 /* Align the stack to sixteen bytes. */
1994 return sp & ~(CORE_ADDR) 15;
1995}
1996
1997/* Return the type for an AdvSISD Q register. */
1998
1999static struct type *
2000aarch64_vnq_type (struct gdbarch *gdbarch)
2001{
08106042 2002 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2003
2004 if (tdep->vnq_type == NULL)
2005 {
2006 struct type *t;
2007 struct type *elem;
2008
2009 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2010 TYPE_CODE_UNION);
2011
2012 elem = builtin_type (gdbarch)->builtin_uint128;
2013 append_composite_type_field (t, "u", elem);
2014
2015 elem = builtin_type (gdbarch)->builtin_int128;
2016 append_composite_type_field (t, "s", elem);
2017
2018 tdep->vnq_type = t;
2019 }
2020
2021 return tdep->vnq_type;
2022}
2023
2024/* Return the type for an AdvSISD D register. */
2025
2026static struct type *
2027aarch64_vnd_type (struct gdbarch *gdbarch)
2028{
08106042 2029 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2030
2031 if (tdep->vnd_type == NULL)
2032 {
2033 struct type *t;
2034 struct type *elem;
2035
2036 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2037 TYPE_CODE_UNION);
2038
2039 elem = builtin_type (gdbarch)->builtin_double;
2040 append_composite_type_field (t, "f", elem);
2041
2042 elem = builtin_type (gdbarch)->builtin_uint64;
2043 append_composite_type_field (t, "u", elem);
2044
2045 elem = builtin_type (gdbarch)->builtin_int64;
2046 append_composite_type_field (t, "s", elem);
2047
2048 tdep->vnd_type = t;
2049 }
2050
2051 return tdep->vnd_type;
2052}
2053
2054/* Return the type for an AdvSISD S register. */
2055
2056static struct type *
2057aarch64_vns_type (struct gdbarch *gdbarch)
2058{
08106042 2059 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2060
2061 if (tdep->vns_type == NULL)
2062 {
2063 struct type *t;
2064 struct type *elem;
2065
2066 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2067 TYPE_CODE_UNION);
2068
2069 elem = builtin_type (gdbarch)->builtin_float;
2070 append_composite_type_field (t, "f", elem);
2071
2072 elem = builtin_type (gdbarch)->builtin_uint32;
2073 append_composite_type_field (t, "u", elem);
2074
2075 elem = builtin_type (gdbarch)->builtin_int32;
2076 append_composite_type_field (t, "s", elem);
2077
2078 tdep->vns_type = t;
2079 }
2080
2081 return tdep->vns_type;
2082}
2083
2084/* Return the type for an AdvSISD H register. */
2085
2086static struct type *
2087aarch64_vnh_type (struct gdbarch *gdbarch)
2088{
08106042 2089 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2090
2091 if (tdep->vnh_type == NULL)
2092 {
2093 struct type *t;
2094 struct type *elem;
2095
2096 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2097 TYPE_CODE_UNION);
2098
5291fe3c
SP
2099 elem = builtin_type (gdbarch)->builtin_bfloat16;
2100 append_composite_type_field (t, "bf", elem);
2101
a6d0f249
AH
2102 elem = builtin_type (gdbarch)->builtin_half;
2103 append_composite_type_field (t, "f", elem);
2104
07b287a0
MS
2105 elem = builtin_type (gdbarch)->builtin_uint16;
2106 append_composite_type_field (t, "u", elem);
2107
2108 elem = builtin_type (gdbarch)->builtin_int16;
2109 append_composite_type_field (t, "s", elem);
2110
2111 tdep->vnh_type = t;
2112 }
2113
2114 return tdep->vnh_type;
2115}
2116
2117/* Return the type for an AdvSISD B register. */
2118
2119static struct type *
2120aarch64_vnb_type (struct gdbarch *gdbarch)
2121{
08106042 2122 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2123
2124 if (tdep->vnb_type == NULL)
2125 {
2126 struct type *t;
2127 struct type *elem;
2128
2129 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2130 TYPE_CODE_UNION);
2131
2132 elem = builtin_type (gdbarch)->builtin_uint8;
2133 append_composite_type_field (t, "u", elem);
2134
2135 elem = builtin_type (gdbarch)->builtin_int8;
2136 append_composite_type_field (t, "s", elem);
2137
2138 tdep->vnb_type = t;
2139 }
2140
2141 return tdep->vnb_type;
2142}
2143
63bad7b6
AH
2144/* Return the type for an AdvSISD V register. */
2145
2146static struct type *
2147aarch64_vnv_type (struct gdbarch *gdbarch)
2148{
08106042 2149 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6
AH
2150
2151 if (tdep->vnv_type == NULL)
2152 {
09624f1f 2153 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
bffa1015
AH
2154 slice from the non-pseudo vector registers. However NEON V registers
2155 are always vector registers, and need constructing as such. */
2156 const struct builtin_type *bt = builtin_type (gdbarch);
2157
63bad7b6
AH
2158 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2159 TYPE_CODE_UNION);
2160
bffa1015
AH
2161 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2162 TYPE_CODE_UNION);
2163 append_composite_type_field (sub, "f",
2164 init_vector_type (bt->builtin_double, 2));
2165 append_composite_type_field (sub, "u",
2166 init_vector_type (bt->builtin_uint64, 2));
2167 append_composite_type_field (sub, "s",
2168 init_vector_type (bt->builtin_int64, 2));
2169 append_composite_type_field (t, "d", sub);
2170
2171 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2172 TYPE_CODE_UNION);
2173 append_composite_type_field (sub, "f",
2174 init_vector_type (bt->builtin_float, 4));
2175 append_composite_type_field (sub, "u",
2176 init_vector_type (bt->builtin_uint32, 4));
2177 append_composite_type_field (sub, "s",
2178 init_vector_type (bt->builtin_int32, 4));
2179 append_composite_type_field (t, "s", sub);
2180
2181 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2182 TYPE_CODE_UNION);
5291fe3c
SP
2183 append_composite_type_field (sub, "bf",
2184 init_vector_type (bt->builtin_bfloat16, 8));
a6d0f249
AH
2185 append_composite_type_field (sub, "f",
2186 init_vector_type (bt->builtin_half, 8));
bffa1015
AH
2187 append_composite_type_field (sub, "u",
2188 init_vector_type (bt->builtin_uint16, 8));
2189 append_composite_type_field (sub, "s",
2190 init_vector_type (bt->builtin_int16, 8));
2191 append_composite_type_field (t, "h", sub);
2192
2193 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2194 TYPE_CODE_UNION);
2195 append_composite_type_field (sub, "u",
2196 init_vector_type (bt->builtin_uint8, 16));
2197 append_composite_type_field (sub, "s",
2198 init_vector_type (bt->builtin_int8, 16));
2199 append_composite_type_field (t, "b", sub);
2200
2201 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2202 TYPE_CODE_UNION);
2203 append_composite_type_field (sub, "u",
2204 init_vector_type (bt->builtin_uint128, 1));
2205 append_composite_type_field (sub, "s",
2206 init_vector_type (bt->builtin_int128, 1));
2207 append_composite_type_field (t, "q", sub);
63bad7b6
AH
2208
2209 tdep->vnv_type = t;
2210 }
2211
2212 return tdep->vnv_type;
2213}
2214
07b287a0
MS
2215/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2216
2217static int
2218aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2219{
08106042 2220 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
34dcc7cf 2221
07b287a0
MS
2222 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2223 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2224
2225 if (reg == AARCH64_DWARF_SP)
2226 return AARCH64_SP_REGNUM;
2227
1fe84861
YY
2228 if (reg == AARCH64_DWARF_PC)
2229 return AARCH64_PC_REGNUM;
2230
07b287a0
MS
2231 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2232 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2233
65d4cada
AH
2234 if (reg == AARCH64_DWARF_SVE_VG)
2235 return AARCH64_SVE_VG_REGNUM;
2236
2237 if (reg == AARCH64_DWARF_SVE_FFR)
2238 return AARCH64_SVE_FFR_REGNUM;
2239
2240 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2241 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2242
2243 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2244 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2245
34dcc7cf
AH
2246 if (tdep->has_pauth ())
2247 {
c9cd8ca4
LM
2248 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2249 return tdep->ra_sign_state_regnum;
34dcc7cf
AH
2250 }
2251
07b287a0
MS
2252 return -1;
2253}
07b287a0
MS
2254
2255/* Implement the "print_insn" gdbarch method. */
2256
2257static int
2258aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2259{
2260 info->symbols = NULL;
6394c606 2261 return default_print_insn (memaddr, info);
07b287a0
MS
2262}
2263
2264/* AArch64 BRK software debug mode instruction.
2265 Note that AArch64 code is always little-endian.
2266 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 2267constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 2268
04180708 2269typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
07b287a0
MS
2270
2271/* Extract from an array REGS containing the (raw) register state a
2272 function return value of type TYPE, and copy that, in virtual
2273 format, into VALBUF. */
2274
2275static void
2276aarch64_extract_return_value (struct type *type, struct regcache *regs,
2277 gdb_byte *valbuf)
2278{
ac7936df 2279 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2280 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
2281 int elements;
2282 struct type *fundamental_type;
07b287a0 2283
4f4aedeb
AH
2284 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2285 &fundamental_type))
07b287a0 2286 {
df86565b 2287 int len = fundamental_type->length ();
4f4aedeb
AH
2288
2289 for (int i = 0; i < elements; i++)
2290 {
2291 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
2292 /* Enough space for a full vector register. */
2293 gdb_byte buf[register_size (gdbarch, regno)];
2294 gdb_assert (len <= sizeof (buf));
4f4aedeb 2295
c6185dce
SM
2296 aarch64_debug_printf
2297 ("read HFA or HVA return value element %d from %s",
2298 i + 1, gdbarch_register_name (gdbarch, regno));
2299
4f4aedeb 2300 regs->cooked_read (regno, buf);
07b287a0 2301
4f4aedeb
AH
2302 memcpy (valbuf, buf, len);
2303 valbuf += len;
2304 }
07b287a0 2305 }
78134374
SM
2306 else if (type->code () == TYPE_CODE_INT
2307 || type->code () == TYPE_CODE_CHAR
2308 || type->code () == TYPE_CODE_BOOL
2309 || type->code () == TYPE_CODE_PTR
aa006118 2310 || TYPE_IS_REFERENCE (type)
78134374 2311 || type->code () == TYPE_CODE_ENUM)
07b287a0 2312 {
6471e7d2 2313 /* If the type is a plain integer, then the access is
07b287a0
MS
2314 straight-forward. Otherwise we have to play around a bit
2315 more. */
df86565b 2316 int len = type->length ();
07b287a0
MS
2317 int regno = AARCH64_X0_REGNUM;
2318 ULONGEST tmp;
2319
2320 while (len > 0)
2321 {
2322 /* By using store_unsigned_integer we avoid having to do
2323 anything special for small big-endian values. */
2324 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2325 store_unsigned_integer (valbuf,
2326 (len > X_REGISTER_SIZE
2327 ? X_REGISTER_SIZE : len), byte_order, tmp);
2328 len -= X_REGISTER_SIZE;
2329 valbuf += X_REGISTER_SIZE;
2330 }
2331 }
07b287a0
MS
2332 else
2333 {
2334 /* For a structure or union the behaviour is as if the value had
dda83cd7
SM
2335 been stored to word-aligned memory and then loaded into
2336 registers with 64-bit load instruction(s). */
df86565b 2337 int len = type->length ();
07b287a0
MS
2338 int regno = AARCH64_X0_REGNUM;
2339 bfd_byte buf[X_REGISTER_SIZE];
2340
2341 while (len > 0)
2342 {
dca08e1f 2343 regs->cooked_read (regno++, buf);
07b287a0
MS
2344 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2345 len -= X_REGISTER_SIZE;
2346 valbuf += X_REGISTER_SIZE;
2347 }
2348 }
2349}
2350
2351
2352/* Will a function return an aggregate type in memory or in a
2353 register? Return 0 if an aggregate type can be returned in a
2354 register, 1 if it must be returned in memory. */
2355
2356static int
2357aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2358{
f168693b 2359 type = check_typedef (type);
4f4aedeb
AH
2360 int elements;
2361 struct type *fundamental_type;
07b287a0 2362
911627e7
TT
2363 if (TYPE_HAS_DYNAMIC_LENGTH (type))
2364 return 1;
2365
4f4aedeb
AH
2366 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2367 &fundamental_type))
07b287a0 2368 {
cd635f74
YQ
2369 /* v0-v7 are used to return values and one register is allocated
2370 for one member. However, HFA or HVA has at most four members. */
07b287a0
MS
2371 return 0;
2372 }
2373
df86565b 2374 if (type->length () > 16
bab22d06 2375 || !language_pass_by_reference (type).trivially_copyable)
07b287a0
MS
2376 {
2377 /* PCS B.6 Aggregates larger than 16 bytes are passed by
dda83cd7 2378 invisible reference. */
07b287a0
MS
2379
2380 return 1;
2381 }
2382
2383 return 0;
2384}
2385
2386/* Write into appropriate registers a function return value of type
2387 TYPE, given in virtual format. */
2388
2389static void
2390aarch64_store_return_value (struct type *type, struct regcache *regs,
2391 const gdb_byte *valbuf)
2392{
ac7936df 2393 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2394 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
2395 int elements;
2396 struct type *fundamental_type;
07b287a0 2397
4f4aedeb
AH
2398 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2399 &fundamental_type))
07b287a0 2400 {
df86565b 2401 int len = fundamental_type->length ();
4f4aedeb
AH
2402
2403 for (int i = 0; i < elements; i++)
2404 {
2405 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
2406 /* Enough space for a full vector register. */
2407 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2408 gdb_assert (len <= sizeof (tmpbuf));
4f4aedeb 2409
c6185dce
SM
2410 aarch64_debug_printf
2411 ("write HFA or HVA return value element %d to %s",
2412 i + 1, gdbarch_register_name (gdbarch, regno));
07b287a0 2413
4f4aedeb
AH
2414 memcpy (tmpbuf, valbuf,
2415 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2416 regs->cooked_write (regno, tmpbuf);
2417 valbuf += len;
2418 }
07b287a0 2419 }
78134374
SM
2420 else if (type->code () == TYPE_CODE_INT
2421 || type->code () == TYPE_CODE_CHAR
2422 || type->code () == TYPE_CODE_BOOL
2423 || type->code () == TYPE_CODE_PTR
aa006118 2424 || TYPE_IS_REFERENCE (type)
78134374 2425 || type->code () == TYPE_CODE_ENUM)
07b287a0 2426 {
df86565b 2427 if (type->length () <= X_REGISTER_SIZE)
07b287a0
MS
2428 {
2429 /* Values of one word or less are zero/sign-extended and
2430 returned in r0. */
2431 bfd_byte tmpbuf[X_REGISTER_SIZE];
2432 LONGEST val = unpack_long (type, valbuf);
2433
2434 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
b66f5587 2435 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
07b287a0
MS
2436 }
2437 else
2438 {
2439 /* Integral values greater than one word are stored in
2440 consecutive registers starting with r0. This will always
2441 be a multiple of the regiser size. */
df86565b 2442 int len = type->length ();
07b287a0
MS
2443 int regno = AARCH64_X0_REGNUM;
2444
2445 while (len > 0)
2446 {
b66f5587 2447 regs->cooked_write (regno++, valbuf);
07b287a0
MS
2448 len -= X_REGISTER_SIZE;
2449 valbuf += X_REGISTER_SIZE;
2450 }
2451 }
2452 }
07b287a0
MS
2453 else
2454 {
2455 /* For a structure or union the behaviour is as if the value had
2456 been stored to word-aligned memory and then loaded into
2457 registers with 64-bit load instruction(s). */
df86565b 2458 int len = type->length ();
07b287a0
MS
2459 int regno = AARCH64_X0_REGNUM;
2460 bfd_byte tmpbuf[X_REGISTER_SIZE];
2461
2462 while (len > 0)
2463 {
2464 memcpy (tmpbuf, valbuf,
2465 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
b66f5587 2466 regs->cooked_write (regno++, tmpbuf);
07b287a0
MS
2467 len -= X_REGISTER_SIZE;
2468 valbuf += X_REGISTER_SIZE;
2469 }
2470 }
2471}
2472
2473/* Implement the "return_value" gdbarch method. */
2474
2475static enum return_value_convention
2476aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2477 struct type *valtype, struct regcache *regcache,
5cb0f2d5 2478 struct value **read_value, const gdb_byte *writebuf)
07b287a0 2479{
78134374
SM
2480 if (valtype->code () == TYPE_CODE_STRUCT
2481 || valtype->code () == TYPE_CODE_UNION
2482 || valtype->code () == TYPE_CODE_ARRAY)
07b287a0
MS
2483 {
2484 if (aarch64_return_in_memory (gdbarch, valtype))
2485 {
bab22d06
LM
2486 /* From the AAPCS64's Result Return section:
2487
2488 "Otherwise, the caller shall reserve a block of memory of
2489 sufficient size and alignment to hold the result. The address
2490 of the memory block shall be passed as an additional argument to
2491 the function in x8. */
2492
c6185dce 2493 aarch64_debug_printf ("return value in memory");
bab22d06 2494
911627e7 2495 if (read_value != nullptr)
bab22d06
LM
2496 {
2497 CORE_ADDR addr;
2498
2499 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
911627e7 2500 *read_value = value_at_non_lval (valtype, addr);
bab22d06
LM
2501 }
2502
2503 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
07b287a0
MS
2504 }
2505 }
2506
2507 if (writebuf)
2508 aarch64_store_return_value (valtype, regcache, writebuf);
2509
911627e7
TT
2510 if (read_value)
2511 {
317c3ed9 2512 *read_value = value::allocate (valtype);
911627e7 2513 aarch64_extract_return_value (valtype, regcache,
bbe912ba 2514 (*read_value)->contents_raw ().data ());
911627e7 2515 }
07b287a0 2516
c6185dce 2517 aarch64_debug_printf ("return value in registers");
07b287a0
MS
2518
2519 return RETURN_VALUE_REGISTER_CONVENTION;
2520}
2521
2522/* Implement the "get_longjmp_target" gdbarch method. */
2523
2524static int
bd2b40ac 2525aarch64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
07b287a0
MS
2526{
2527 CORE_ADDR jb_addr;
2528 gdb_byte buf[X_REGISTER_SIZE];
2529 struct gdbarch *gdbarch = get_frame_arch (frame);
08106042 2530 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2531 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2532
2533 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2534
2535 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2536 X_REGISTER_SIZE))
2537 return 0;
2538
2539 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2540 return 1;
2541}
ea873d8e
PL
2542
2543/* Implement the "gen_return_address" gdbarch method. */
2544
2545static void
2546aarch64_gen_return_address (struct gdbarch *gdbarch,
2547 struct agent_expr *ax, struct axs_value *value,
2548 CORE_ADDR scope)
2549{
2550 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2551 value->kind = axs_lvalue_register;
2552 value->u.reg = AARCH64_LR_REGNUM;
2553}
07b287a0
MS
2554\f
2555
e63ae49b
LM
2556/* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2557 otherwise. */
2558
2559static bool
2560is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2561{
2562 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2563
2564 if (tdep->w_pseudo_base <= regnum
2565 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2566 return true;
2567
2568 return false;
2569}
2570
07b287a0
MS
2571/* Return the pseudo register name corresponding to register regnum. */
2572
2573static const char *
2574aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2575{
08106042 2576 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2577
e63ae49b
LM
2578 /* W pseudo-registers. Bottom halves of the X registers. */
2579 static const char *const w_name[] =
2580 {
2581 "w0", "w1", "w2", "w3",
2582 "w4", "w5", "w6", "w7",
2583 "w8", "w9", "w10", "w11",
2584 "w12", "w13", "w14", "w15",
2585 "w16", "w17", "w18", "w19",
2586 "w20", "w21", "w22", "w23",
2587 "w24", "w25", "w26", "w27",
2588 "w28", "w29", "w30",
2589 };
2590
07b287a0
MS
2591 static const char *const q_name[] =
2592 {
2593 "q0", "q1", "q2", "q3",
2594 "q4", "q5", "q6", "q7",
2595 "q8", "q9", "q10", "q11",
2596 "q12", "q13", "q14", "q15",
2597 "q16", "q17", "q18", "q19",
2598 "q20", "q21", "q22", "q23",
2599 "q24", "q25", "q26", "q27",
2600 "q28", "q29", "q30", "q31",
2601 };
2602
2603 static const char *const d_name[] =
2604 {
2605 "d0", "d1", "d2", "d3",
2606 "d4", "d5", "d6", "d7",
2607 "d8", "d9", "d10", "d11",
2608 "d12", "d13", "d14", "d15",
2609 "d16", "d17", "d18", "d19",
2610 "d20", "d21", "d22", "d23",
2611 "d24", "d25", "d26", "d27",
2612 "d28", "d29", "d30", "d31",
2613 };
2614
2615 static const char *const s_name[] =
2616 {
2617 "s0", "s1", "s2", "s3",
2618 "s4", "s5", "s6", "s7",
2619 "s8", "s9", "s10", "s11",
2620 "s12", "s13", "s14", "s15",
2621 "s16", "s17", "s18", "s19",
2622 "s20", "s21", "s22", "s23",
2623 "s24", "s25", "s26", "s27",
2624 "s28", "s29", "s30", "s31",
2625 };
2626
2627 static const char *const h_name[] =
2628 {
2629 "h0", "h1", "h2", "h3",
2630 "h4", "h5", "h6", "h7",
2631 "h8", "h9", "h10", "h11",
2632 "h12", "h13", "h14", "h15",
2633 "h16", "h17", "h18", "h19",
2634 "h20", "h21", "h22", "h23",
2635 "h24", "h25", "h26", "h27",
2636 "h28", "h29", "h30", "h31",
2637 };
2638
2639 static const char *const b_name[] =
2640 {
2641 "b0", "b1", "b2", "b3",
2642 "b4", "b5", "b6", "b7",
2643 "b8", "b9", "b10", "b11",
2644 "b12", "b13", "b14", "b15",
2645 "b16", "b17", "b18", "b19",
2646 "b20", "b21", "b22", "b23",
2647 "b24", "b25", "b26", "b27",
2648 "b28", "b29", "b30", "b31",
2649 };
2650
34dcc7cf 2651 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2652
34dcc7cf
AH
2653 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2654 return q_name[p_regnum - AARCH64_Q0_REGNUM];
07b287a0 2655
34dcc7cf
AH
2656 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2657 return d_name[p_regnum - AARCH64_D0_REGNUM];
07b287a0 2658
34dcc7cf
AH
2659 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2660 return s_name[p_regnum - AARCH64_S0_REGNUM];
07b287a0 2661
34dcc7cf
AH
2662 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2663 return h_name[p_regnum - AARCH64_H0_REGNUM];
07b287a0 2664
34dcc7cf
AH
2665 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2666 return b_name[p_regnum - AARCH64_B0_REGNUM];
07b287a0 2667
e63ae49b
LM
2668 /* W pseudo-registers? */
2669 if (is_w_pseudo_register (gdbarch, regnum))
2670 return w_name[regnum - tdep->w_pseudo_base];
2671
63bad7b6
AH
2672 if (tdep->has_sve ())
2673 {
2674 static const char *const sve_v_name[] =
2675 {
2676 "v0", "v1", "v2", "v3",
2677 "v4", "v5", "v6", "v7",
2678 "v8", "v9", "v10", "v11",
2679 "v12", "v13", "v14", "v15",
2680 "v16", "v17", "v18", "v19",
2681 "v20", "v21", "v22", "v23",
2682 "v24", "v25", "v26", "v27",
2683 "v28", "v29", "v30", "v31",
2684 };
2685
34dcc7cf
AH
2686 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2687 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2688 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
63bad7b6
AH
2689 }
2690
34dcc7cf
AH
2691 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2692 prevents it from being read by methods such as
2693 mi_cmd_trace_frame_collected. */
c9cd8ca4 2694 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf
AH
2695 return "";
2696
f34652de 2697 internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
34dcc7cf 2698 p_regnum);
07b287a0
MS
2699}
2700
2701/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2702
2703static struct type *
2704aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2705{
08106042 2706 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2707
34dcc7cf 2708 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2709
34dcc7cf 2710 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0
MS
2711 return aarch64_vnq_type (gdbarch);
2712
34dcc7cf 2713 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2714 return aarch64_vnd_type (gdbarch);
2715
34dcc7cf 2716 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2717 return aarch64_vns_type (gdbarch);
2718
34dcc7cf 2719 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0
MS
2720 return aarch64_vnh_type (gdbarch);
2721
34dcc7cf 2722 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0
MS
2723 return aarch64_vnb_type (gdbarch);
2724
34dcc7cf
AH
2725 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2726 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6
AH
2727 return aarch64_vnv_type (gdbarch);
2728
e63ae49b
LM
2729 /* W pseudo-registers are 32-bit. */
2730 if (is_w_pseudo_register (gdbarch, regnum))
2731 return builtin_type (gdbarch)->builtin_uint32;
2732
c9cd8ca4 2733 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf
AH
2734 return builtin_type (gdbarch)->builtin_uint64;
2735
f34652de 2736 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
34dcc7cf 2737 p_regnum);
07b287a0
MS
2738}
2739
2740/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2741
2742static int
2743aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
dbf5d61b 2744 const struct reggroup *group)
07b287a0 2745{
08106042 2746 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2747
34dcc7cf 2748 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2749
34dcc7cf 2750 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0 2751 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2752 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2753 return (group == all_reggroup || group == vector_reggroup
2754 || group == float_reggroup);
34dcc7cf 2755 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2756 return (group == all_reggroup || group == vector_reggroup
2757 || group == float_reggroup);
34dcc7cf 2758 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0 2759 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2760 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0 2761 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
2762 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2763 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6 2764 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2765 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
c9cd8ca4 2766 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf 2767 return 0;
07b287a0
MS
2768
2769 return group == all_reggroup;
2770}
2771
3c5cd5c3
AH
2772/* Helper for aarch64_pseudo_read_value. */
2773
2774static struct value *
63bad7b6
AH
2775aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2776 readable_regcache *regcache, int regnum_offset,
3c5cd5c3
AH
2777 int regsize, struct value *result_value)
2778{
3c5cd5c3
AH
2779 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2780
63bad7b6
AH
2781 /* Enough space for a full vector register. */
2782 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2783 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2784
3c5cd5c3 2785 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
d00664db
TT
2786 result_value->mark_bytes_unavailable (0,
2787 result_value->type ()->length ());
3c5cd5c3 2788 else
bbe912ba 2789 memcpy (result_value->contents_raw ().data (), reg_buf, regsize);
63bad7b6 2790
3c5cd5c3
AH
2791 return result_value;
2792 }
2793
07b287a0
MS
/* Implement the "pseudo_register_read_value" gdbarch method.

   Handles the W pseudo registers (low 32 bits of the X registers) and the
   scalar views (Q/D/S/H/B and SVE V) of the vector registers.  Returns a
   value of REGNUM's type, with bytes marked unavailable if the backing raw
   register could not be read.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct value *result_value = value::allocate (register_type (gdbarch, regnum));

  result_value->set_lval (lval_register);
  VALUE_REGNUM (result_value) = regnum;

  if (is_w_pseudo_register (gdbarch, regnum))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      /* On big endian the low word lives in the upper half.  */
      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
      gdb_byte data[4];

      /* Read the bottom 4 bytes of X.  */
      if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
	result_value->mark_bytes_unavailable (0, 4);
      else
	memcpy (result_value->contents_raw ().data (), data, 4);

      return result_value;
    }

  /* Convert REGNUM to a 0-based pseudo register index.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bound");
}
2863
3c5cd5c3 2864/* Helper for aarch64_pseudo_write. */
07b287a0
MS
2865
2866static void
63bad7b6
AH
2867aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2868 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2869{
3c5cd5c3 2870 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2871
63bad7b6
AH
2872 /* Enough space for a full vector register. */
2873 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2874 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2875
07b287a0
MS
2876 /* Ensure the register buffer is zero, we want gdb writes of the
2877 various 'scalar' pseudo registers to behavior like architectural
2878 writes, register width bytes are written the remainder are set to
2879 zero. */
63bad7b6 2880 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2881
3c5cd5c3
AH
2882 memcpy (reg_buf, buf, regsize);
2883 regcache->raw_write (v_regnum, reg_buf);
2884}
2885
/* Implement the "pseudo_register_write" gdbarch method.

   Mirrors aarch64_pseudo_read_value: dispatches W pseudo registers to the
   backing X register and the scalar pseudo registers (Q/D/S/H/B, SVE V)
   to aarch64_pseudo_write_1.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (is_w_pseudo_register (gdbarch, regnum))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      /* On big endian the low word lives in the upper half.  */
      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);

      /* First zero-out the contents of X.  */
      ULONGEST zero = 0;
      regcache->raw_write (x_regnum, zero);
      /* Write to the bottom 4 bytes of X.  */
      regcache->raw_write_part (x_regnum, offset, 4, buf);
      return;
    }

  /* Convert REGNUM to a 0-based pseudo register index.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
				   buf);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_SVE_V0_REGNUM,
				   V_REGISTER_SIZE, buf);

  gdb_assert_not_reached ("regnum out of bound");
}
2949
07b287a0
MS
2950/* Callback function for user_reg_add. */
2951
2952static struct value *
bd2b40ac 2953value_of_aarch64_user_reg (frame_info_ptr frame, const void *baton)
07b287a0 2954{
9a3c8263 2955 const int *reg_p = (const int *) baton;
07b287a0
MS
2956
2957 return value_of_register (*reg_p, frame);
2958}
2959\f
2960
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Scans forward from PC for a load-exclusive / store-exclusive pair and
   returns the addresses where breakpoints must be placed to step over the
   whole sequence; returns an empty vector when no sequence is found so the
   caller falls back to normal stepping.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (loc, insn_size,
					  byte_order_for_code,
					  &insn_from_memory))
    {
      /* Assume we don't have an atomic sequence, as we couldn't read the
	 instruction in this location.  */
      return {};
    }

  uint32_t insn = insn_from_memory;
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence
     (load-exclusive has bit 22 set within the ldstexcl class).  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;

      if (!safe_read_memory_unsigned_integer (loc, insn_size,
					      byte_order_for_code,
					      &insn_from_memory))
	{
	  /* Assume we don't have an atomic sequence, as we couldn't read the
	     instruction in this location.  */
	  return {};
	}

      insn = insn_from_memory;
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* Only one conditional branch is supported inside a sequence.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence
	 (bit 22 clear).  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
3062
1152d984
SM
/* Per-instruction state carried across a displaced-stepping operation,
   filled in by the visitor callbacks below and consumed by
   aarch64_displaced_step_fixup.  */

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* It is true when a conditional instruction, such as B.COND, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};
3074
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Base visitor data; must stay first so the struct can be cast to
     aarch64_insn_data by the visitor callbacks.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure recording condition/PC-adjust state for the fixup phase.  */
  aarch64_displaced_step_copy_insn_closure *dsc;
};
3092
/* Implementation of aarch64_insn_visitor method "b" (B/BL).  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* Branch offset re-based from the original address to the scratch
     pad address the copy will execute at.  */
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Offset doesn't fit in a B encoding: write NOP and let the
	 fixup phase adjust the PC instead.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}
3126
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced step this instruction
     differently according to the condition is true or false.  Instead
     of checking COND against conditional flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1           ;
     TAKEN:
     INSN2
  */

  emit_bcond (dsd->insn_buf, cond, 8);
  /* Record that the fixup phase must inspect the PC to tell which
     branch was taken.  */
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}
3153
3154/* Dynamically allocate a new register. If we know the register
3155 statically, we should make it a global as above instead of using this
3156 helper function. */
3157
3158static struct aarch64_register
3159aarch64_register (unsigned num, int is64)
3160{
3161 return (struct aarch64_register) { num, is64 };
3162}
3163
/* Implementation of aarch64_insn_visitor method "cb" (CBZ/CBNZ).  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

     CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
     INSN1           ;
     TAKEN:
     INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  /* Let the fixup phase decide the final PC from which branch was
     taken.  */
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
3187
/* Implementation of aarch64_insn_visitor method "tb" (TBZ/TBNZ).  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction.  We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1         ;
     TAKEN:
     INSN2

  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  /* Let the fixup phase decide the final PC from which branch was
     taken.  */
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
3212
3213/* Implementation of aarch64_insn_visitor method "adr". */
3214
3215static void
3216aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3217 const int is_adrp, struct aarch64_insn_data *data)
3218{
3219 struct aarch64_displaced_step_data *dsd
3220 = (struct aarch64_displaced_step_data *) data;
3221 /* We know exactly the address the ADR{P,} instruction will compute.
3222 We can just write it to the destination register. */
3223 CORE_ADDR address = data->insn_addr + offset;
3224
3225 if (is_adrp)
3226 {
3227 /* Clear the lower 12 bits of the offset to get the 4K page. */
3228 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3229 address & ~0xfff);
3230 }
3231 else
3232 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3233 address);
3234
3235 dsd->dsc->pc_adjust = 4;
3236 emit_nop (dsd->insn_buf);
3237 dsd->insn_count = 1;
3238}
3239
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   PC-relative loads cannot execute at the scratch pad address, so
   compute the literal's address here, place it in RT, and emit a plain
   load through RT instead.  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
3264
/* Implementation of aarch64_insn_visitor method "others": instructions
   that can be copied unmodified, plus the register-branch special
   cases (RET/BR/BLR).  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
  if (masked_insn == BLR)
    {
      /* Emit a BR to the same register and then update LR to the original
	 address (similar to aarch64_displaced_step_b).  Clearing bit 21
	 of the encoding (& 0xffdfffff) turns BLR into BR.  */
      aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
  else
    aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

  /* Register branches set the PC themselves; everything else needs the
     PC advanced past the original instruction afterwards.  */
  if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
    dsd->dsc->pc_adjust = 0;
  else
    dsd->dsc->pc_adjust = 4;
}
3292
/* Visitor callbacks used by aarch64_relocate_instruction when copying
   an instruction to the displaced-stepping scratch pad.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
3303
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Relocates the instruction at FROM into the scratch pad at TO and
   returns a closure describing how to fix up the PC afterwards.
   Returns NULL when displaced stepping is not possible (unreadable or
   undecodable instruction, or a load-exclusive starting an atomic
   sequence).  */

displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;
  ULONGEST insn_from_memory;

  if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
					  &insn_from_memory))
    return nullptr;

  uint32_t insn = insn_from_memory;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
    (new aarch64_displaced_step_copy_insn_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  displaced_debug_printf ("writing insn %.8x at %s",
				  dsd.insn_buf[i],
				  paddress (gdbarch, to + i * 4));

	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor emitted nothing; report failure to the caller.  */
      dsc = NULL;
    }

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
3367
/* Implement the "displaced_step_fixup" gdbarch method.

   Runs after the copied instruction at TO has (or has not) executed and
   repairs the PC using the state recorded in DSC_ by the copy phase.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs, bool completed_p)
{
  CORE_ADDR pc = regcache_read_pc (regs);

  /* If the displaced instruction didn't complete successfully then all we
     need to do is restore the program counter.  */
  if (!completed_p)
    {
      pc = from + (pc - to);
      regcache_write_pc (regs, pc);
      return;
    }

  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      /* The copy phase emitted "B.COND +8; INSN1; INSN2" (see
	 aarch64_displaced_step_b_cond), so PC-TO tells us which arm
	 ran.  */
      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
3437
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   AArch64 always uses hardware single step to execute the copied
   instruction.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
3445
95228a0d
AH
/* Get the correct target description for the given FEATURES set
   (SVE vector quadwords, pauth, MTE, TLS register count, ...).
   A FEATURES.vq of zero means SVE is not supported.  (It is not
   possible to set VQ to zero on an SVE system).

   Descriptions are cached in tdesc_aarch64_map and reused on
   subsequent calls with the same feature set.  */

const target_desc *
aarch64_read_description (const aarch64_features &features)
{
  if (features.vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
	   AARCH64_MAX_SVE_VQ);

  struct target_desc *tdesc = tdesc_aarch64_map[features];

  if (tdesc == NULL)
    {
      /* Not cached yet: build the description and remember it.  */
      tdesc = aarch64_create_target_description (features);
      tdesc_aarch64_map[features] = tdesc;
    }

  return tdesc;
}
3471
ba2d2bb2
AH
3472/* Return the VQ used when creating the target description TDESC. */
3473
1332a140 3474static uint64_t
ba2d2bb2
AH
3475aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3476{
3477 const struct tdesc_feature *feature_sve;
3478
3479 if (!tdesc_has_registers (tdesc))
3480 return 0;
3481
3482 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3483
3484 if (feature_sve == nullptr)
3485 return 0;
3486
12863263
AH
3487 uint64_t vl = tdesc_register_bitsize (feature_sve,
3488 aarch64_sve_register_names[0]) / 8;
ba2d2bb2
AH
3489 return sve_vq_from_vl (vl);
3490}
3491
4f3681cc
TJB
3492/* Get the AArch64 features present in the given target description. */
3493
3494aarch64_features
3495aarch64_features_from_target_desc (const struct target_desc *tdesc)
3496{
3497 aarch64_features features;
3498
3499 if (tdesc == nullptr)
3500 return features;
3501
3502 features.vq = aarch64_get_tdesc_vq (tdesc);
acdf6071
LM
3503
3504 /* We need to look for a couple pauth feature name variations. */
4f3681cc
TJB
3505 features.pauth
3506 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
acdf6071
LM
3507
3508 if (!features.pauth)
3509 features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2")
3510 != nullptr);
3511
4f3681cc
TJB
3512 features.mte
3513 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
ba60b963
LM
3514
3515 const struct tdesc_feature *tls_feature
3516 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
3517
3518 if (tls_feature != nullptr)
3519 {
3520 /* We have TLS registers. Find out how many. */
3521 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
3522 features.tls = 2;
3523 else
3524 features.tls = 1;
3525 }
4f3681cc
TJB
3526
3527 return features;
3528}
3529
76bed0fd
AH
3530/* Implement the "cannot_store_register" gdbarch method. */
3531
3532static int
3533aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3534{
08106042 3535 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
76bed0fd
AH
3536
3537 if (!tdep->has_pauth ())
3538 return 0;
3539
3540 /* Pointer authentication registers are read-only. */
6d002087
LM
3541 return (regnum >= tdep->pauth_reg_base
3542 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count);
76bed0fd
AH
3543}
3544
da729c5c
TT
/* Implement the stack_frame_destroyed_p gdbarch method.
   Returns non-zero when PC is at the function's RET instruction,
   i.e. the frame has already been torn down.  */

static int
aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_start, func_end;
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
					  &insn_from_memory))
    return 0;

  uint32_t insn = insn_from_memory;

  aarch64_inst inst;
  if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
    return 0;

  return streq (inst.opcode->name, "ret");
}
3569
6d002087
LM
/* AArch64 implementation of the remove_non_address_bits gdbarch hook.  Remove
   non address bits from a pointer value.  */

static CORE_ADDR
aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer)
{
  /* By default, we assume TBI and discard the top 8 bits plus the VA range
     select bit (55).  Below we try to fetch information about pointer
     authentication masks in order to make non-address removal more
     precise.  */
  CORE_ADDR mask = AARCH64_TOP_BITS_MASK;

  /* Check if we have an inferior first.  If not, just use the default
     mask.

     We use the inferior_ptid here because the pointer authentication masks
     should be the same across threads of a process.  Since we may not have
     access to the current thread (gdb may have switched to no inferiors
     momentarily), we use the inferior ptid.  */
  if (inferior_ptid != null_ptid)
    {
      /* If we do have an inferior, attempt to fetch its thread's thread_info
	 struct.  */
      thread_info *thread = current_inferior ()->find_thread (inferior_ptid);

      /* If the thread is running, we will not be able to fetch the mask
	 registers.  */
      if (thread != nullptr && thread->state != THREAD_RUNNING)
	{
	  /* Otherwise, fetch the register cache and the masks.  */
	  struct regcache *regs
	    = get_thread_regcache (current_inferior ()->process_target (),
				   inferior_ptid);

	  /* Use the gdbarch from the register cache to check for pointer
	     authentication support, as it matches the features found in
	     that particular thread.  */
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ());

	  /* Is there pointer authentication support?  */
	  if (tdep->has_pauth ())
	    {
	      CORE_ADDR cmask, dmask;
	      int dmask_regnum
		= AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base);
	      int cmask_regnum
		= AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

	      /* If we have a kernel address and we have kernel-mode address
		 mask registers, use those instead.  */
	      if (tdep->pauth_reg_count > 2
		  && pointer & VA_RANGE_SELECT_BIT_MASK)
		{
		  dmask_regnum
		    = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		  cmask_regnum
		    = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		}

	      /* We have both a code mask and a data mask.  For now they are
		 the same, but this may change in the future.  Fall back to
		 the default mask if a read fails.  */
	      if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID)
		dmask = mask;

	      if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID)
		cmask = mask;

	      mask |= aarch64_mask_from_pac_registers (cmask, dmask);
	    }
	}
    }

  return aarch64_remove_top_bits (pointer, mask);
}
3645
07b287a0
MS
3646/* Initialize the current architecture based on INFO. If possible,
3647 re-use an architecture from ARCHES, which is a list of
3648 architectures already created during this debugging session.
3649
3650 Called e.g. at program startup, when reading a core file, and when
3651 reading a binary file. */
3652
3653static struct gdbarch *
3654aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3655{
ccb8d7e8 3656 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
76bed0fd 3657 const struct tdesc_feature *feature_pauth;
ccb8d7e8
AH
3658 bool valid_p = true;
3659 int i, num_regs = 0, num_pseudo_regs = 0;
c9cd8ca4 3660 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
ba60b963 3661 int first_mte_regnum = -1, first_tls_regnum = -1;
4f3681cc 3662 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4da037ef
AH
3663
3664 if (vq > AARCH64_MAX_SVE_VQ)
f34652de 3665 internal_error (_("VQ out of bounds: %s (max %d)"),
596179f7 3666 pulongest (vq), AARCH64_MAX_SVE_VQ);
4da037ef 3667
ccb8d7e8
AH
3668 /* If there is already a candidate, use it. */
3669 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3670 best_arch != nullptr;
3671 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3672 {
345bd07c 3673 aarch64_gdbarch_tdep *tdep
08106042 3674 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4da037ef 3675 if (tdep && tdep->vq == vq)
ccb8d7e8
AH
3676 return best_arch->gdbarch;
3677 }
07b287a0 3678
4da037ef
AH
3679 /* Ensure we always have a target descriptor, and that it is for the given VQ
3680 value. */
ccb8d7e8 3681 const struct target_desc *tdesc = info.target_desc;
4f3681cc
TJB
3682 if (!tdesc_has_registers (tdesc))
3683 tdesc = aarch64_read_description ({});
07b287a0
MS
3684 gdb_assert (tdesc);
3685
ccb8d7e8 3686 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
ba2d2bb2
AH
3687 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3688 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
5e984dbf
LM
3689 const struct tdesc_feature *feature_mte
3690 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
414d5848
JB
3691 const struct tdesc_feature *feature_tls
3692 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
07b287a0 3693
ccb8d7e8
AH
3694 if (feature_core == nullptr)
3695 return nullptr;
07b287a0 3696
c1e1314d 3697 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
07b287a0 3698
ba2d2bb2 3699 /* Validate the description provides the mandatory core R registers
07b287a0
MS
3700 and allocate their numbers. */
3701 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
c1e1314d 3702 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
ba2d2bb2
AH
3703 AARCH64_X0_REGNUM + i,
3704 aarch64_r_register_names[i]);
07b287a0
MS
3705
3706 num_regs = AARCH64_X0_REGNUM + i;
3707
ba2d2bb2 3708 /* Add the V registers. */
ccb8d7e8 3709 if (feature_fpu != nullptr)
07b287a0 3710 {
ccb8d7e8 3711 if (feature_sve != nullptr)
ba2d2bb2
AH
3712 error (_("Program contains both fpu and SVE features."));
3713
3714 /* Validate the description provides the mandatory V registers
3715 and allocate their numbers. */
07b287a0 3716 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
c1e1314d 3717 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
ba2d2bb2
AH
3718 AARCH64_V0_REGNUM + i,
3719 aarch64_v_register_names[i]);
07b287a0
MS
3720
3721 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3722 }
07b287a0 3723
ba2d2bb2 3724 /* Add the SVE registers. */
ccb8d7e8 3725 if (feature_sve != nullptr)
ba2d2bb2
AH
3726 {
3727 /* Validate the description provides the mandatory SVE registers
3728 and allocate their numbers. */
3729 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
c1e1314d 3730 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
ba2d2bb2
AH
3731 AARCH64_SVE_Z0_REGNUM + i,
3732 aarch64_sve_register_names[i]);
3733
3734 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3735 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3736 }
3737
ccb8d7e8 3738 if (feature_fpu != nullptr || feature_sve != nullptr)
ba2d2bb2 3739 {
07b287a0
MS
3740 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3741 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3742 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3743 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3744 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3745 }
3746
414d5848 3747 /* Add the TLS register. */
ba60b963 3748 int tls_register_count = 0;
414d5848
JB
3749 if (feature_tls != nullptr)
3750 {
ba60b963 3751 first_tls_regnum = num_regs;
414d5848 3752
ba60b963
LM
3753 /* Look for the TLS registers. tpidr is required, but tpidr2 is
3754 optional. */
3755 valid_p
3756 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3757 first_tls_regnum, "tpidr");
3758
3759 if (valid_p)
3760 {
3761 tls_register_count++;
3762
3763 bool has_tpidr2
3764 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3765 first_tls_regnum + tls_register_count,
3766 "tpidr2");
3767
3768 /* Figure out how many TLS registers we have. */
3769 if (has_tpidr2)
3770 tls_register_count++;
3771
3772 num_regs += tls_register_count;
3773 }
3774 else
3775 {
3776 warning (_("Provided TLS register feature doesn't contain "
3777 "required tpidr register."));
3778 return nullptr;
3779 }
414d5848
JB
3780 }
3781
acdf6071
LM
3782 /* We have two versions of the pauth target description due to a past bug
3783 where GDB would crash when seeing the first version of the pauth target
3784 description. */
3785 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3786 if (feature_pauth == nullptr)
3787 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2");
3788
76bed0fd 3789 /* Add the pauth registers. */
6d002087 3790 int pauth_masks = 0;
76bed0fd
AH
3791 if (feature_pauth != NULL)
3792 {
3793 first_pauth_regnum = num_regs;
c9cd8ca4 3794 ra_sign_state_offset = num_pseudo_regs;
6d002087
LM
3795
3796 /* Size of the expected register set with all 4 masks. */
3797 int set_size = ARRAY_SIZE (aarch64_pauth_register_names);
3798
3799 /* QEMU exposes a couple additional masks for the high half of the
3800 address. We should either have 2 registers or 4 registers. */
3801 if (tdesc_unnumbered_register (feature_pauth,
3802 "pauth_dmask_high") == 0)
3803 {
3804 /* We did not find pauth_dmask_high, assume we only have
3805 2 masks. We are not dealing with QEMU/Emulators then. */
3806 set_size -= 2;
3807 }
3808
76bed0fd
AH
3809 /* Validate the descriptor provides the mandatory PAUTH registers and
3810 allocate their numbers. */
6d002087 3811 for (i = 0; i < set_size; i++)
c1e1314d 3812 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
76bed0fd
AH
3813 first_pauth_regnum + i,
3814 aarch64_pauth_register_names[i]);
3815
3816 num_regs += i;
34dcc7cf 3817 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
6d002087 3818 pauth_masks = set_size;
76bed0fd
AH
3819 }
3820
5e984dbf
LM
3821 /* Add the MTE registers. */
3822 if (feature_mte != NULL)
3823 {
3824 first_mte_regnum = num_regs;
3825 /* Validate the descriptor provides the mandatory MTE registers and
3826 allocate their numbers. */
3827 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3828 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3829 first_mte_regnum + i,
3830 aarch64_mte_register_names[i]);
3831
3832 num_regs += i;
3833 }
e63ae49b
LM
3834 /* W pseudo-registers */
3835 int first_w_regnum = num_pseudo_regs;
3836 num_pseudo_regs += 31;
5e984dbf 3837
07b287a0 3838 if (!valid_p)
c1e1314d 3839 return nullptr;
07b287a0
MS
3840
3841 /* AArch64 code is always little-endian. */
3842 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3843
2b16913c
SM
3844 gdbarch *gdbarch
3845 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
3846 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3847
3848 /* This should be low enough for everything. */
3849 tdep->lowest_pc = 0x20;
3850 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3851 tdep->jb_elt_size = 8;
4da037ef 3852 tdep->vq = vq;
76bed0fd 3853 tdep->pauth_reg_base = first_pauth_regnum;
6d002087 3854 tdep->pauth_reg_count = pauth_masks;
1ba3a322 3855 tdep->ra_sign_state_regnum = -1;
5e984dbf 3856 tdep->mte_reg_base = first_mte_regnum;
ba60b963
LM
3857 tdep->tls_regnum_base = first_tls_regnum;
3858 tdep->tls_register_count = tls_register_count;
34dcc7cf 3859
07b287a0
MS
3860 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3861 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3862
07b287a0
MS
3863 /* Advance PC across function entry code. */
3864 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3865
3866 /* The stack grows downward. */
3867 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3868
3869 /* Breakpoint manipulation. */
04180708
YQ
3870 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3871 aarch64_breakpoint::kind_from_pc);
3872 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3873 aarch64_breakpoint::bp_from_kind);
07b287a0 3874 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3875 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
3876
3877 /* Information about registers, etc. */
3878 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3879 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3880 set_gdbarch_num_regs (gdbarch, num_regs);
3881
3882 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3883 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3884 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3885 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3886 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3887 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3888 aarch64_pseudo_register_reggroup_p);
76bed0fd 3889 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
07b287a0
MS
3890
3891 /* ABI */
3892 set_gdbarch_short_bit (gdbarch, 16);
3893 set_gdbarch_int_bit (gdbarch, 32);
3894 set_gdbarch_float_bit (gdbarch, 32);
3895 set_gdbarch_double_bit (gdbarch, 64);
3896 set_gdbarch_long_double_bit (gdbarch, 128);
3897 set_gdbarch_long_bit (gdbarch, 64);
3898 set_gdbarch_long_long_bit (gdbarch, 64);
3899 set_gdbarch_ptr_bit (gdbarch, 64);
3900 set_gdbarch_char_signed (gdbarch, 0);
53375380 3901 set_gdbarch_wchar_signed (gdbarch, 0);
07b287a0
MS
3902 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3903 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
552f1157 3904 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
b907456c 3905 set_gdbarch_type_align (gdbarch, aarch64_type_align);
07b287a0 3906
da729c5c
TT
3907 /* Detect whether PC is at a point where the stack has been destroyed. */
3908 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
3909
07b287a0
MS
3910 /* Internal <-> external register number maps. */
3911 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3912
3913 /* Returning results. */
5cb0f2d5 3914 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);
07b287a0
MS
3915
3916 /* Disassembly. */
3917 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3918
3919 /* Virtual tables. */
3920 set_gdbarch_vbit_in_delta (gdbarch, 1);
3921
3922 /* Hook in the ABI-specific overrides, if they have been registered. */
3923 info.target_desc = tdesc;
c1e1314d 3924 info.tdesc_data = tdesc_data.get ();
07b287a0
MS
3925 gdbarch_init_osabi (info, gdbarch);
3926
3927 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
11e1b75f
AH
3928 /* Register DWARF CFA vendor handler. */
3929 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3930 aarch64_execute_dwarf_cfa_vendor_op);
07b287a0 3931
5133a315
LM
3932 /* Permanent/Program breakpoint handling. */
3933 set_gdbarch_program_breakpoint_here_p (gdbarch,
3934 aarch64_program_breakpoint_here_p);
3935
07b287a0
MS
3936 /* Add some default predicates. */
3937 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3938 dwarf2_append_unwinders (gdbarch);
3939 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3940
3941 frame_base_set_default (gdbarch, &aarch64_normal_base);
3942
3943 /* Now we have tuned the configuration, set a few final things,
3944 based on what the OS ABI has told us. */
3945
3946 if (tdep->jb_pc >= 0)
3947 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3948
ea873d8e
PL
3949 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3950
aa7ca1bb
AH
3951 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3952
c1e1314d 3953 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
07b287a0 3954
1ba3a322
LM
3955 /* Fetch the updated number of registers after we're done adding all
3956 entries from features we don't explicitly care about. This is the case
3957 for bare metal debugging stubs that include a lot of system registers. */
3958 num_regs = gdbarch_num_regs (gdbarch);
3959
3960 /* With the number of real registers updated, setup the pseudo-registers and
3961 record their numbers. */
3962
e63ae49b
LM
3963 /* Setup W pseudo-register numbers. */
3964 tdep->w_pseudo_base = first_w_regnum + num_regs;
3965 tdep->w_pseudo_count = 31;
3966
1ba3a322
LM
3967 /* Pointer authentication pseudo-registers. */
3968 if (tdep->has_pauth ())
3969 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
3970
6d002087
LM
3971 /* Architecture hook to remove bits of a pointer that are not part of the
3972 address, like memory tags (MTE) and pointer authentication signatures. */
3973 set_gdbarch_remove_non_address_bits (gdbarch,
3974 aarch64_remove_non_address_bits);
3975
07b287a0
MS
3976 /* Add standard register aliases. */
3977 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3978 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3979 value_of_aarch64_user_reg,
3980 &aarch64_register_aliases[i].regnum);
3981
e8bf1ce4
JB
3982 register_aarch64_ravenscar_ops (gdbarch);
3983
07b287a0
MS
3984 return gdbarch;
3985}
3986
3987static void
3988aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3989{
08106042 3990 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3991
3992 if (tdep == NULL)
3993 return;
3994
09a5d200 3995 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
6cb06a8c 3996 paddress (gdbarch, tdep->lowest_pc));
07b287a0
MS
3997}
3998
0d4c07af 3999#if GDB_SELF_TEST
1e2b521d
YQ
4000namespace selftests
4001{
4002static void aarch64_process_record_test (void);
4003}
0d4c07af 4004#endif
1e2b521d 4005
/* Module initialization: register the AArch64 architecture with GDB's
   gdbarch framework, install the "set/show debug aarch64" maintenance
   commands, and (in self-test builds) register this file's unit
   tests.  Called once at GDB start-up.  */

void _initialize_aarch64_tdep ();
void
_initialize_aarch64_tdep ()
{
  /* Hook aarch64_gdbarch_init/aarch64_dump_tdep into the architecture
     framework for BFD's aarch64 architecture.  */
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
99afc88b
OJ
4029
4030/* AArch64 process record-replay related structures, defines etc. */
4031
/* Allocate AARCH64_INSN_R->aarch64_regs-style register record buffers:
   copy LENGTH uint32_t register numbers from RECORD_BUF into a freshly
   XNEWVEC-allocated array assigned to REGS.  If LENGTH is zero, REGS is
   left untouched.

   LENGTH is captured once in reg_len so that the macro evaluates its
   argument a single time (the original expanded LENGTH again inside
   memcpy, a classic multiple-evaluation hazard).  */

#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
  do \
    { \
      unsigned int reg_len = LENGTH; \
      if (reg_len) \
	{ \
	  REGS = XNEWVEC (uint32_t, reg_len); \
	  memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * reg_len); \
	} \
    } \
  while (0)
99afc88b
OJ
4043
/* Allocate AARCH64_INSN_R->aarch64_mems-style memory record buffers:
   copy LENGTH aarch64_mem_r entries from RECORD_BUF into a freshly
   XNEWVEC-allocated array assigned to MEMS.  If LENGTH is zero, MEMS is
   left untouched.

   LENGTH is captured once in mem_len so that the macro evaluates its
   argument a single time (the original expanded LENGTH again inside
   memcpy, a multiple-evaluation hazard).  */

#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
  do \
    { \
      unsigned int mem_len = LENGTH; \
      if (mem_len) \
	{ \
	  MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
	  memcpy (MEMS, &RECORD_BUF[0], \
		  sizeof (struct aarch64_mem_r) * mem_len); \
	} \
    } \
  while (0)
99afc88b
OJ
4056
4057/* AArch64 record/replay structures and enumerations. */
4058
/* One memory record for AArch64 process record-replay: a contiguous
   region of target memory that an instruction is about to overwrite,
   described as (length, address) so its old contents can be saved.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
4064
/* Result codes returned by the AArch64 record handlers below.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Instruction decoded and recorded.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Instruction recognized but recording it
				   is not supported.  */
  AARCH64_RECORD_UNKNOWN	/* Instruction could not be decoded.  */
};
4071
/* Working state for recording a single AArch64 instruction.  One of
   these is filled in per instruction and threaded through the record
   handlers below; aarch64_regs and aarch64_mems are allocated by the
   REG_ALLOC/MEM_ALLOC macros above.  */

struct aarch64_insn_decode_record
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;			/* Address of insn to be recorded.  */
  uint32_t aarch64_insn;		/* Insn to be recorded.  */
  uint32_t mem_rec_count;		/* Count of memory records.  */
  uint32_t reg_rec_count;		/* Count of register records.  */
  uint32_t *aarch64_regs;		/* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;	/* Memory locations to be recorded.  */
};
99afc88b
OJ
4083
/* Record handler for data processing - register instructions.

   Fills AARCH64_INSN_R->aarch64_regs with the numbers of the registers
   the instruction writes: the destination register Rd and, for
   flag-setting forms, CPSR.  Returns AARCH64_RECORD_SUCCESS on success
   or AARCH64_RECORD_UNKNOWN for encodings this decoder does not
   recognize.  */

static unsigned int
aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  /* Bit 28 clear: logical (shifted register) / add-subtract group.  */
  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register); flags set only for the ANDS/BICS
	 variant (bits 29-30 == 0b11).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract; bit 29 is the S (set-flags) bit.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source): only Rd is written.  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry); bit 29 selects the
		 flag-setting variant.  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions: only the flags change.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4159
4160/* Record handler for data processing - immediate instructions. */
4161
4162static unsigned int
4748a9be 4163aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b 4164{
78cc6c2d 4165 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
99afc88b
OJ
4166 uint32_t record_buf[4];
4167
4168 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
99afc88b
OJ
4169 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4170 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4171
4172 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
4173 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
4174 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
4175 {
4176 record_buf[0] = reg_rd;
4177 aarch64_insn_r->reg_rec_count = 1;
4178 }
4179 else if (insn_bits24_27 == 0x01)
4180 {
4181 /* Add/Subtract (immediate). */
4182 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4183 record_buf[0] = reg_rd;
4184 aarch64_insn_r->reg_rec_count = 1;
4185 if (setflags)
4186 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4187 }
4188 else if (insn_bits24_27 == 0x02 && !insn_bit23)
4189 {
4190 /* Logical (immediate). */
4191 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
4192 record_buf[0] = reg_rd;
4193 aarch64_insn_r->reg_rec_count = 1;
4194 if (setflags)
4195 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4196 }
4197 else
4198 return AARCH64_RECORD_UNKNOWN;
4199
4200 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4201 record_buf);
4202 return AARCH64_RECORD_SUCCESS;
4203}
4204
/* Record handler for branch, exception generation and system
   instructions.

   Records the registers written by the instruction (PC, and LR for
   linking branches; Rt or CPSR for system instructions).  SVC
   instructions are delegated to the OS-ABI-provided
   tdep->aarch64_syscall_record hook with the syscall number read from
   register 8 (x8).  Returns one of the aarch64_record_result codes.  */

static unsigned int
aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
{

  aarch64_gdbarch_tdep *tdep
    = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC (bits 0-1 == 0b01 with the remaining opcode bits
	     clear) is recordable; everything else in this group is
	     unsupported.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* NOTE: other system encodings fall through and record
	     nothing but still return success.  */
	}
      /* Unconditional branch (register); BLR (bits 21-22 == 0b01) also
	 writes the link register.  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate); BL (bit 31 set) also writes the
     link register.  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4286
/* Record handler for advanced SIMD load and store instructions.

   For loads, records the V registers that will be written; for stores,
   records the (length, address) pairs of the target memory that will be
   overwritten, advancing from the base address in Rn by the element
   size for each element transferred.  Bit 22 of the encoding
   distinguishes load (set) from store (clear); bit 24 distinguishes the
   single-structure from the multiple-structure forms.  Returns one of
   the aarch64_record_result codes.  */

static unsigned int
aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  /* Memory records are pushed as consecutive (length, address) pairs;
     mem_index therefore advances by 2 per record.  */
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* Number of structure elements per transfer, 1-4.  */
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the scale/size combination, adjusting scale for the
	 64-bit single-element and replicate (LD*R) forms.  */
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* Replicating load (LD1R etc.): element size comes from
	     size_bits.  Any other encoding here is unknown.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;	/* Element size in bits.  */
      if (replicate)
	/* Replicating loads write one V register per element.  */
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		/* Load: the V register is clobbered.  */
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  /* Store: esize/8 bytes at the current address are
		     clobbered.  */
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 (Q) selects 128-bit vs 64-bit registers.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* Decode the register-repeat count (rpt) and structure-element
	 count (selem) from the opcode.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23: post-index writeback clobbers the base register too.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4452
4453/* Record handler for load and store instructions. */
4454
4455static unsigned int
4748a9be 4456aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4457{
4458 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4459 uint8_t insn_bit23, insn_bit21;
4460 uint8_t opc, size_bits, ld_flag, vector_flag;
4461 uint32_t reg_rn, reg_rt, reg_rt2;
4462 uint64_t datasize, offset;
4463 uint32_t record_buf[8];
4464 uint64_t record_buf_mem[8];
4465 CORE_ADDR address;
4466
4467 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4468 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4469 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4470 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4471 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4472 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4473 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4474 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4475 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4476 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4477 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4478
4479 /* Load/store exclusive. */
4480 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4481 {
4482 if (record_debug)
b277c936 4483 debug_printf ("Process record: load/store exclusive\n");
99afc88b
OJ
4484
4485 if (ld_flag)
4486 {
4487 record_buf[0] = reg_rt;
4488 aarch64_insn_r->reg_rec_count = 1;
4489 if (insn_bit21)
4490 {
4491 record_buf[1] = reg_rt2;
4492 aarch64_insn_r->reg_rec_count = 2;
4493 }
4494 }
4495 else
4496 {
4497 if (insn_bit21)
4498 datasize = (8 << size_bits) * 2;
4499 else
4500 datasize = (8 << size_bits);
4501 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4502 &address);
4503 record_buf_mem[0] = datasize / 8;
4504 record_buf_mem[1] = address;
4505 aarch64_insn_r->mem_rec_count = 1;
4506 if (!insn_bit23)
4507 {
4508 /* Save register rs. */
4509 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4510 aarch64_insn_r->reg_rec_count = 1;
4511 }
4512 }
4513 }
4514 /* Load register (literal) instructions decoding. */
4515 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4516 {
4517 if (record_debug)
b277c936 4518 debug_printf ("Process record: load register (literal)\n");
99afc88b 4519 if (vector_flag)
dda83cd7 4520 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
99afc88b 4521 else
dda83cd7 4522 record_buf[0] = reg_rt;
99afc88b
OJ
4523 aarch64_insn_r->reg_rec_count = 1;
4524 }
4525 /* All types of load/store pair instructions decoding. */
4526 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4527 {
4528 if (record_debug)
b277c936 4529 debug_printf ("Process record: load/store pair\n");
99afc88b
OJ
4530
4531 if (ld_flag)
dda83cd7
SM
4532 {
4533 if (vector_flag)
4534 {
4535 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4536 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4537 }
4538 else
4539 {
4540 record_buf[0] = reg_rt;
4541 record_buf[1] = reg_rt2;
4542 }
4543 aarch64_insn_r->reg_rec_count = 2;
4544 }
99afc88b 4545 else
dda83cd7
SM
4546 {
4547 uint16_t imm7_off;
4548 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4549 if (!vector_flag)
4550 size_bits = size_bits >> 1;
4551 datasize = 8 << (2 + size_bits);
4552 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4553 offset = offset << (2 + size_bits);
4554 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4555 &address);
4556 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4557 {
4558 if (imm7_off & 0x40)
4559 address = address - offset;
4560 else
4561 address = address + offset;
4562 }
4563
4564 record_buf_mem[0] = datasize / 8;
4565 record_buf_mem[1] = address;
4566 record_buf_mem[2] = datasize / 8;
4567 record_buf_mem[3] = address + (datasize / 8);
4568 aarch64_insn_r->mem_rec_count = 2;
4569 }
99afc88b 4570 if (bit (aarch64_insn_r->aarch64_insn, 23))
dda83cd7 4571 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
99afc88b
OJ
4572 }
4573 /* Load/store register (unsigned immediate) instructions. */
4574 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4575 {
4576 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4577 if (!(opc >> 1))
33877125
YQ
4578 {
4579 if (opc & 0x01)
4580 ld_flag = 0x01;
4581 else
4582 ld_flag = 0x0;
4583 }
99afc88b 4584 else
33877125 4585 {
1e2b521d
YQ
4586 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4587 {
4588 /* PRFM (immediate) */
4589 return AARCH64_RECORD_SUCCESS;
4590 }
4591 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4592 {
4593 /* LDRSW (immediate) */
4594 ld_flag = 0x1;
4595 }
33877125 4596 else
1e2b521d
YQ
4597 {
4598 if (opc & 0x01)
4599 ld_flag = 0x01;
4600 else
4601 ld_flag = 0x0;
4602 }
33877125 4603 }
99afc88b
OJ
4604
4605 if (record_debug)
4606 {
b277c936
PL
4607 debug_printf ("Process record: load/store (unsigned immediate):"
4608 " size %x V %d opc %x\n", size_bits, vector_flag,
4609 opc);
99afc88b
OJ
4610 }
4611
4612 if (!ld_flag)
dda83cd7
SM
4613 {
4614 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4615 datasize = 8 << size_bits;
4616 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4617 &address);
4618 offset = offset << size_bits;
4619 address = address + offset;
4620
4621 record_buf_mem[0] = datasize >> 3;
4622 record_buf_mem[1] = address;
4623 aarch64_insn_r->mem_rec_count = 1;
4624 }
99afc88b 4625 else
dda83cd7
SM
4626 {
4627 if (vector_flag)
4628 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4629 else
4630 record_buf[0] = reg_rt;
4631 aarch64_insn_r->reg_rec_count = 1;
4632 }
99afc88b
OJ
4633 }
4634 /* Load/store register (register offset) instructions. */
5d98d3cd
YQ
4635 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4636 && insn_bits10_11 == 0x02 && insn_bit21)
99afc88b
OJ
4637 {
4638 if (record_debug)
b277c936 4639 debug_printf ("Process record: load/store (register offset)\n");
99afc88b
OJ
4640 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4641 if (!(opc >> 1))
dda83cd7
SM
4642 if (opc & 0x01)
4643 ld_flag = 0x01;
4644 else
4645 ld_flag = 0x0;
99afc88b 4646 else
dda83cd7
SM
4647 if (size_bits != 0x03)
4648 ld_flag = 0x01;
4649 else
4650 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
4651
4652 if (!ld_flag)
dda83cd7
SM
4653 {
4654 ULONGEST reg_rm_val;
4655
4656 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4657 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4658 if (bit (aarch64_insn_r->aarch64_insn, 12))
4659 offset = reg_rm_val << size_bits;
4660 else
4661 offset = reg_rm_val;
4662 datasize = 8 << size_bits;
4663 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4664 &address);
4665 address = address + offset;
4666 record_buf_mem[0] = datasize >> 3;
4667 record_buf_mem[1] = address;
4668 aarch64_insn_r->mem_rec_count = 1;
4669 }
99afc88b 4670 else
dda83cd7
SM
4671 {
4672 if (vector_flag)
4673 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4674 else
4675 record_buf[0] = reg_rt;
4676 aarch64_insn_r->reg_rec_count = 1;
4677 }
99afc88b
OJ
4678 }
4679 /* Load/store register (immediate and unprivileged) instructions. */
5d98d3cd
YQ
4680 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4681 && !insn_bit21)
99afc88b
OJ
4682 {
4683 if (record_debug)
4684 {
b277c936
PL
4685 debug_printf ("Process record: load/store "
4686 "(immediate and unprivileged)\n");
99afc88b
OJ
4687 }
4688 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4689 if (!(opc >> 1))
dda83cd7
SM
4690 if (opc & 0x01)
4691 ld_flag = 0x01;
4692 else
4693 ld_flag = 0x0;
99afc88b 4694 else
dda83cd7
SM
4695 if (size_bits != 0x03)
4696 ld_flag = 0x01;
4697 else
4698 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
4699
4700 if (!ld_flag)
dda83cd7
SM
4701 {
4702 uint16_t imm9_off;
4703 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4704 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4705 datasize = 8 << size_bits;
4706 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4707 &address);
4708 if (insn_bits10_11 != 0x01)
4709 {
4710 if (imm9_off & 0x0100)
4711 address = address - offset;
4712 else
4713 address = address + offset;
4714 }
4715 record_buf_mem[0] = datasize >> 3;
4716 record_buf_mem[1] = address;
4717 aarch64_insn_r->mem_rec_count = 1;
4718 }
99afc88b 4719 else
dda83cd7
SM
4720 {
4721 if (vector_flag)
4722 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4723 else
4724 record_buf[0] = reg_rt;
4725 aarch64_insn_r->reg_rec_count = 1;
4726 }
99afc88b 4727 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
dda83cd7 4728 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
99afc88b
OJ
4729 }
4730 /* Advanced SIMD load/store instructions. */
4731 else
4732 return aarch64_record_asimd_load_store (aarch64_insn_r);
4733
4734 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
dda83cd7 4735 record_buf_mem);
99afc88b 4736 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
dda83cd7 4737 record_buf);
99afc88b
OJ
4738 return AARCH64_RECORD_SUCCESS;
4739}
4740
4741/* Record handler for data processing SIMD and floating point instructions. */
4742
4743static unsigned int
4748a9be 4744aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4745{
4746 uint8_t insn_bit21, opcode, rmode, reg_rd;
4747 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4748 uint8_t insn_bits11_14;
4749 uint32_t record_buf[2];
4750
4751 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4752 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4753 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4754 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4755 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4756 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4757 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4758 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4759 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4760
4761 if (record_debug)
b277c936 4762 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
4763
4764 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4765 {
4766 /* Floating point - fixed point conversion instructions. */
4767 if (!insn_bit21)
4768 {
4769 if (record_debug)
b277c936 4770 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
4771
4772 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4773 record_buf[0] = reg_rd;
4774 else
4775 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4776 }
4777 /* Floating point - conditional compare instructions. */
4778 else if (insn_bits10_11 == 0x01)
4779 {
4780 if (record_debug)
b277c936 4781 debug_printf ("FP - conditional compare");
99afc88b
OJ
4782
4783 record_buf[0] = AARCH64_CPSR_REGNUM;
4784 }
4785 /* Floating point - data processing (2-source) and
dda83cd7 4786 conditional select instructions. */
99afc88b
OJ
4787 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4788 {
4789 if (record_debug)
b277c936 4790 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
4791
4792 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4793 }
4794 else if (insn_bits10_11 == 0x00)
4795 {
4796 /* Floating point - immediate instructions. */
4797 if ((insn_bits12_15 & 0x01) == 0x01
4798 || (insn_bits12_15 & 0x07) == 0x04)
4799 {
4800 if (record_debug)
b277c936 4801 debug_printf ("FP - immediate");
99afc88b
OJ
4802 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4803 }
4804 /* Floating point - compare instructions. */
4805 else if ((insn_bits12_15 & 0x03) == 0x02)
4806 {
4807 if (record_debug)
b277c936 4808 debug_printf ("FP - immediate");
99afc88b
OJ
4809 record_buf[0] = AARCH64_CPSR_REGNUM;
4810 }
4811 /* Floating point - integer conversions instructions. */
f62fce35 4812 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
4813 {
4814 /* Convert float to integer instruction. */
4815 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4816 {
4817 if (record_debug)
b277c936 4818 debug_printf ("float to int conversion");
99afc88b
OJ
4819
4820 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4821 }
4822 /* Convert integer to float instruction. */
4823 else if ((opcode >> 1) == 0x01 && !rmode)
4824 {
4825 if (record_debug)
b277c936 4826 debug_printf ("int to float conversion");
99afc88b
OJ
4827
4828 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4829 }
4830 /* Move float to integer instruction. */
4831 else if ((opcode >> 1) == 0x03)
4832 {
4833 if (record_debug)
b277c936 4834 debug_printf ("move float to int");
99afc88b
OJ
4835
4836 if (!(opcode & 0x01))
4837 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4838 else
4839 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4840 }
f62fce35
YQ
4841 else
4842 return AARCH64_RECORD_UNKNOWN;
dda83cd7 4843 }
f62fce35
YQ
4844 else
4845 return AARCH64_RECORD_UNKNOWN;
dda83cd7 4846 }
f62fce35
YQ
4847 else
4848 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
4849 }
4850 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4851 {
4852 if (record_debug)
b277c936 4853 debug_printf ("SIMD copy");
99afc88b
OJ
4854
4855 /* Advanced SIMD copy instructions. */
4856 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4857 && !bit (aarch64_insn_r->aarch64_insn, 15)
4858 && bit (aarch64_insn_r->aarch64_insn, 10))
4859 {
4860 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4861 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4862 else
4863 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4864 }
4865 else
4866 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4867 }
4868 /* All remaining floating point or advanced SIMD instructions. */
4869 else
4870 {
4871 if (record_debug)
b277c936 4872 debug_printf ("all remain");
99afc88b
OJ
4873
4874 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4875 }
4876
4877 if (record_debug)
b277c936 4878 debug_printf ("\n");
99afc88b 4879
bfbe4b84 4880 /* Record the V/X register. */
99afc88b 4881 aarch64_insn_r->reg_rec_count++;
bfbe4b84
LM
4882
4883 /* Some of these instructions may set bits in the FPSR, so record it
4884 too. */
4885 record_buf[1] = AARCH64_FPSR_REGNUM;
4886 aarch64_insn_r->reg_rec_count++;
4887
4888 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
99afc88b
OJ
4889 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4890 record_buf);
4891 return AARCH64_RECORD_SUCCESS;
4892}
4893
4894/* Decodes insns type and invokes its record handler. */
4895
4896static unsigned int
4748a9be 4897aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4898{
4899 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4900
4901 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4902 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4903 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4904 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4905
4906 /* Data processing - immediate instructions. */
4907 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4908 return aarch64_record_data_proc_imm (aarch64_insn_r);
4909
4910 /* Branch, exception generation and system instructions. */
4911 if (ins_bit26 && !ins_bit27 && ins_bit28)
4912 return aarch64_record_branch_except_sys (aarch64_insn_r);
4913
4914 /* Load and store instructions. */
4915 if (!ins_bit25 && ins_bit27)
4916 return aarch64_record_load_store (aarch64_insn_r);
4917
4918 /* Data processing - register instructions. */
4919 if (ins_bit25 && !ins_bit26 && ins_bit27)
4920 return aarch64_record_data_proc_reg (aarch64_insn_r);
4921
4922 /* Data processing - SIMD and floating point instructions. */
4923 if (ins_bit25 && ins_bit26 && ins_bit27)
4924 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4925
4926 return AARCH64_RECORD_UNSUPPORTED;
4927}
4928
/* Cleans up local record registers and memory allocations.

   Frees the register and memory record arrays attached to RECORD by
   REG_ALLOC/MEM_ALLOC; xfree of a NULL pointer is a no-op, so this is
   safe even when no records were produced.  */

static void
deallocate_reg_mem (aarch64_insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}
4937
#if GDB_SELF_TEST
namespace selftests {

/* Self test for the AArch64 process-record decoder: a PRFM (prefetch)
   instruction must decode successfully while recording no register or
   memory changes, since prefetches have no architectural effect.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  /* regcache/this_addr are unused for this instruction, so NULL/0 are
     acceptable here.  */
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4971
99afc88b
OJ
4972/* Parse the current instruction and record the values of the registers and
4973 memory that will be changed in current instruction to record_arch_list
4974 return -1 if something is wrong. */
4975
4976int
4977aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4978 CORE_ADDR insn_addr)
4979{
4980 uint32_t rec_no = 0;
4981 uint8_t insn_size = 4;
4982 uint32_t ret = 0;
99afc88b 4983 gdb_byte buf[insn_size];
4748a9be 4984 aarch64_insn_decode_record aarch64_record;
99afc88b
OJ
4985
4986 memset (&buf[0], 0, insn_size);
4748a9be 4987 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
99afc88b
OJ
4988 target_read_memory (insn_addr, &buf[0], insn_size);
4989 aarch64_record.aarch64_insn
4990 = (uint32_t) extract_unsigned_integer (&buf[0],
4991 insn_size,
4992 gdbarch_byte_order (gdbarch));
4993 aarch64_record.regcache = regcache;
4994 aarch64_record.this_addr = insn_addr;
4995 aarch64_record.gdbarch = gdbarch;
4996
4997 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4998 if (ret == AARCH64_RECORD_UNSUPPORTED)
4999 {
6cb06a8c
TT
5000 gdb_printf (gdb_stderr,
5001 _("Process record does not support instruction "
5002 "0x%0x at address %s.\n"),
5003 aarch64_record.aarch64_insn,
5004 paddress (gdbarch, insn_addr));
99afc88b
OJ
5005 ret = -1;
5006 }
5007
5008 if (0 == ret)
5009 {
5010 /* Record registers. */
5011 record_full_arch_list_add_reg (aarch64_record.regcache,
5012 AARCH64_PC_REGNUM);
5013 /* Always record register CPSR. */
5014 record_full_arch_list_add_reg (aarch64_record.regcache,
5015 AARCH64_CPSR_REGNUM);
5016 if (aarch64_record.aarch64_regs)
5017 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
5018 if (record_full_arch_list_add_reg (aarch64_record.regcache,
5019 aarch64_record.aarch64_regs[rec_no]))
5020 ret = -1;
5021
5022 /* Record memories. */
5023 if (aarch64_record.aarch64_mems)
5024 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
5025 if (record_full_arch_list_add_mem
5026 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
5027 aarch64_record.aarch64_mems[rec_no].len))
5028 ret = -1;
5029
5030 if (record_full_arch_list_add_end ())
5031 ret = -1;
5032 }
5033
5034 deallocate_reg_mem (&aarch64_record);
5035 return ret;
5036}