]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/aarch64-tdep.c
Introduce frame_info_ptr smart pointer class
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
4a94e368 3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
07b287a0
MS
24#include "gdbcmd.h"
25#include "gdbcore.h"
4de283e4 26#include "dis-asm.h"
d55e5aa6
TT
27#include "regcache.h"
28#include "reggroups.h"
4de283e4
TT
29#include "value.h"
30#include "arch-utils.h"
31#include "osabi.h"
32#include "frame-unwind.h"
33#include "frame-base.h"
d55e5aa6 34#include "trad-frame.h"
4de283e4
TT
35#include "objfiles.h"
36#include "dwarf2.h"
82ca8957 37#include "dwarf2/frame.h"
4de283e4
TT
38#include "gdbtypes.h"
39#include "prologue-value.h"
40#include "target-descriptions.h"
07b287a0 41#include "user-regs.h"
4de283e4 42#include "ax-gdb.h"
268a13a5 43#include "gdbsupport/selftest.h"
4de283e4
TT
44
45#include "aarch64-tdep.h"
46#include "aarch64-ravenscar-thread.h"
47
4de283e4
TT
48#include "record.h"
49#include "record-full.h"
50#include "arch/aarch64-insn.h"
0d12e84c 51#include "gdbarch.h"
4de283e4
TT
52
53#include "opcode/aarch64.h"
54#include <algorithm>
0ee6b1c5 55#include <unordered_map>
f77ee802 56
ea92689a
AH
57/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
58 four members. */
59#define HA_MAX_NUM_FLDS 4
60
95228a0d 61/* All possible aarch64 target descriptors. */
0ee6b1c5 62static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
95228a0d 63
07b287a0
MS
64/* The standard register names, and all the valid aliases for them. */
65static const struct
66{
67 const char *const name;
68 int regnum;
69} aarch64_register_aliases[] =
70{
71 /* 64-bit register names. */
72 {"fp", AARCH64_FP_REGNUM},
73 {"lr", AARCH64_LR_REGNUM},
74 {"sp", AARCH64_SP_REGNUM},
07b287a0
MS
75 /* specials */
76 {"ip0", AARCH64_X0_REGNUM + 16},
77 {"ip1", AARCH64_X0_REGNUM + 17}
78};
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
/* Pointer authentication pseudo registers.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};
/* Memory tagging (MTE) registers.  */
static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};
147
07b287a0
MS
148/* AArch64 prologue cache structure. */
149struct aarch64_prologue_cache
150{
db634143
PL
151 /* The program counter at the start of the function. It is used to
152 identify this frame as a prologue frame. */
153 CORE_ADDR func;
154
155 /* The program counter at the time this frame was created; i.e. where
156 this function was called from. It is used to identify this frame as a
157 stub frame. */
158 CORE_ADDR prev_pc;
159
07b287a0
MS
160 /* The stack pointer at the time this frame was created; i.e. the
161 caller's stack pointer when this function was called. It is used
162 to identify this frame. */
163 CORE_ADDR prev_sp;
164
7dfa3edc
PL
165 /* Is the target available to read from? */
166 int available_p;
167
07b287a0
MS
168 /* The frame base for this frame is just prev_sp - frame size.
169 FRAMESIZE is the distance from the frame pointer to the
170 initial stack pointer. */
171 int framesize;
172
173 /* The register used to hold the frame pointer for this frame. */
174 int framereg;
175
176 /* Saved register offsets. */
098caef4 177 trad_frame_saved_reg *saved_regs;
07b287a0
MS
178};
179
07b287a0
MS
180static void
181show_aarch64_debug (struct ui_file *file, int from_tty,
dda83cd7 182 struct cmd_list_element *c, const char *value)
07b287a0 183{
6cb06a8c 184 gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
07b287a0
MS
185}
186
ffdbe864
YQ
187namespace {
188
4d9a9006
YQ
189/* Abstract instruction reader. */
190
191class abstract_instruction_reader
192{
193public:
194 /* Read in one instruction. */
195 virtual ULONGEST read (CORE_ADDR memaddr, int len,
196 enum bfd_endian byte_order) = 0;
197};
198
199/* Instruction reader from real target. */
200
201class instruction_reader : public abstract_instruction_reader
202{
203 public:
204 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
632e107b 205 override
4d9a9006 206 {
fc2f703e 207 return read_code_unsigned_integer (memaddr, len, byte_order);
4d9a9006
YQ
208 }
209};
210
ffdbe864
YQ
211} // namespace
212
3d31bc39
AH
213/* If address signing is enabled, mask off the signature bits from the link
214 register, which is passed by value in ADDR, using the register values in
215 THIS_FRAME. */
11e1b75f
AH
216
217static CORE_ADDR
345bd07c 218aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
3d31bc39 219 struct frame_info *this_frame, CORE_ADDR addr)
11e1b75f
AH
220{
221 if (tdep->has_pauth ()
222 && frame_unwind_register_unsigned (this_frame,
c9cd8ca4 223 tdep->ra_sign_state_regnum))
11e1b75f
AH
224 {
225 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
226 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
227 addr = addr & ~cmask;
3d31bc39
AH
228
229 /* Record in the frame that the link register required unmasking. */
230 set_frame_previous_pc_masked (this_frame);
11e1b75f
AH
231 }
232
233 return addr;
234}
235
aa7ca1bb
AH
236/* Implement the "get_pc_address_flags" gdbarch method. */
237
238static std::string
239aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
240{
241 if (pc != 0 && get_frame_pc_masked (frame))
242 return "PAC";
243
244 return "";
245}
246
07b287a0
MS
247/* Analyze a prologue, looking for a recognizable stack frame
248 and frame pointer. Scan until we encounter a store that could
249 clobber the stack frame unexpectedly, or an unknown instruction. */
250
251static CORE_ADDR
252aarch64_analyze_prologue (struct gdbarch *gdbarch,
253 CORE_ADDR start, CORE_ADDR limit,
4d9a9006
YQ
254 struct aarch64_prologue_cache *cache,
255 abstract_instruction_reader& reader)
07b287a0
MS
256{
257 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
258 int i;
f8e3fe0d
LM
259
260 /* Whether the stack has been set. This should be true when we notice a SP
261 to FP move or if we are using the SP as the base register for storing
262 data, in case the FP is ommitted. */
263 bool seen_stack_set = false;
264
187f5d00
YQ
265 /* Track X registers and D registers in prologue. */
266 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
07b287a0 267
187f5d00 268 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
07b287a0 269 regs[i] = pv_register (i, 0);
f7b7ed97 270 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
07b287a0
MS
271
272 for (; start < limit; start += 4)
273 {
274 uint32_t insn;
d9ebcbce 275 aarch64_inst inst;
07b287a0 276
4d9a9006 277 insn = reader.read (start, 4, byte_order_for_code);
07b287a0 278
561a72d4 279 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
d9ebcbce
YQ
280 break;
281
282 if (inst.opcode->iclass == addsub_imm
283 && (inst.opcode->op == OP_ADD
284 || strcmp ("sub", inst.opcode->name) == 0))
07b287a0 285 {
d9ebcbce
YQ
286 unsigned rd = inst.operands[0].reg.regno;
287 unsigned rn = inst.operands[1].reg.regno;
288
289 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
290 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
291 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
292 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
293
294 if (inst.opcode->op == OP_ADD)
295 {
296 regs[rd] = pv_add_constant (regs[rn],
297 inst.operands[2].imm.value);
298 }
299 else
300 {
301 regs[rd] = pv_add_constant (regs[rn],
302 -inst.operands[2].imm.value);
303 }
f8e3fe0d
LM
304
305 /* Did we move SP to FP? */
306 if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
307 seen_stack_set = true;
d9ebcbce 308 }
60adf22c
TV
309 else if (inst.opcode->iclass == addsub_ext
310 && strcmp ("sub", inst.opcode->name) == 0)
311 {
312 unsigned rd = inst.operands[0].reg.regno;
313 unsigned rn = inst.operands[1].reg.regno;
314 unsigned rm = inst.operands[2].reg.regno;
315
316 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
317 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
318 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
319 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);
320
321 regs[rd] = pv_subtract (regs[rn], regs[rm]);
322 }
d9ebcbce 323 else if (inst.opcode->iclass == branch_imm)
07b287a0
MS
324 {
325 /* Stop analysis on branch. */
326 break;
327 }
d9ebcbce 328 else if (inst.opcode->iclass == condbranch)
07b287a0
MS
329 {
330 /* Stop analysis on branch. */
331 break;
332 }
d9ebcbce 333 else if (inst.opcode->iclass == branch_reg)
07b287a0
MS
334 {
335 /* Stop analysis on branch. */
336 break;
337 }
d9ebcbce 338 else if (inst.opcode->iclass == compbranch)
07b287a0
MS
339 {
340 /* Stop analysis on branch. */
341 break;
342 }
d9ebcbce
YQ
343 else if (inst.opcode->op == OP_MOVZ)
344 {
60adf22c
TV
345 unsigned rd = inst.operands[0].reg.regno;
346
347 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
d9ebcbce 348 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
60adf22c
TV
349 gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
350 gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);
f8e3fe0d
LM
351
352 /* If this shows up before we set the stack, keep going. Otherwise
353 stop the analysis. */
354 if (seen_stack_set)
355 break;
356
60adf22c
TV
357 regs[rd] = pv_constant (inst.operands[1].imm.value
358 << inst.operands[1].shifter.amount);
d9ebcbce
YQ
359 }
360 else if (inst.opcode->iclass == log_shift
361 && strcmp (inst.opcode->name, "orr") == 0)
07b287a0 362 {
d9ebcbce
YQ
363 unsigned rd = inst.operands[0].reg.regno;
364 unsigned rn = inst.operands[1].reg.regno;
365 unsigned rm = inst.operands[2].reg.regno;
366
367 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
368 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
369 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
370
371 if (inst.operands[2].shifter.amount == 0
372 && rn == AARCH64_SP_REGNUM)
07b287a0
MS
373 regs[rd] = regs[rm];
374 else
375 {
c6185dce
SM
376 aarch64_debug_printf ("prologue analysis gave up "
377 "addr=%s opcode=0x%x (orr x register)",
378 core_addr_to_string_nz (start), insn);
379
07b287a0
MS
380 break;
381 }
382 }
d9ebcbce 383 else if (inst.opcode->op == OP_STUR)
07b287a0 384 {
d9ebcbce
YQ
385 unsigned rt = inst.operands[0].reg.regno;
386 unsigned rn = inst.operands[1].addr.base_regno;
75faf5c4 387 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
d9ebcbce
YQ
388
389 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
390 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
391 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
392 gdb_assert (!inst.operands[1].addr.offset.is_reg);
393
75faf5c4
AH
394 stack.store
395 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
396 size, regs[rt]);
f8e3fe0d
LM
397
398 /* Are we storing with SP as a base? */
399 if (rn == AARCH64_SP_REGNUM)
400 seen_stack_set = true;
07b287a0 401 }
d9ebcbce 402 else if ((inst.opcode->iclass == ldstpair_off
03bcd739
YQ
403 || (inst.opcode->iclass == ldstpair_indexed
404 && inst.operands[2].addr.preind))
d9ebcbce 405 && strcmp ("stp", inst.opcode->name) == 0)
07b287a0 406 {
03bcd739 407 /* STP with addressing mode Pre-indexed and Base register. */
187f5d00
YQ
408 unsigned rt1;
409 unsigned rt2;
d9ebcbce
YQ
410 unsigned rn = inst.operands[2].addr.base_regno;
411 int32_t imm = inst.operands[2].addr.offset.imm;
75faf5c4 412 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
d9ebcbce 413
187f5d00
YQ
414 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
415 || inst.operands[0].type == AARCH64_OPND_Ft);
416 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
417 || inst.operands[1].type == AARCH64_OPND_Ft2);
d9ebcbce
YQ
418 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
419 gdb_assert (!inst.operands[2].addr.offset.is_reg);
420
07b287a0
MS
421 /* If recording this store would invalidate the store area
422 (perhaps because rn is not known) then we should abandon
423 further prologue analysis. */
f7b7ed97 424 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
07b287a0
MS
425 break;
426
f7b7ed97 427 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
07b287a0
MS
428 break;
429
187f5d00
YQ
430 rt1 = inst.operands[0].reg.regno;
431 rt2 = inst.operands[1].reg.regno;
432 if (inst.operands[0].type == AARCH64_OPND_Ft)
433 {
187f5d00
YQ
434 rt1 += AARCH64_X_REGISTER_COUNT;
435 rt2 += AARCH64_X_REGISTER_COUNT;
436 }
437
75faf5c4
AH
438 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
439 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
14ac654f 440
d9ebcbce 441 if (inst.operands[2].addr.writeback)
93d96012 442 regs[rn] = pv_add_constant (regs[rn], imm);
07b287a0 443
f8e3fe0d
LM
444 /* Ignore the instruction that allocates stack space and sets
445 the SP. */
446 if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
447 seen_stack_set = true;
07b287a0 448 }
432ec081
YQ
449 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
450 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
451 && (inst.opcode->op == OP_STR_POS
452 || inst.opcode->op == OP_STRF_POS)))
453 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
454 && strcmp ("str", inst.opcode->name) == 0)
455 {
456 /* STR (immediate) */
457 unsigned int rt = inst.operands[0].reg.regno;
458 int32_t imm = inst.operands[1].addr.offset.imm;
459 unsigned int rn = inst.operands[1].addr.base_regno;
75faf5c4 460 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
432ec081
YQ
461 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
462 || inst.operands[0].type == AARCH64_OPND_Ft);
463
464 if (inst.operands[0].type == AARCH64_OPND_Ft)
75faf5c4 465 rt += AARCH64_X_REGISTER_COUNT;
432ec081 466
75faf5c4 467 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
432ec081
YQ
468 if (inst.operands[1].addr.writeback)
469 regs[rn] = pv_add_constant (regs[rn], imm);
f8e3fe0d
LM
470
471 /* Are we storing with SP as a base? */
472 if (rn == AARCH64_SP_REGNUM)
473 seen_stack_set = true;
432ec081 474 }
d9ebcbce 475 else if (inst.opcode->iclass == testbranch)
07b287a0
MS
476 {
477 /* Stop analysis on branch. */
478 break;
479 }
17e116a7
AH
480 else if (inst.opcode->iclass == ic_system)
481 {
345bd07c 482 aarch64_gdbarch_tdep *tdep
08106042 483 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
17e116a7
AH
484 int ra_state_val = 0;
485
486 if (insn == 0xd503233f /* paciasp. */
487 || insn == 0xd503237f /* pacibsp. */)
488 {
489 /* Return addresses are mangled. */
490 ra_state_val = 1;
491 }
492 else if (insn == 0xd50323bf /* autiasp. */
493 || insn == 0xd50323ff /* autibsp. */)
494 {
495 /* Return addresses are not mangled. */
496 ra_state_val = 0;
497 }
37989733
LM
498 else if (IS_BTI (insn))
499 /* We don't need to do anything special for a BTI instruction. */
500 continue;
17e116a7
AH
501 else
502 {
c6185dce
SM
503 aarch64_debug_printf ("prologue analysis gave up addr=%s"
504 " opcode=0x%x (iclass)",
505 core_addr_to_string_nz (start), insn);
17e116a7
AH
506 break;
507 }
508
509 if (tdep->has_pauth () && cache != nullptr)
a9a87d35 510 {
c9cd8ca4 511 int regnum = tdep->ra_sign_state_regnum;
a9a87d35
LM
512 cache->saved_regs[regnum].set_value (ra_state_val);
513 }
17e116a7 514 }
07b287a0
MS
515 else
516 {
c6185dce
SM
517 aarch64_debug_printf ("prologue analysis gave up addr=%s"
518 " opcode=0x%x",
519 core_addr_to_string_nz (start), insn);
520
07b287a0
MS
521 break;
522 }
523 }
524
525 if (cache == NULL)
f7b7ed97 526 return start;
07b287a0
MS
527
528 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
529 {
530 /* Frame pointer is fp. Frame size is constant. */
531 cache->framereg = AARCH64_FP_REGNUM;
532 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
533 }
534 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
535 {
536 /* Try the stack pointer. */
537 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
538 cache->framereg = AARCH64_SP_REGNUM;
539 }
540 else
541 {
542 /* We're just out of luck. We don't know where the frame is. */
543 cache->framereg = -1;
544 cache->framesize = 0;
545 }
546
547 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
548 {
549 CORE_ADDR offset;
550
f7b7ed97 551 if (stack.find_reg (gdbarch, i, &offset))
098caef4 552 cache->saved_regs[i].set_addr (offset);
07b287a0
MS
553 }
554
187f5d00
YQ
555 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
556 {
557 int regnum = gdbarch_num_regs (gdbarch);
558 CORE_ADDR offset;
559
f7b7ed97
TT
560 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
561 &offset))
098caef4 562 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
187f5d00
YQ
563 }
564
07b287a0
MS
565 return start;
566}
567
4d9a9006
YQ
568static CORE_ADDR
569aarch64_analyze_prologue (struct gdbarch *gdbarch,
570 CORE_ADDR start, CORE_ADDR limit,
571 struct aarch64_prologue_cache *cache)
572{
573 instruction_reader reader;
574
575 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
576 reader);
577}
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Exercise aarch64_analyze_prologue on a set of canned instruction
   sequences and check the resulting prologue cache contents.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
891
07b287a0
MS
892/* Implement the "skip_prologue" gdbarch method. */
893
894static CORE_ADDR
895aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
896{
07b287a0 897 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
898
899 /* See if we can determine the end of the prologue via the symbol
900 table. If so, then return either PC, or the PC after the
901 prologue, whichever is greater. */
902 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
903 {
904 CORE_ADDR post_prologue_pc
905 = skip_prologue_using_sal (gdbarch, func_addr);
906
907 if (post_prologue_pc != 0)
325fac50 908 return std::max (pc, post_prologue_pc);
07b287a0
MS
909 }
910
911 /* Can't determine prologue from the symbol table, need to examine
912 instructions. */
913
914 /* Find an upper limit on the function prologue using the debug
915 information. If the debug information could not be used to
916 provide that bound, then use an arbitrary large number as the
917 upper bound. */
918 limit_pc = skip_prologue_using_sal (gdbarch, pc);
919 if (limit_pc == 0)
920 limit_pc = pc + 128; /* Magic. */
921
922 /* Try disassembling prologue. */
923 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
924}
925
926/* Scan the function prologue for THIS_FRAME and populate the prologue
927 cache CACHE. */
928
929static void
930aarch64_scan_prologue (struct frame_info *this_frame,
931 struct aarch64_prologue_cache *cache)
932{
933 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
934 CORE_ADDR prologue_start;
935 CORE_ADDR prologue_end;
936 CORE_ADDR prev_pc = get_frame_pc (this_frame);
937 struct gdbarch *gdbarch = get_frame_arch (this_frame);
938
db634143
PL
939 cache->prev_pc = prev_pc;
940
07b287a0
MS
941 /* Assume we do not find a frame. */
942 cache->framereg = -1;
943 cache->framesize = 0;
944
945 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
946 &prologue_end))
947 {
948 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
949
950 if (sal.line == 0)
951 {
952 /* No line info so use the current PC. */
953 prologue_end = prev_pc;
954 }
955 else if (sal.end < prologue_end)
956 {
957 /* The next line begins after the function end. */
958 prologue_end = sal.end;
959 }
960
325fac50 961 prologue_end = std::min (prologue_end, prev_pc);
07b287a0
MS
962 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
963 }
964 else
965 {
966 CORE_ADDR frame_loc;
07b287a0
MS
967
968 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
969 if (frame_loc == 0)
970 return;
971
972 cache->framereg = AARCH64_FP_REGNUM;
973 cache->framesize = 16;
098caef4
LM
974 cache->saved_regs[29].set_addr (0);
975 cache->saved_regs[30].set_addr (8);
07b287a0
MS
976 }
977}
978
7dfa3edc
PL
979/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
980 function may throw an exception if the inferior's registers or memory is
981 not available. */
07b287a0 982
7dfa3edc
PL
983static void
984aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
985 struct aarch64_prologue_cache *cache)
07b287a0 986{
07b287a0
MS
987 CORE_ADDR unwound_fp;
988 int reg;
989
07b287a0
MS
990 aarch64_scan_prologue (this_frame, cache);
991
992 if (cache->framereg == -1)
7dfa3edc 993 return;
07b287a0
MS
994
995 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
996 if (unwound_fp == 0)
7dfa3edc 997 return;
07b287a0
MS
998
999 cache->prev_sp = unwound_fp + cache->framesize;
1000
1001 /* Calculate actual addresses of saved registers using offsets
1002 determined by aarch64_analyze_prologue. */
1003 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
a9a87d35 1004 if (cache->saved_regs[reg].is_addr ())
098caef4
LM
1005 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
1006 + cache->prev_sp);
07b287a0 1007
db634143
PL
1008 cache->func = get_frame_func (this_frame);
1009
7dfa3edc
PL
1010 cache->available_p = 1;
1011}
1012
1013/* Allocate and fill in *THIS_CACHE with information about the prologue of
1014 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1015 Return a pointer to the current aarch64_prologue_cache in
1016 *THIS_CACHE. */
1017
1018static struct aarch64_prologue_cache *
1019aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
1020{
1021 struct aarch64_prologue_cache *cache;
1022
1023 if (*this_cache != NULL)
9a3c8263 1024 return (struct aarch64_prologue_cache *) *this_cache;
7dfa3edc
PL
1025
1026 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1027 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1028 *this_cache = cache;
1029
a70b8144 1030 try
7dfa3edc
PL
1031 {
1032 aarch64_make_prologue_cache_1 (this_frame, cache);
1033 }
230d2906 1034 catch (const gdb_exception_error &ex)
7dfa3edc
PL
1035 {
1036 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1037 throw;
7dfa3edc 1038 }
7dfa3edc 1039
07b287a0
MS
1040 return cache;
1041}
1042
7dfa3edc
PL
1043/* Implement the "stop_reason" frame_unwind method. */
1044
1045static enum unwind_stop_reason
1046aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
1047 void **this_cache)
1048{
1049 struct aarch64_prologue_cache *cache
1050 = aarch64_make_prologue_cache (this_frame, this_cache);
1051
1052 if (!cache->available_p)
1053 return UNWIND_UNAVAILABLE;
1054
1055 /* Halt the backtrace at "_start". */
345bd07c 1056 gdbarch *arch = get_frame_arch (this_frame);
08106042 1057 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
345bd07c 1058 if (cache->prev_pc <= tdep->lowest_pc)
7dfa3edc
PL
1059 return UNWIND_OUTERMOST;
1060
1061 /* We've hit a wall, stop. */
1062 if (cache->prev_sp == 0)
1063 return UNWIND_OUTERMOST;
1064
1065 return UNWIND_NO_REASON;
1066}
1067
07b287a0
MS
1068/* Our frame ID for a normal frame is the current function's starting
1069 PC and the caller's SP when we were called. */
1070
1071static void
1072aarch64_prologue_this_id (struct frame_info *this_frame,
1073 void **this_cache, struct frame_id *this_id)
1074{
7c8edfae
PL
1075 struct aarch64_prologue_cache *cache
1076 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 1077
7dfa3edc
PL
1078 if (!cache->available_p)
1079 *this_id = frame_id_build_unavailable_stack (cache->func);
1080 else
1081 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
1082}
1083
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      /* With pointer authentication, the saved LR may carry a
	 signature; strip it when RA_SIGN_STATE indicates it is
	 signed.  */
      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* All other registers: use the save locations recorded in the
     prologue cache.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
1134
/* AArch64 prologue unwinder.  Used for normal frames when no DWARF CFI
   applies; frame layout is recovered by scanning the prologue.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,				/* unwind_data */
  default_frame_sniffer
};
1146
8b61f75d
PL
1147/* Allocate and fill in *THIS_CACHE with information about the prologue of
1148 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1149 Return a pointer to the current aarch64_prologue_cache in
1150 *THIS_CACHE. */
07b287a0
MS
1151
1152static struct aarch64_prologue_cache *
8b61f75d 1153aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
07b287a0 1154{
07b287a0 1155 struct aarch64_prologue_cache *cache;
8b61f75d
PL
1156
1157 if (*this_cache != NULL)
9a3c8263 1158 return (struct aarch64_prologue_cache *) *this_cache;
07b287a0
MS
1159
1160 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1161 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
8b61f75d 1162 *this_cache = cache;
07b287a0 1163
a70b8144 1164 try
02a2a705
PL
1165 {
1166 cache->prev_sp = get_frame_register_unsigned (this_frame,
1167 AARCH64_SP_REGNUM);
1168 cache->prev_pc = get_frame_pc (this_frame);
1169 cache->available_p = 1;
1170 }
230d2906 1171 catch (const gdb_exception_error &ex)
02a2a705
PL
1172 {
1173 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1174 throw;
02a2a705 1175 }
07b287a0
MS
1176
1177 return cache;
1178}
1179
02a2a705
PL
1180/* Implement the "stop_reason" frame_unwind method. */
1181
1182static enum unwind_stop_reason
1183aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1184 void **this_cache)
1185{
1186 struct aarch64_prologue_cache *cache
1187 = aarch64_make_stub_cache (this_frame, this_cache);
1188
1189 if (!cache->available_p)
1190 return UNWIND_UNAVAILABLE;
1191
1192 return UNWIND_NO_REASON;
1193}
1194
07b287a0
MS
1195/* Our frame ID for a stub frame is the current SP and LR. */
1196
1197static void
1198aarch64_stub_this_id (struct frame_info *this_frame,
1199 void **this_cache, struct frame_id *this_id)
1200{
8b61f75d
PL
1201 struct aarch64_prologue_cache *cache
1202 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1203
02a2a705
PL
1204 if (cache->available_p)
1205 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1206 else
1207 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1208}
1209
1210/* Implement the "sniffer" frame_unwind method. */
1211
1212static int
1213aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1214 struct frame_info *this_frame,
1215 void **this_prologue_cache)
1216{
1217 CORE_ADDR addr_in_block;
1218 gdb_byte dummy[4];
1219
1220 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1221 if (in_plt_section (addr_in_block)
07b287a0
MS
1222 /* We also use the stub winder if the target memory is unreadable
1223 to avoid having the prologue unwinder trying to read it. */
1224 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1225 return 1;
1226
1227 return 0;
1228}
1229
/* AArch64 stub unwinder.  Shares the prologue unwinder's prev_register
   method; only the frame ID and sniffer differ.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,				/* unwind_data */
  aarch64_stub_unwind_sniffer
};
1241
1242/* Return the frame base address of *THIS_FRAME. */
1243
1244static CORE_ADDR
1245aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1246{
7c8edfae
PL
1247 struct aarch64_prologue_cache *cache
1248 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1249
1250 return cache->prev_sp - cache->framesize;
1251}
1252
/* AArch64 default frame base information.  The same address is used
   for the frame base, locals and arguments.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
1261
07b287a0
MS
1262/* Return the value of the REGNUM register in the previous frame of
1263 *THIS_FRAME. */
1264
1265static struct value *
1266aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1267 void **this_cache, int regnum)
1268{
345bd07c 1269 gdbarch *arch = get_frame_arch (this_frame);
08106042 1270 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
07b287a0
MS
1271 CORE_ADDR lr;
1272
1273 switch (regnum)
1274 {
1275 case AARCH64_PC_REGNUM:
1276 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
3d31bc39 1277 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
07b287a0
MS
1278 return frame_unwind_got_constant (this_frame, regnum, lr);
1279
1280 default:
1281 internal_error (__FILE__, __LINE__,
1282 _("Unexpected register %d"), regnum);
1283 }
1284}
1285
/* Single-byte DWARF expressions yielding the constants 0 and 1, used
   as saved-value expressions for the RA_SIGN_STATE pseudo register.  */
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
1288
07b287a0
MS
1289/* Implement the "init_reg" dwarf2_frame_ops method. */
1290
1291static void
1292aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1293 struct dwarf2_frame_state_reg *reg,
1294 struct frame_info *this_frame)
1295{
08106042 1296 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
11e1b75f 1297
07b287a0
MS
1298 switch (regnum)
1299 {
1300 case AARCH64_PC_REGNUM:
1301 reg->how = DWARF2_FRAME_REG_FN;
1302 reg->loc.fn = aarch64_dwarf2_prev_register;
11e1b75f
AH
1303 return;
1304
07b287a0
MS
1305 case AARCH64_SP_REGNUM:
1306 reg->how = DWARF2_FRAME_REG_CFA;
11e1b75f
AH
1307 return;
1308 }
1309
1310 /* Init pauth registers. */
1311 if (tdep->has_pauth ())
1312 {
c9cd8ca4 1313 if (regnum == tdep->ra_sign_state_regnum)
11e1b75f
AH
1314 {
1315 /* Initialize RA_STATE to zero. */
1316 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1317 reg->loc.exp.start = &op_lit0;
1318 reg->loc.exp.len = 1;
1319 return;
1320 }
1321 else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1322 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1323 {
1324 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1325 return;
1326 }
07b287a0
MS
1327 }
1328}
1329
/* Implement the execute_dwarf_cfa_vendor_op method.  Handles the
   AArch64 vendor CFA opcode DW_CFA_AARCH64_negate_ra_state, which
   toggles whether the return address is pauth-signed.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      /* A null expression means the column was never set; treat that
	 the same as an explicit 0 and flip to 1.  */
      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  /* Not an opcode we handle.  */
  return false;
}
1365
/* Used for matching BRK instructions for AArch64.  The mask keeps all
   encoding bits except the 16-bit immediate, so any BRK #imm16
   variant matches the base pattern.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1369
1370/* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1371
1372static bool
1373aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1374{
1375 const uint32_t insn_len = 4;
1376 gdb_byte target_mem[4];
1377
1378 /* Enable the automatic memory restoration from breakpoints while
1379 we read the memory. Otherwise we may find temporary breakpoints, ones
1380 inserted by GDB, and flag them as permanent breakpoints. */
1381 scoped_restore restore_memory
1382 = make_scoped_restore_show_memory_breakpoints (0);
1383
1384 if (target_read_memory (address, target_mem, insn_len) == 0)
1385 {
1386 uint32_t insn =
1387 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
1388 gdbarch_byte_order_for_code (gdbarch));
1389
1390 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1391 of such instructions with different immediate values. Different OS'
1392 may use a different variation, but they have the same outcome. */
1393 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1394 }
1395
1396 return false;
1397}
1398
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
07b287a0 1411
b907456c
AB
1412/* Implement the gdbarch type alignment method, overrides the generic
1413 alignment algorithm for anything that is aarch64 specific. */
07b287a0 1414
b907456c
AB
1415static ULONGEST
1416aarch64_type_align (gdbarch *gdbarch, struct type *t)
07b287a0 1417{
07b287a0 1418 t = check_typedef (t);
bd63c870 1419 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
07b287a0 1420 {
b907456c
AB
1421 /* Use the natural alignment for vector types (the same for
1422 scalar type), but the maximum alignment is 128-bit. */
df86565b 1423 if (t->length () > 16)
b907456c 1424 return 16;
238f2452 1425 else
df86565b 1426 return t->length ();
07b287a0 1427 }
b907456c
AB
1428
1429 /* Allow the common code to calculate the alignment. */
1430 return 0;
07b287a0
MS
1431}
1432
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      /* A single floating-point element; all elements of the aggregate
	 must share its size and code.  */
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex counts as two elements of its component type.  */
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    /* Short vectors must be exactly 64 or 128 bits.  */
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    /* An ordinary array contributes its element count times the
	       element's own register count.  */
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
1543
1544/* Return true if an argument, whose type is described by TYPE, can be passed or
1545 returned in simd/fp registers, providing enough parameter passing registers
1546 are available. This is as described in the AAPCS64.
1547
1548 Upon successful return, *COUNT returns the number of needed registers,
1549 *FUNDAMENTAL_TYPE contains the type of those registers.
1550
1551 Candidate as per the AAPCS64 5.4.2.C is either a:
1552 - float.
1553 - short-vector.
1554 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1555 all the members are floats and has at most 4 members.
1556 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1557 all the members are short vectors and has at most 4 members.
1558 - Complex (7.1.1)
1559
1560 Note that HFAs and HVAs can include nested structures and arrays. */
1561
0e745c60 1562static bool
ea92689a
AH
1563aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1564 struct type **fundamental_type)
1565{
1566 if (type == nullptr)
1567 return false;
1568
1569 *fundamental_type = nullptr;
1570
1571 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1572 fundamental_type);
1573
1574 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1575 {
1576 *count = ag_count;
1577 return true;
1578 }
1579 else
1580 return false;
1581}
1582
/* AArch64 function call information structure.  Tracks the register
   and stack allocation state while marshalling arguments.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};
1604
1605/* Pass a value in a sequence of consecutive X registers. The caller
30baf67b 1606 is responsible for ensuring sufficient registers are available. */
07b287a0
MS
1607
1608static void
1609pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1610 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1611 struct value *arg)
07b287a0
MS
1612{
1613 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
df86565b 1614 int len = type->length ();
78134374 1615 enum type_code typecode = type->code ();
07b287a0 1616 int regnum = AARCH64_X0_REGNUM + info->ngrn;
50888e42 1617 const bfd_byte *buf = value_contents (arg).data ();
07b287a0
MS
1618
1619 info->argnum++;
1620
1621 while (len > 0)
1622 {
1623 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1624 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1625 byte_order);
1626
1627
1628 /* Adjust sub-word struct/union args when big-endian. */
1629 if (byte_order == BFD_ENDIAN_BIG
1630 && partial_len < X_REGISTER_SIZE
1631 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1632 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1633
c6185dce
SM
1634 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
1635 gdbarch_register_name (gdbarch, regnum),
1636 phex (regval, X_REGISTER_SIZE));
1637
07b287a0
MS
1638 regcache_cooked_write_unsigned (regcache, regnum, regval);
1639 len -= partial_len;
1640 buf += partial_len;
1641 regnum++;
1642 }
1643}
1644
1645/* Attempt to marshall a value in a V register. Return 1 if
1646 successful, or 0 if insufficient registers are available. This
1647 function, unlike the equivalent pass_in_x() function does not
1648 handle arguments spread across multiple registers. */
1649
1650static int
1651pass_in_v (struct gdbarch *gdbarch,
1652 struct regcache *regcache,
1653 struct aarch64_call_info *info,
0735fddd 1654 int len, const bfd_byte *buf)
07b287a0
MS
1655{
1656 if (info->nsrn < 8)
1657 {
07b287a0 1658 int regnum = AARCH64_V0_REGNUM + info->nsrn;
3ff2c72e
AH
1659 /* Enough space for a full vector register. */
1660 gdb_byte reg[register_size (gdbarch, regnum)];
1661 gdb_assert (len <= sizeof (reg));
07b287a0
MS
1662
1663 info->argnum++;
1664 info->nsrn++;
1665
0735fddd
YQ
1666 memset (reg, 0, sizeof (reg));
1667 /* PCS C.1, the argument is allocated to the least significant
1668 bits of V register. */
1669 memcpy (reg, buf, len);
b66f5587 1670 regcache->cooked_write (regnum, reg);
0735fddd 1671
c6185dce
SM
1672 aarch64_debug_printf ("arg %d in %s", info->argnum,
1673 gdbarch_register_name (gdbarch, regnum));
1674
07b287a0
MS
1675 return 1;
1676 }
1677 info->nsrn = 8;
1678 return 0;
1679}
1680
1681/* Marshall an argument onto the stack. */
1682
1683static void
1684pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1685 struct value *arg)
07b287a0 1686{
50888e42 1687 const bfd_byte *buf = value_contents (arg).data ();
df86565b 1688 int len = type->length ();
07b287a0
MS
1689 int align;
1690 stack_item_t item;
1691
1692 info->argnum++;
1693
b907456c 1694 align = type_align (type);
07b287a0
MS
1695
1696 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1697 Natural alignment of the argument's type. */
1698 align = align_up (align, 8);
1699
1700 /* The AArch64 PCS requires at most doubleword alignment. */
1701 if (align > 16)
1702 align = 16;
1703
c6185dce
SM
1704 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1705 info->nsaa);
07b287a0
MS
1706
1707 item.len = len;
1708 item.data = buf;
89055eaa 1709 info->si.push_back (item);
07b287a0
MS
1710
1711 info->nsaa += len;
1712 if (info->nsaa & (align - 1))
1713 {
1714 /* Push stack alignment padding. */
1715 int pad = align - (info->nsaa & (align - 1));
1716
1717 item.len = pad;
c3c87445 1718 item.data = NULL;
07b287a0 1719
89055eaa 1720 info->si.push_back (item);
07b287a0
MS
1721 info->nsaa += pad;
1722 }
1723}
1724
1725/* Marshall an argument into a sequence of one or more consecutive X
1726 registers or, if insufficient X registers are available then onto
1727 the stack. */
1728
1729static void
1730pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1731 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1732 struct value *arg)
07b287a0 1733{
df86565b 1734 int len = type->length ();
07b287a0
MS
1735 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1736
1737 /* PCS C.13 - Pass in registers if we have enough spare */
1738 if (info->ngrn + nregs <= 8)
1739 {
8e80f9d1 1740 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1741 info->ngrn += nregs;
1742 }
1743 else
1744 {
1745 info->ngrn = 8;
8e80f9d1 1746 pass_on_stack (info, type, arg);
07b287a0
MS
1747 }
1748}
1749
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      /* A scalar float occupies a single V register.  */
      return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			value_contents (arg).data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
	/* Real part then imaginary part, each in its own V register.  */
	const bfd_byte *buf = value_contents (arg).data ();
	struct type *target_type = check_typedef (arg_type->target_type ());

	if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, target_type->length (),
			  buf + target_type->length ());
      }

    case TYPE_CODE_ARRAY:
      /* A short vector goes into one V register whole.  */
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			  value_contents (arg).data ());
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* HFA/HVA: recurse over each member in order.  */
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&arg_type->field (i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
1807
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = arg_type->length ();

      /* If arg can be passed in v registers as per the AAPCS64, then do so if
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      /* Not enough V registers: the whole candidate goes on the
		 stack and V registers are exhausted (AAPCS64 C.3).  */
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (arg_type->code ())
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4 && !is_fixed_point_type (arg_type))
	    {
	      /* Promote to 32 bit integer.  */
	      if (arg_type->is_unsigned ())
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg).data (), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  /* Pop the stack items in reverse order so the first argument ends up
     at the lowest address.  */
  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
1962
1963/* Implement the "frame_align" gdbarch method. */
1964
1965static CORE_ADDR
1966aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1967{
1968 /* Align the stack to sixteen bytes. */
1969 return sp & ~(CORE_ADDR) 15;
1970}
1971
1972/* Return the type for an AdvSISD Q register. */
1973
1974static struct type *
1975aarch64_vnq_type (struct gdbarch *gdbarch)
1976{
08106042 1977 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
1978
1979 if (tdep->vnq_type == NULL)
1980 {
1981 struct type *t;
1982 struct type *elem;
1983
1984 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1985 TYPE_CODE_UNION);
1986
1987 elem = builtin_type (gdbarch)->builtin_uint128;
1988 append_composite_type_field (t, "u", elem);
1989
1990 elem = builtin_type (gdbarch)->builtin_int128;
1991 append_composite_type_field (t, "s", elem);
1992
1993 tdep->vnq_type = t;
1994 }
1995
1996 return tdep->vnq_type;
1997}
1998
1999/* Return the type for an AdvSISD D register. */
2000
2001static struct type *
2002aarch64_vnd_type (struct gdbarch *gdbarch)
2003{
08106042 2004 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2005
2006 if (tdep->vnd_type == NULL)
2007 {
2008 struct type *t;
2009 struct type *elem;
2010
2011 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2012 TYPE_CODE_UNION);
2013
2014 elem = builtin_type (gdbarch)->builtin_double;
2015 append_composite_type_field (t, "f", elem);
2016
2017 elem = builtin_type (gdbarch)->builtin_uint64;
2018 append_composite_type_field (t, "u", elem);
2019
2020 elem = builtin_type (gdbarch)->builtin_int64;
2021 append_composite_type_field (t, "s", elem);
2022
2023 tdep->vnd_type = t;
2024 }
2025
2026 return tdep->vnd_type;
2027}
2028
2029/* Return the type for an AdvSISD S register. */
2030
2031static struct type *
2032aarch64_vns_type (struct gdbarch *gdbarch)
2033{
08106042 2034 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2035
2036 if (tdep->vns_type == NULL)
2037 {
2038 struct type *t;
2039 struct type *elem;
2040
2041 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2042 TYPE_CODE_UNION);
2043
2044 elem = builtin_type (gdbarch)->builtin_float;
2045 append_composite_type_field (t, "f", elem);
2046
2047 elem = builtin_type (gdbarch)->builtin_uint32;
2048 append_composite_type_field (t, "u", elem);
2049
2050 elem = builtin_type (gdbarch)->builtin_int32;
2051 append_composite_type_field (t, "s", elem);
2052
2053 tdep->vns_type = t;
2054 }
2055
2056 return tdep->vns_type;
2057}
2058
2059/* Return the type for an AdvSISD H register. */
2060
2061static struct type *
2062aarch64_vnh_type (struct gdbarch *gdbarch)
2063{
08106042 2064 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2065
2066 if (tdep->vnh_type == NULL)
2067 {
2068 struct type *t;
2069 struct type *elem;
2070
2071 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2072 TYPE_CODE_UNION);
2073
5291fe3c
SP
2074 elem = builtin_type (gdbarch)->builtin_bfloat16;
2075 append_composite_type_field (t, "bf", elem);
2076
a6d0f249
AH
2077 elem = builtin_type (gdbarch)->builtin_half;
2078 append_composite_type_field (t, "f", elem);
2079
07b287a0
MS
2080 elem = builtin_type (gdbarch)->builtin_uint16;
2081 append_composite_type_field (t, "u", elem);
2082
2083 elem = builtin_type (gdbarch)->builtin_int16;
2084 append_composite_type_field (t, "s", elem);
2085
2086 tdep->vnh_type = t;
2087 }
2088
2089 return tdep->vnh_type;
2090}
2091
2092/* Return the type for an AdvSISD B register. */
2093
2094static struct type *
2095aarch64_vnb_type (struct gdbarch *gdbarch)
2096{
08106042 2097 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2098
2099 if (tdep->vnb_type == NULL)
2100 {
2101 struct type *t;
2102 struct type *elem;
2103
2104 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2105 TYPE_CODE_UNION);
2106
2107 elem = builtin_type (gdbarch)->builtin_uint8;
2108 append_composite_type_field (t, "u", elem);
2109
2110 elem = builtin_type (gdbarch)->builtin_int8;
2111 append_composite_type_field (t, "s", elem);
2112
2113 tdep->vnb_type = t;
2114 }
2115
2116 return tdep->vnb_type;
2117}
2118
63bad7b6
AH
/* Return the type for an AdvSISD V register.

   Unlike the Q/D/H/S/B pseudo types above (single-value slices), this is
   a union of vector views of the full 128-bit register: "d" (2 lanes),
   "s" (4 lanes), "h" (8 lanes), "b" (16 lanes) and "q" (1 lane), each of
   which is itself a union of float (where applicable), unsigned and
   signed element types.  The result is cached in TDEP.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
	 slice from the non-pseudo vector registers.  However NEON V registers
	 are always vector registers, and need constructing as such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
					    TYPE_CODE_UNION);

      /* "d": two 64-bit lanes (double / uint64 / int64).  */
      struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
					      TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      /* "s": four 32-bit lanes (float / uint32 / int32).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      /* "h": eight 16-bit lanes (bfloat16 / half / uint16 / int16).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "bf",
				   init_vector_type (bt->builtin_bfloat16, 8));
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_half, 8));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      /* "b": sixteen 8-bit lanes (uint8 / int8).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      /* "q": one 128-bit lane (uint128 / int128).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
2189
07b287a0
MS
2190/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2191
2192static int
2193aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2194{
08106042 2195 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
34dcc7cf 2196
07b287a0
MS
2197 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2198 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2199
2200 if (reg == AARCH64_DWARF_SP)
2201 return AARCH64_SP_REGNUM;
2202
1fe84861
YY
2203 if (reg == AARCH64_DWARF_PC)
2204 return AARCH64_PC_REGNUM;
2205
07b287a0
MS
2206 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2207 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2208
65d4cada
AH
2209 if (reg == AARCH64_DWARF_SVE_VG)
2210 return AARCH64_SVE_VG_REGNUM;
2211
2212 if (reg == AARCH64_DWARF_SVE_FFR)
2213 return AARCH64_SVE_FFR_REGNUM;
2214
2215 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2216 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2217
2218 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2219 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2220
34dcc7cf
AH
2221 if (tdep->has_pauth ())
2222 {
c9cd8ca4
LM
2223 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2224 return tdep->ra_sign_state_regnum;
34dcc7cf
AH
2225 }
2226
07b287a0
MS
2227 return -1;
2228}
07b287a0
MS
2229
2230/* Implement the "print_insn" gdbarch method. */
2231
2232static int
2233aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2234{
2235 info->symbols = NULL;
6394c606 2236 return default_print_insn (memaddr, info);
07b287a0
MS
2237}
2238
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Generate the standard breakpoint kind/size helpers from the single
   breakpoint instruction above.  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
07b287a0
MS
2245
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.

   Three AAPCS64 cases are handled: HFA/HVA results come back one
   member per V register; scalar integers/pointers come back
   zero/sign-extended in X0 (and following X registers if wider); all
   other aggregates come back as raw 64-bit chunks in X0 upwards.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA or HVA: each member occupies its own V register starting
	 at V0; LEN is the size of one member.  */
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  aarch64_debug_printf
	    ("read HFA or HVA return value element %d from %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2325
2326
2327/* Will a function return an aggregate type in memory or in a
2328 register? Return 0 if an aggregate type can be returned in a
2329 register, 1 if it must be returned in memory. */
2330
2331static int
2332aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2333{
f168693b 2334 type = check_typedef (type);
4f4aedeb
AH
2335 int elements;
2336 struct type *fundamental_type;
07b287a0 2337
4f4aedeb
AH
2338 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2339 &fundamental_type))
07b287a0 2340 {
cd635f74
YQ
2341 /* v0-v7 are used to return values and one register is allocated
2342 for one member. However, HFA or HVA has at most four members. */
07b287a0
MS
2343 return 0;
2344 }
2345
df86565b 2346 if (type->length () > 16
bab22d06 2347 || !language_pass_by_reference (type).trivially_copyable)
07b287a0
MS
2348 {
2349 /* PCS B.6 Aggregates larger than 16 bytes are passed by
dda83cd7 2350 invisible reference. */
07b287a0
MS
2351
2352 return 1;
2353 }
2354
2355 return 0;
2356}
2357
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.

   Mirror image of aarch64_extract_return_value: HFA/HVA members go one
   per V register starting at V0; scalar integers/pointers go
   sign-extended into X0 (or raw into X0 upwards if wider than one
   register); other aggregates go as raw 64-bit chunks into X0
   upwards.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA or HVA: one member per V register; LEN is the member
	 size.  */
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  aarch64_debug_printf
	    ("write HFA or HVA return value element %d to %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (type->length () <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = type->length ();
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2444
/* Implement the "return_value" gdbarch method.

   Decide how a value of type VALTYPE is returned by the callee, and
   optionally transfer it: WRITEBUF (if non-NULL) is stored into the
   return registers, READBUF (if non-NULL) is filled from the return
   registers or from the memory block addressed by X8.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      gdb_byte *readbuf, const gdb_byte *writebuf)
{

  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  /* From the AAPCS64's Result Return section:

	     "Otherwise, the caller shall reserve a block of memory of
	     sufficient size and alignment to hold the result.  The address
	     of the memory block shall be passed as an additional argument to
	     the function in x8.  */

	  aarch64_debug_printf ("return value in memory");

	  if (readbuf)
	    {
	      CORE_ADDR addr;

	      /* X8 (the struct-return register) holds the address of
		 the caller-allocated result block.  */
	      regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
	      read_memory (addr, readbuf, valtype->length ());
	    }

	  return RETURN_VALUE_ABI_RETURNS_ADDRESS;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  aarch64_debug_printf ("return value in registers");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
2490
2491/* Implement the "get_longjmp_target" gdbarch method. */
2492
2493static int
2494aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2495{
2496 CORE_ADDR jb_addr;
2497 gdb_byte buf[X_REGISTER_SIZE];
2498 struct gdbarch *gdbarch = get_frame_arch (frame);
08106042 2499 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2500 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2501
2502 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2503
2504 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2505 X_REGISTER_SIZE))
2506 return 0;
2507
2508 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2509 return 1;
2510}
ea873d8e
PL
2511
2512/* Implement the "gen_return_address" gdbarch method. */
2513
2514static void
2515aarch64_gen_return_address (struct gdbarch *gdbarch,
2516 struct agent_expr *ax, struct axs_value *value,
2517 CORE_ADDR scope)
2518{
2519 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2520 value->kind = axs_lvalue_register;
2521 value->u.reg = AARCH64_LR_REGNUM;
2522}
07b287a0
MS
2523\f
2524
e63ae49b
LM
2525/* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2526 otherwise. */
2527
2528static bool
2529is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2530{
2531 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2532
2533 if (tdep->w_pseudo_base <= regnum
2534 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2535 return true;
2536
2537 return false;
2538}
2539
07b287a0
MS
/* Return the pseudo register name corresponding to register regnum.

   Pseudo registers are the Q/D/S/H/B single-value views of the V
   registers, the W bottom-halves of the X registers, the SVE "v"
   aliases when SVE is present, and (unnamed) RA_STATE when pointer
   authentication is present.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* W pseudo-registers.  Bottom halves of the X registers.  */
  static const char *const w_name[] =
    {
      "w0", "w1", "w2", "w3",
      "w4", "w5", "w6", "w7",
      "w8", "w9", "w10", "w11",
      "w12", "w13", "w14", "w15",
      "w16", "w17", "w18", "w19",
      "w20", "w21", "w22", "w23",
      "w24", "w25", "w26", "w27",
      "w28", "w29", "w30",
    };

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Zero-based index within the pseudo-register space.  Note the W
     pseudo check below uses the raw REGNUM instead.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  /* W pseudo-registers? */
  if (is_w_pseudo_register (gdbarch, regnum))
    return w_name[regnum - tdep->w_pseudo_base];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return "";

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}
2670
/* Implement the "pseudo_register_type" tdesc_arch_data method.

   Return the GDB type used to present pseudo register REGNUM: the
   per-width union types for the Q/D/S/H/B views, the full vector union
   for the SVE "v" aliases, uint32 for W pseudos and uint64 for
   RA_STATE.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Zero-based index within the pseudo-register space.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
      && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  /* W pseudo-registers are 32-bit.  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return builtin_type (gdbarch)->builtin_uint32;

  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return builtin_type (gdbarch)->builtin_uint64;

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_type: bad register number %d"),
		  p_regnum);
}
2710
2711/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2712
2713static int
2714aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
dbf5d61b 2715 const struct reggroup *group)
07b287a0 2716{
08106042 2717 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2718
34dcc7cf 2719 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2720
34dcc7cf 2721 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0 2722 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2723 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2724 return (group == all_reggroup || group == vector_reggroup
2725 || group == float_reggroup);
34dcc7cf 2726 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2727 return (group == all_reggroup || group == vector_reggroup
2728 || group == float_reggroup);
34dcc7cf 2729 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0 2730 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2731 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0 2732 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
2733 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2734 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6 2735 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2736 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
c9cd8ca4 2737 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf 2738 return 0;
07b287a0
MS
2739
2740 return group == all_reggroup;
2741}
2742
3c5cd5c3
AH
/* Helper for aarch64_pseudo_read_value.

   Read the pseudo register at offset REGNUM_OFFSET from V0 as a
   REGSIZE-byte slice of the underlying V register, filling in
   RESULT_VALUE (which the caller has already allocated and tagged with
   the register number).  */

static struct value *
aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
			     readable_regcache *regcache, int regnum_offset,
			     int regsize, struct value *result_value)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
    mark_value_bytes_unavailable (result_value, 0,
				  value_type (result_value)->length ());
  else
    /* Only the first REGSIZE bytes of the V register are visible
       through this pseudo.  */
    memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);

  return result_value;
}
2764
07b287a0
MS
/* Implement the "pseudo_register_read_value" gdbarch method.

   W pseudos are read as the low 4 bytes of the corresponding X
   register; the Q/D/S/H/B views (and SVE "v" aliases) are read as
   slices of the corresponding V register via
   aarch64_pseudo_read_value_1.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct value *result_value = allocate_value (register_type (gdbarch, regnum));

  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;

  if (is_w_pseudo_register (gdbarch, regnum))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      /* On big endian the low word lives in the high-addressed half of
	 the X register.  */
      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
      gdb_byte data[4];

      /* Read the bottom 4 bytes of X.  */
      if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
	mark_value_bytes_unavailable (result_value, 0, 4);
      else
	memcpy (value_contents_raw (result_value).data (), data, 4);

      return result_value;
    }

  /* From here on work with a zero-based pseudo-register index.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bound");
}
2834
3c5cd5c3 2835/* Helper for aarch64_pseudo_write. */
07b287a0
MS
2836
2837static void
63bad7b6
AH
2838aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2839 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2840{
3c5cd5c3 2841 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2842
63bad7b6
AH
2843 /* Enough space for a full vector register. */
2844 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2845 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2846
07b287a0
MS
2847 /* Ensure the register buffer is zero, we want gdb writes of the
2848 various 'scalar' pseudo registers to behavior like architectural
2849 writes, register width bytes are written the remainder are set to
2850 zero. */
63bad7b6 2851 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2852
3c5cd5c3
AH
2853 memcpy (reg_buf, buf, regsize);
2854 regcache->raw_write (v_regnum, reg_buf);
2855}
2856
2857/* Implement the "pseudo_register_write" gdbarch method. */
2858
2859static void
2860aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2861 int regnum, const gdb_byte *buf)
2862{
08106042 2863 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
e63ae49b
LM
2864
2865 if (is_w_pseudo_register (gdbarch, regnum))
2866 {
2867 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2868 /* Default offset for little endian. */
2869 int offset = 0;
2870
2871 if (byte_order == BFD_ENDIAN_BIG)
2872 offset = 4;
2873
2874 /* Find the correct X register to extract the data from. */
2875 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2876
2877 /* First zero-out the contents of X. */
2878 ULONGEST zero = 0;
2879 regcache->raw_write (x_regnum, zero);
2880 /* Write to the bottom 4 bytes of X. */
2881 regcache->raw_write_part (x_regnum, offset, 4, buf);
2882 return;
2883 }
2884
07b287a0
MS
2885 regnum -= gdbarch_num_regs (gdbarch);
2886
2887 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2888 return aarch64_pseudo_write_1 (gdbarch, regcache,
2889 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2890 buf);
07b287a0
MS
2891
2892 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2893 return aarch64_pseudo_write_1 (gdbarch, regcache,
2894 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2895 buf);
07b287a0
MS
2896
2897 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2898 return aarch64_pseudo_write_1 (gdbarch, regcache,
2899 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2900 buf);
07b287a0
MS
2901
2902 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2903 return aarch64_pseudo_write_1 (gdbarch, regcache,
2904 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2905 buf);
07b287a0
MS
2906
2907 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2908 return aarch64_pseudo_write_1 (gdbarch, regcache,
2909 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2910 buf);
2911
2912 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2913 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2914 return aarch64_pseudo_write_1 (gdbarch, regcache,
2915 regnum - AARCH64_SVE_V0_REGNUM,
2916 V_REGISTER_SIZE, buf);
07b287a0
MS
2917
2918 gdb_assert_not_reached ("regnum out of bound");
2919}
2920
07b287a0
MS
2921/* Callback function for user_reg_add. */
2922
2923static struct value *
2924value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2925{
9a3c8263 2926 const int *reg_p = (const int *) baton;
07b287a0
MS
2927
2928 return value_of_register (*reg_p, frame);
2929}
2930\f
2931
9404b58f
KM
2932/* Implement the "software_single_step" gdbarch method, needed to
2933 single step through atomic sequences on AArch64. */
2934
a0ff9e1a 2935static std::vector<CORE_ADDR>
f5ea389a 2936aarch64_software_single_step (struct regcache *regcache)
9404b58f 2937{
ac7936df 2938 struct gdbarch *gdbarch = regcache->arch ();
9404b58f
KM
2939 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2940 const int insn_size = 4;
2941 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2942 CORE_ADDR pc = regcache_read_pc (regcache);
70ab8ccd 2943 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
9404b58f
KM
2944 CORE_ADDR loc = pc;
2945 CORE_ADDR closing_insn = 0;
2946 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2947 byte_order_for_code);
2948 int index;
2949 int insn_count;
2950 int bc_insn_count = 0; /* Conditional branch instruction count. */
2951 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
f77ee802
YQ
2952 aarch64_inst inst;
2953
561a72d4 2954 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2955 return {};
9404b58f
KM
2956
2957 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2958 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
a0ff9e1a 2959 return {};
9404b58f
KM
2960
2961 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2962 {
9404b58f
KM
2963 loc += insn_size;
2964 insn = read_memory_unsigned_integer (loc, insn_size,
2965 byte_order_for_code);
2966
561a72d4 2967 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2968 return {};
9404b58f 2969 /* Check if the instruction is a conditional branch. */
f77ee802 2970 if (inst.opcode->iclass == condbranch)
9404b58f 2971 {
f77ee802
YQ
2972 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2973
9404b58f 2974 if (bc_insn_count >= 1)
a0ff9e1a 2975 return {};
9404b58f
KM
2976
2977 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2978 breaks[1] = loc + inst.operands[0].imm.value;
9404b58f
KM
2979
2980 bc_insn_count++;
2981 last_breakpoint++;
2982 }
2983
2984 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2985 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
9404b58f
KM
2986 {
2987 closing_insn = loc;
2988 break;
2989 }
2990 }
2991
2992 /* We didn't find a closing Store Exclusive instruction, fall back. */
2993 if (!closing_insn)
a0ff9e1a 2994 return {};
9404b58f
KM
2995
2996 /* Insert breakpoint after the end of the atomic sequence. */
2997 breaks[0] = loc + insn_size;
2998
2999 /* Check for duplicated breakpoints, and also check that the second
3000 breakpoint is not within the atomic sequence. */
3001 if (last_breakpoint
3002 && (breaks[1] == breaks[0]
3003 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
3004 last_breakpoint = 0;
3005
a0ff9e1a
SM
3006 std::vector<CORE_ADDR> next_pcs;
3007
9404b58f
KM
3008 /* Insert the breakpoint at the end of the sequence, and one at the
3009 destination of the conditional branch, if it exists. */
3010 for (index = 0; index <= last_breakpoint; index++)
a0ff9e1a 3011 next_pcs.push_back (breaks[index]);
9404b58f 3012
93f9a11f 3013 return next_pcs;
9404b58f
KM
3014}
3015
1152d984
SM
3016struct aarch64_displaced_step_copy_insn_closure
3017 : public displaced_step_copy_insn_closure
b6542f81
YQ
3018{
3019 /* It is true when condition instruction, such as B.CON, TBZ, etc,
3020 is being displaced stepping. */
f0c702d4 3021 bool cond = false;
b6542f81 3022
0c271889
LM
3023 /* PC adjustment offset after displaced stepping. If 0, then we don't
3024 write the PC back, assuming the PC is already the right address. */
cfba9872 3025 int32_t pc_adjust = 0;
b6542f81
YQ
3026};
3027
3028/* Data when visiting instructions for displaced stepping. */
3029
3030struct aarch64_displaced_step_data
3031{
3032 struct aarch64_insn_data base;
3033
3034 /* The address where the instruction will be executed at. */
3035 CORE_ADDR new_addr;
3036 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
e935475c 3037 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
b6542f81
YQ
3038 /* Number of instructions in INSN_BUF. */
3039 unsigned insn_count;
3040 /* Registers when doing displaced stepping. */
3041 struct regcache *regs;
3042
1152d984 3043 aarch64_displaced_step_copy_insn_closure *dsc;
b6542f81
YQ
3044};
3045
3046/* Implementation of aarch64_insn_visitor method "b". */
3047
3048static void
3049aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3050 struct aarch64_insn_data *data)
3051{
3052 struct aarch64_displaced_step_data *dsd
3053 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 3054 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
3055
3056 if (can_encode_int32 (new_offset, 28))
3057 {
3058 /* Emit B rather than BL, because executing BL on a new address
3059 will get the wrong address into LR. In order to avoid this,
3060 we emit B, and update LR if the instruction is BL. */
3061 emit_b (dsd->insn_buf, 0, new_offset);
3062 dsd->insn_count++;
3063 }
3064 else
3065 {
3066 /* Write NOP. */
3067 emit_nop (dsd->insn_buf);
3068 dsd->insn_count++;
3069 dsd->dsc->pc_adjust = offset;
3070 }
3071
3072 if (is_bl)
3073 {
3074 /* Update LR. */
3075 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3076 data->insn_addr + 4);
3077 }
3078}
3079
3080/* Implementation of aarch64_insn_visitor method "b_cond". */
3081
3082static void
3083aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3084 struct aarch64_insn_data *data)
3085{
3086 struct aarch64_displaced_step_data *dsd
3087 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3088
3089 /* GDB has to fix up PC after displaced step this instruction
3090 differently according to the condition is true or false. Instead
3091 of checking COND against conditional flags, we can use
3092 the following instructions, and GDB can tell how to fix up PC
3093 according to the PC value.
3094
3095 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3096 INSN1 ;
3097 TAKEN:
3098 INSN2
3099 */
3100
3101 emit_bcond (dsd->insn_buf, cond, 8);
f0c702d4 3102 dsd->dsc->cond = true;
b6542f81
YQ
3103 dsd->dsc->pc_adjust = offset;
3104 dsd->insn_count = 1;
3105}
3106
3107/* Dynamically allocate a new register. If we know the register
3108 statically, we should make it a global as above instead of using this
3109 helper function. */
3110
3111static struct aarch64_register
3112aarch64_register (unsigned num, int is64)
3113{
3114 return (struct aarch64_register) { num, is64 };
3115}
3116
3117/* Implementation of aarch64_insn_visitor method "cb". */
3118
3119static void
3120aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3121 const unsigned rn, int is64,
3122 struct aarch64_insn_data *data)
3123{
3124 struct aarch64_displaced_step_data *dsd
3125 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3126
3127 /* The offset is out of range for a compare and branch
3128 instruction. We can use the following instructions instead:
3129
3130 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3131 INSN1 ;
3132 TAKEN:
3133 INSN2
3134 */
3135 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3136 dsd->insn_count = 1;
f0c702d4 3137 dsd->dsc->cond = true;
b6542f81
YQ
3138 dsd->dsc->pc_adjust = offset;
3139}
3140
3141/* Implementation of aarch64_insn_visitor method "tb". */
3142
3143static void
3144aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3145 const unsigned rt, unsigned bit,
3146 struct aarch64_insn_data *data)
3147{
3148 struct aarch64_displaced_step_data *dsd
3149 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3150
3151 /* The offset is out of range for a test bit and branch
3152 instruction We can use the following instructions instead:
3153
3154 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3155 INSN1 ;
3156 TAKEN:
3157 INSN2
3158
3159 */
3160 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3161 dsd->insn_count = 1;
f0c702d4 3162 dsd->dsc->cond = true;
b6542f81
YQ
3163 dsd->dsc->pc_adjust = offset;
3164}
3165
3166/* Implementation of aarch64_insn_visitor method "adr". */
3167
3168static void
3169aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3170 const int is_adrp, struct aarch64_insn_data *data)
3171{
3172 struct aarch64_displaced_step_data *dsd
3173 = (struct aarch64_displaced_step_data *) data;
3174 /* We know exactly the address the ADR{P,} instruction will compute.
3175 We can just write it to the destination register. */
3176 CORE_ADDR address = data->insn_addr + offset;
3177
3178 if (is_adrp)
3179 {
3180 /* Clear the lower 12 bits of the offset to get the 4K page. */
3181 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3182 address & ~0xfff);
3183 }
3184 else
3185 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3186 address);
3187
3188 dsd->dsc->pc_adjust = 4;
3189 emit_nop (dsd->insn_buf);
3190 dsd->insn_count = 1;
3191}
3192
3193/* Implementation of aarch64_insn_visitor method "ldr_literal". */
3194
3195static void
3196aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3197 const unsigned rt, const int is64,
3198 struct aarch64_insn_data *data)
3199{
3200 struct aarch64_displaced_step_data *dsd
3201 = (struct aarch64_displaced_step_data *) data;
3202 CORE_ADDR address = data->insn_addr + offset;
3203 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3204
3205 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3206 address);
3207
3208 if (is_sw)
3209 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3210 aarch64_register (rt, 1), zero);
3211 else
3212 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3213 aarch64_register (rt, 1), zero);
3214
3215 dsd->dsc->pc_adjust = 4;
3216}
3217
3218/* Implementation of aarch64_insn_visitor method "others". */
3219
3220static void
3221aarch64_displaced_step_others (const uint32_t insn,
3222 struct aarch64_insn_data *data)
3223{
3224 struct aarch64_displaced_step_data *dsd
3225 = (struct aarch64_displaced_step_data *) data;
3226
807f647c
MM
3227 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3228 if (masked_insn == BLR)
b6542f81 3229 {
807f647c
MM
3230 /* Emit a BR to the same register and then update LR to the original
3231 address (similar to aarch64_displaced_step_b). */
3232 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3233 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3234 data->insn_addr + 4);
b6542f81 3235 }
807f647c
MM
3236 else
3237 aarch64_emit_insn (dsd->insn_buf, insn);
3238 dsd->insn_count = 1;
3239
3240 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3241 dsd->dsc->pc_adjust = 0;
b6542f81
YQ
3242 else
3243 dsd->dsc->pc_adjust = 4;
3244}
3245
3246static const struct aarch64_insn_visitor visitor =
3247{
3248 aarch64_displaced_step_b,
3249 aarch64_displaced_step_b_cond,
3250 aarch64_displaced_step_cb,
3251 aarch64_displaced_step_tb,
3252 aarch64_displaced_step_adr,
3253 aarch64_displaced_step_ldr_literal,
3254 aarch64_displaced_step_others,
3255};
3256
3257/* Implement the "displaced_step_copy_insn" gdbarch method. */
3258
1152d984 3259displaced_step_copy_insn_closure_up
b6542f81
YQ
3260aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3261 CORE_ADDR from, CORE_ADDR to,
3262 struct regcache *regs)
3263{
b6542f81
YQ
3264 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3265 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3266 struct aarch64_displaced_step_data dsd;
c86a40c6
YQ
3267 aarch64_inst inst;
3268
561a72d4 3269 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
c86a40c6 3270 return NULL;
b6542f81
YQ
3271
3272 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 3273 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
b6542f81
YQ
3274 {
3275 /* We can't displaced step atomic sequences. */
3276 return NULL;
3277 }
3278
1152d984
SM
3279 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3280 (new aarch64_displaced_step_copy_insn_closure);
b6542f81
YQ
3281 dsd.base.insn_addr = from;
3282 dsd.new_addr = to;
3283 dsd.regs = regs;
cfba9872 3284 dsd.dsc = dsc.get ();
034f1a81 3285 dsd.insn_count = 0;
b6542f81
YQ
3286 aarch64_relocate_instruction (insn, &visitor,
3287 (struct aarch64_insn_data *) &dsd);
e935475c 3288 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
b6542f81
YQ
3289
3290 if (dsd.insn_count != 0)
3291 {
3292 int i;
3293
3294 /* Instruction can be relocated to scratch pad. Copy
3295 relocated instruction(s) there. */
3296 for (i = 0; i < dsd.insn_count; i++)
3297 {
136821d9
SM
3298 displaced_debug_printf ("writing insn %.8x at %s",
3299 dsd.insn_buf[i],
3300 paddress (gdbarch, to + i * 4));
3301
b6542f81
YQ
3302 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3303 (ULONGEST) dsd.insn_buf[i]);
3304 }
3305 }
3306 else
3307 {
b6542f81
YQ
3308 dsc = NULL;
3309 }
3310
6d0cf446 3311 /* This is a work around for a problem with g++ 4.8. */
1152d984 3312 return displaced_step_copy_insn_closure_up (dsc.release ());
b6542f81
YQ
3313}
3314
3315/* Implement the "displaced_step_fixup" gdbarch method. */
3316
3317void
3318aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
1152d984 3319 struct displaced_step_copy_insn_closure *dsc_,
b6542f81
YQ
3320 CORE_ADDR from, CORE_ADDR to,
3321 struct regcache *regs)
3322{
1152d984
SM
3323 aarch64_displaced_step_copy_insn_closure *dsc
3324 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
cfba9872 3325
0c271889
LM
3326 ULONGEST pc;
3327
3328 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3329
136821d9
SM
3330 displaced_debug_printf ("PC after stepping: %s (was %s).",
3331 paddress (gdbarch, pc), paddress (gdbarch, to));
1ab139e5 3332
b6542f81
YQ
3333 if (dsc->cond)
3334 {
136821d9
SM
3335 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3336 dsc->pc_adjust);
1ab139e5 3337
b6542f81
YQ
3338 if (pc - to == 8)
3339 {
3340 /* Condition is true. */
3341 }
3342 else if (pc - to == 4)
3343 {
3344 /* Condition is false. */
3345 dsc->pc_adjust = 4;
3346 }
3347 else
3348 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
1ab139e5 3349
136821d9
SM
3350 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3351 dsc->pc_adjust);
b6542f81
YQ
3352 }
3353
136821d9
SM
3354 displaced_debug_printf ("%s PC by %d",
3355 dsc->pc_adjust ? "adjusting" : "not adjusting",
3356 dsc->pc_adjust);
1ab139e5 3357
b6542f81
YQ
3358 if (dsc->pc_adjust != 0)
3359 {
0c271889
LM
3360 /* Make sure the previous instruction was executed (that is, the PC
3361 has changed). If the PC didn't change, then discard the adjustment
3362 offset. Otherwise we may skip an instruction before its execution
3363 took place. */
3364 if ((pc - to) == 0)
1ab139e5 3365 {
136821d9 3366 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
1ab139e5
LM
3367 dsc->pc_adjust = 0;
3368 }
0c271889 3369
136821d9
SM
3370 displaced_debug_printf ("fixup: set PC to %s:%d",
3371 paddress (gdbarch, from), dsc->pc_adjust);
3372
b6542f81
YQ
3373 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3374 from + dsc->pc_adjust);
3375 }
3376}
3377
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   AArch64 always uses hardware single-stepping over the relocated
   instruction, so this unconditionally returns true.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
3385
95228a0d
AH
3386/* Get the correct target description for the given VQ value.
3387 If VQ is zero then it is assumed SVE is not supported.
c1bd443b
LM
3388 (It is not possible to set VQ to zero on an SVE system).
3389
414d5848
JB
3390 MTE_P indicates the presence of the Memory Tagging Extension feature.
3391
3392 TLS_P indicates the presence of the Thread Local Storage feature. */
da434ccb
AH
3393
3394const target_desc *
0ee6b1c5 3395aarch64_read_description (const aarch64_features &features)
da434ccb 3396{
0ee6b1c5
JB
3397 if (features.vq > AARCH64_MAX_SVE_VQ)
3398 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
95228a0d
AH
3399 AARCH64_MAX_SVE_VQ);
3400
0ee6b1c5 3401 struct target_desc *tdesc = tdesc_aarch64_map[features];
da434ccb 3402
95228a0d
AH
3403 if (tdesc == NULL)
3404 {
0ee6b1c5
JB
3405 tdesc = aarch64_create_target_description (features);
3406 tdesc_aarch64_map[features] = tdesc;
95228a0d 3407 }
da434ccb 3408
95228a0d 3409 return tdesc;
da434ccb
AH
3410}
3411
ba2d2bb2
AH
3412/* Return the VQ used when creating the target description TDESC. */
3413
1332a140 3414static uint64_t
ba2d2bb2
AH
3415aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3416{
3417 const struct tdesc_feature *feature_sve;
3418
3419 if (!tdesc_has_registers (tdesc))
3420 return 0;
3421
3422 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3423
3424 if (feature_sve == nullptr)
3425 return 0;
3426
12863263
AH
3427 uint64_t vl = tdesc_register_bitsize (feature_sve,
3428 aarch64_sve_register_names[0]) / 8;
ba2d2bb2
AH
3429 return sve_vq_from_vl (vl);
3430}
3431
4f3681cc
TJB
3432/* Get the AArch64 features present in the given target description. */
3433
3434aarch64_features
3435aarch64_features_from_target_desc (const struct target_desc *tdesc)
3436{
3437 aarch64_features features;
3438
3439 if (tdesc == nullptr)
3440 return features;
3441
3442 features.vq = aarch64_get_tdesc_vq (tdesc);
3443 features.pauth
3444 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
3445 features.mte
3446 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
3447 features.tls
3448 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls") != nullptr);
3449
3450 return features;
3451}
3452
76bed0fd
AH
3453/* Implement the "cannot_store_register" gdbarch method. */
3454
3455static int
3456aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3457{
08106042 3458 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
76bed0fd
AH
3459
3460 if (!tdep->has_pauth ())
3461 return 0;
3462
3463 /* Pointer authentication registers are read-only. */
3464 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3465 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3466}
3467
da729c5c
TT
3468/* Implement the stack_frame_destroyed_p gdbarch method. */
3469
3470static int
3471aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3472{
3473 CORE_ADDR func_start, func_end;
3474 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3475 return 0;
3476
3477 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3478 uint32_t insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3479
3480 aarch64_inst inst;
3481 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
3482 return 0;
3483
3484 return streq (inst.opcode->name, "ret");
3485}
3486
07b287a0
MS
3487/* Initialize the current architecture based on INFO. If possible,
3488 re-use an architecture from ARCHES, which is a list of
3489 architectures already created during this debugging session.
3490
3491 Called e.g. at program startup, when reading a core file, and when
3492 reading a binary file. */
3493
3494static struct gdbarch *
3495aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3496{
ccb8d7e8 3497 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
76bed0fd 3498 const struct tdesc_feature *feature_pauth;
ccb8d7e8
AH
3499 bool valid_p = true;
3500 int i, num_regs = 0, num_pseudo_regs = 0;
c9cd8ca4 3501 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
414d5848 3502 int first_mte_regnum = -1, tls_regnum = -1;
4f3681cc 3503 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4da037ef
AH
3504
3505 if (vq > AARCH64_MAX_SVE_VQ)
596179f7
SDJ
3506 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3507 pulongest (vq), AARCH64_MAX_SVE_VQ);
4da037ef 3508
ccb8d7e8
AH
3509 /* If there is already a candidate, use it. */
3510 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3511 best_arch != nullptr;
3512 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3513 {
345bd07c 3514 aarch64_gdbarch_tdep *tdep
08106042 3515 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4da037ef 3516 if (tdep && tdep->vq == vq)
ccb8d7e8
AH
3517 return best_arch->gdbarch;
3518 }
07b287a0 3519
4da037ef
AH
3520 /* Ensure we always have a target descriptor, and that it is for the given VQ
3521 value. */
ccb8d7e8 3522 const struct target_desc *tdesc = info.target_desc;
4f3681cc
TJB
3523 if (!tdesc_has_registers (tdesc))
3524 tdesc = aarch64_read_description ({});
07b287a0
MS
3525 gdb_assert (tdesc);
3526
ccb8d7e8 3527 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
ba2d2bb2
AH
3528 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3529 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
76bed0fd 3530 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
5e984dbf
LM
3531 const struct tdesc_feature *feature_mte
3532 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
414d5848
JB
3533 const struct tdesc_feature *feature_tls
3534 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
07b287a0 3535
ccb8d7e8
AH
3536 if (feature_core == nullptr)
3537 return nullptr;
07b287a0 3538
c1e1314d 3539 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
07b287a0 3540
ba2d2bb2 3541 /* Validate the description provides the mandatory core R registers
07b287a0
MS
3542 and allocate their numbers. */
3543 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
c1e1314d 3544 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
ba2d2bb2
AH
3545 AARCH64_X0_REGNUM + i,
3546 aarch64_r_register_names[i]);
07b287a0
MS
3547
3548 num_regs = AARCH64_X0_REGNUM + i;
3549
ba2d2bb2 3550 /* Add the V registers. */
ccb8d7e8 3551 if (feature_fpu != nullptr)
07b287a0 3552 {
ccb8d7e8 3553 if (feature_sve != nullptr)
ba2d2bb2
AH
3554 error (_("Program contains both fpu and SVE features."));
3555
3556 /* Validate the description provides the mandatory V registers
3557 and allocate their numbers. */
07b287a0 3558 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
c1e1314d 3559 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
ba2d2bb2
AH
3560 AARCH64_V0_REGNUM + i,
3561 aarch64_v_register_names[i]);
07b287a0
MS
3562
3563 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3564 }
07b287a0 3565
ba2d2bb2 3566 /* Add the SVE registers. */
ccb8d7e8 3567 if (feature_sve != nullptr)
ba2d2bb2
AH
3568 {
3569 /* Validate the description provides the mandatory SVE registers
3570 and allocate their numbers. */
3571 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
c1e1314d 3572 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
ba2d2bb2
AH
3573 AARCH64_SVE_Z0_REGNUM + i,
3574 aarch64_sve_register_names[i]);
3575
3576 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3577 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3578 }
3579
ccb8d7e8 3580 if (feature_fpu != nullptr || feature_sve != nullptr)
ba2d2bb2 3581 {
07b287a0
MS
3582 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3583 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3584 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3585 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3586 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3587 }
3588
414d5848
JB
3589 /* Add the TLS register. */
3590 if (feature_tls != nullptr)
3591 {
3592 tls_regnum = num_regs;
3593 /* Validate the descriptor provides the mandatory TLS register
3594 and allocate its number. */
3595 valid_p = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3596 tls_regnum, "tpidr");
3597
3598 num_regs++;
3599 }
3600
76bed0fd
AH
3601 /* Add the pauth registers. */
3602 if (feature_pauth != NULL)
3603 {
3604 first_pauth_regnum = num_regs;
c9cd8ca4 3605 ra_sign_state_offset = num_pseudo_regs;
76bed0fd
AH
3606 /* Validate the descriptor provides the mandatory PAUTH registers and
3607 allocate their numbers. */
3608 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
c1e1314d 3609 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
76bed0fd
AH
3610 first_pauth_regnum + i,
3611 aarch64_pauth_register_names[i]);
3612
3613 num_regs += i;
34dcc7cf 3614 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
76bed0fd
AH
3615 }
3616
5e984dbf
LM
3617 /* Add the MTE registers. */
3618 if (feature_mte != NULL)
3619 {
3620 first_mte_regnum = num_regs;
3621 /* Validate the descriptor provides the mandatory MTE registers and
3622 allocate their numbers. */
3623 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3624 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3625 first_mte_regnum + i,
3626 aarch64_mte_register_names[i]);
3627
3628 num_regs += i;
3629 }
e63ae49b
LM
3630 /* W pseudo-registers */
3631 int first_w_regnum = num_pseudo_regs;
3632 num_pseudo_regs += 31;
5e984dbf 3633
07b287a0 3634 if (!valid_p)
c1e1314d 3635 return nullptr;
07b287a0
MS
3636
3637 /* AArch64 code is always little-endian. */
3638 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3639
345bd07c 3640 aarch64_gdbarch_tdep *tdep = new aarch64_gdbarch_tdep;
ccb8d7e8 3641 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
07b287a0
MS
3642
3643 /* This should be low enough for everything. */
3644 tdep->lowest_pc = 0x20;
3645 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3646 tdep->jb_elt_size = 8;
4da037ef 3647 tdep->vq = vq;
76bed0fd 3648 tdep->pauth_reg_base = first_pauth_regnum;
1ba3a322 3649 tdep->ra_sign_state_regnum = -1;
5e984dbf 3650 tdep->mte_reg_base = first_mte_regnum;
414d5848 3651 tdep->tls_regnum = tls_regnum;
34dcc7cf 3652
07b287a0
MS
3653 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3654 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3655
07b287a0
MS
3656 /* Advance PC across function entry code. */
3657 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3658
3659 /* The stack grows downward. */
3660 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3661
3662 /* Breakpoint manipulation. */
04180708
YQ
3663 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3664 aarch64_breakpoint::kind_from_pc);
3665 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3666 aarch64_breakpoint::bp_from_kind);
07b287a0 3667 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3668 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
3669
3670 /* Information about registers, etc. */
3671 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3672 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3673 set_gdbarch_num_regs (gdbarch, num_regs);
3674
3675 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3676 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3677 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3678 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3679 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3680 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3681 aarch64_pseudo_register_reggroup_p);
76bed0fd 3682 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
07b287a0
MS
3683
3684 /* ABI */
3685 set_gdbarch_short_bit (gdbarch, 16);
3686 set_gdbarch_int_bit (gdbarch, 32);
3687 set_gdbarch_float_bit (gdbarch, 32);
3688 set_gdbarch_double_bit (gdbarch, 64);
3689 set_gdbarch_long_double_bit (gdbarch, 128);
3690 set_gdbarch_long_bit (gdbarch, 64);
3691 set_gdbarch_long_long_bit (gdbarch, 64);
3692 set_gdbarch_ptr_bit (gdbarch, 64);
3693 set_gdbarch_char_signed (gdbarch, 0);
53375380 3694 set_gdbarch_wchar_signed (gdbarch, 0);
07b287a0
MS
3695 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3696 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
552f1157 3697 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
b907456c 3698 set_gdbarch_type_align (gdbarch, aarch64_type_align);
07b287a0 3699
da729c5c
TT
3700 /* Detect whether PC is at a point where the stack has been destroyed. */
3701 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
3702
07b287a0
MS
3703 /* Internal <-> external register number maps. */
3704 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3705
3706 /* Returning results. */
3707 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3708
3709 /* Disassembly. */
3710 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3711
3712 /* Virtual tables. */
3713 set_gdbarch_vbit_in_delta (gdbarch, 1);
3714
3715 /* Hook in the ABI-specific overrides, if they have been registered. */
3716 info.target_desc = tdesc;
c1e1314d 3717 info.tdesc_data = tdesc_data.get ();
07b287a0
MS
3718 gdbarch_init_osabi (info, gdbarch);
3719
3720 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
11e1b75f
AH
3721 /* Register DWARF CFA vendor handler. */
3722 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3723 aarch64_execute_dwarf_cfa_vendor_op);
07b287a0 3724
5133a315
LM
3725 /* Permanent/Program breakpoint handling. */
3726 set_gdbarch_program_breakpoint_here_p (gdbarch,
3727 aarch64_program_breakpoint_here_p);
3728
07b287a0
MS
3729 /* Add some default predicates. */
3730 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3731 dwarf2_append_unwinders (gdbarch);
3732 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3733
3734 frame_base_set_default (gdbarch, &aarch64_normal_base);
3735
3736 /* Now we have tuned the configuration, set a few final things,
3737 based on what the OS ABI has told us. */
3738
3739 if (tdep->jb_pc >= 0)
3740 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3741
ea873d8e
PL
3742 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3743
aa7ca1bb
AH
3744 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3745
c1e1314d 3746 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
07b287a0 3747
1ba3a322
LM
3748 /* Fetch the updated number of registers after we're done adding all
3749 entries from features we don't explicitly care about. This is the case
3750 for bare metal debugging stubs that include a lot of system registers. */
3751 num_regs = gdbarch_num_regs (gdbarch);
3752
3753 /* With the number of real registers updated, setup the pseudo-registers and
3754 record their numbers. */
3755
e63ae49b
LM
3756 /* Setup W pseudo-register numbers. */
3757 tdep->w_pseudo_base = first_w_regnum + num_regs;
3758 tdep->w_pseudo_count = 31;
3759
1ba3a322
LM
3760 /* Pointer authentication pseudo-registers. */
3761 if (tdep->has_pauth ())
3762 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
3763
07b287a0
MS
3764 /* Add standard register aliases. */
3765 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3766 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3767 value_of_aarch64_user_reg,
3768 &aarch64_register_aliases[i].regnum);
3769
e8bf1ce4
JB
3770 register_aarch64_ravenscar_ops (gdbarch);
3771
07b287a0
MS
3772 return gdbarch;
3773}
3774
3775static void
3776aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3777{
08106042 3778 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3779
3780 if (tdep == NULL)
3781 return;
3782
6cb06a8c
TT
3783 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3784 paddress (gdbarch, tdep->lowest_pc));
07b287a0
MS
3785}
3786
#if GDB_SELF_TEST
namespace selftests
{
/* Forward declaration: the definition appears further down in this
   file, after the record/replay support that the test exercises, but
   it must be visible to _initialize_aarch64_tdep below.  */
static void aarch64_process_record_test (void);
}
#endif
/* Declaration preceding the definition (GDB convention for the
   _initialize_* module initializers).  */
void _initialize_aarch64_tdep ();

/* Module initializer: register the AArch64 architecture with the
   gdbarch framework, add the "set/show debug aarch64" maintenance
   commands, and (in self-test builds) register this file's unit
   tests.  */
void
_initialize_aarch64_tdep ()
{
  /* Hook up the architecture-init and tdep-dump callbacks for
     bfd_arch_aarch64.  */
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
99afc88b
OJ
3817
/* AArch64 process record-replay related structures, defines etc.  */

/* Copy the first LENGTH uint32_t register numbers from RECORD_BUF into
   a freshly XNEWVEC-allocated buffer and store that buffer in REGS.
   Does nothing when LENGTH is zero.  The caller owns the allocation;
   it is released by deallocate_reg_mem.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
	      } \
	  } \
	while (0)
99afc88b
OJ
3831
/* Copy the first LENGTH aarch64_mem_r entries from RECORD_BUF into a
   freshly XNEWVEC-allocated buffer and store that buffer in MEMS.
   Does nothing when LENGTH is zero.  The caller owns the allocation;
   it is released by deallocate_reg_mem.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
		memcpy(MEMS, &RECORD_BUF[0], \
		       sizeof(struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	  while (0)
99afc88b
OJ
3844
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory region: ADDR is the start address and LEN the
   number of bytes the recorded instruction may modify.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
3852
/* Result codes returned by the per-instruction-class record
   handlers below.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Registers/memory recorded OK.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Insn decoded but cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN	/* Insn encoding not recognized.  */
};
3859
/* Working state for recording a single instruction.  The aarch64_regs
   and aarch64_mems buffers are heap-allocated by the REG_ALLOC /
   MEM_ALLOC macros and freed by deallocate_reg_mem.  */
struct aarch64_insn_decode_record
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;			/* Address of insn to be recorded.  */
  uint32_t aarch64_insn;		/* Insn to be recorded.  */
  uint32_t mem_rec_count;		/* Count of memory records.  */
  uint32_t reg_rec_count;		/* Count of register records.  */
  uint32_t *aarch64_regs;		/* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;	/* Memory locations to be recorded.  */
};
99afc88b
OJ
3871
/* Record handler for data processing - register instructions.

   Fills AARCH64_INSN_R->aarch64_regs with the destination register
   (and CPSR when the encoding sets flags).  Returns
   AARCH64_RECORD_SUCCESS, or AARCH64_RECORD_UNKNOWN for encodings
   this decoder does not recognize.  */

static unsigned int
aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  /* Bit 28 clear: logical / add-sub (shifted register) group.  */
  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  Bit 29 is the S (set
		 flags) bit.  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  Only the flags change.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3947
/* Record handler for data processing - immediate instructions.

   Records the destination register, plus CPSR when the encoding sets
   flags.  Returns AARCH64_RECORD_SUCCESS or AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      /* Only the destination register is written.  */
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  Bit 29 is the S (set flags) bit.  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  opc == 0b11 (ANDS) sets flags.  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3992
/* Record handler for branch, exception generation and system
   instructions.

   Branches record PC (and LR for branch-and-link forms); SVC is
   delegated to the OS-specific syscall recorder; system instructions
   record the transfer register or CPSR as appropriate.  */

static unsigned int
aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
{

  aarch64_gdbarch_tdep *tdep
    = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC (opc 000, op2 000, LL 01) is handled; the syscall
	     number is read from x8 per the AArch64 Linux ABI.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes BL (writes LR) from B.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4074
/* Record handler for advanced SIMD load and store instructions.

   Loads record the written V registers; stores record the affected
   memory regions (element size / address pairs pushed into
   record_buf_mem two entries at a time).  Writeback forms (bit 23)
   additionally record the base register Rn.  */

static unsigned int
aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      /* scale selects the element size; selem is the number of
	 structure elements (1-4) from opcode<1> and the R bit.  */
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* LD*R (load and replicate): bit 22 set, opcode<0> clear.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22 is the L (load) bit: loads record registers,
		 stores record memory.  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 is the Q bit: 128-bit vs 64-bit vector form.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Writeback form: the base register is updated as well.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  /* record_buf_mem holds (len, addr) pairs, hence the division.  */
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4240
/* Record handler for load and store instructions.

   Dispatches over the load/store encoding classes (exclusive, literal,
   pair, unsigned-immediate, register-offset, immediate/unprivileged);
   Advanced SIMD forms are forwarded to
   aarch64_record_asimd_load_store.  Loads record the target
   register(s); stores record the (size, address) of the affected
   memory.  */

static unsigned int
aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  /* Pair form also loads Rt2.  */
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* Manual sign-magnitude decode of the 7-bit signed offset:
	     bit 6 is the sign, two's complement otherwise.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed forms write relative to the unmodified base;
	     all others apply the offset first.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Writeback: base register is updated too.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate): prefetch has no architectural
		 side effect to record.  */
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate) */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* The 12-bit immediate is scaled by the access size.  */
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* NOTE: deliberately braceless if/else pairs below — the inner
	 else binds to the inner if.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  /* Bit 12 (S) selects whether Rm is scaled by the access
	     size.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* NOTE: deliberately braceless if/else pairs below — the inner
	 else binds to the inner if.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  /* Manual decode of the 9-bit signed offset (bit 8 is the
	     sign).  */
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed (10_11 == 01) uses the unmodified base.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre/post-indexed forms also write back the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4528
/* Record handler for data processing SIMD and floating point
   instructions.

   Exactly two registers are recorded for every recognized encoding:
   the destination (X, V, or CPSR depending on the instruction class)
   and FPSR, since many of these instructions can set FP status
   flags.  */

static unsigned int
aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
	{
	  if (record_debug)
	    debug_printf ("FP - fixed point conversion");

	  /* FCVTZ* to a general register vs. SCVTF/UCVTF to a vector
	     register.  */
	  if ((opcode >> 1) == 0x0 && rmode == 0x03)
	    record_buf[0] = reg_rd;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
	{
	  if (record_debug)
	    debug_printf ("FP - conditional compare");

	  record_buf[0] = AARCH64_CPSR_REGNUM;
	}
      /* Floating point - data processing (2-source) and
	 conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
	{
	  if (record_debug)
	    debug_printf ("FP - DP (2-source)");

	  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else if (insn_bits10_11 == 0x00)
	{
	  /* Floating point - immediate instructions.  */
	  if ((insn_bits12_15 & 0x01) == 0x01
	      || (insn_bits12_15 & 0x07) == 0x04)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	    }
	  /* Floating point - compare instructions.  */
	  else if ((insn_bits12_15 & 0x03) == 0x02)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	    }
	  /* Floating point - integer conversions instructions.  */
	  else if (insn_bits12_15 == 0x00)
	    {
	      /* Convert float to integer instruction.  */
	      if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
		{
		  if (record_debug)
		    debug_printf ("float to int conversion");

		  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		}
	      /* Convert integer to float instruction.  */
	      else if ((opcode >> 1) == 0x01 && !rmode)
		{
		  if (record_debug)
		    debug_printf ("int to float conversion");

		  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      /* Move float to integer instruction.  */
	      else if ((opcode >> 1) == 0x03)
		{
		  if (record_debug)
		    debug_printf ("move float to int");

		  /* opcode<0> selects direction: FMOV to general vs.
		     to vector register.  */
		  if (!(opcode & 0x01))
		    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		  else
		    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
	debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
	  && !bit (aarch64_insn_r->aarch64_insn, 15)
	  && bit (aarch64_insn_r->aarch64_insn, 10))
	{
	  /* SMOV/UMOV write a general register; other copies write a
	     vector register.  */
	  if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
	    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else
	record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
	debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  /* Record the V/X register.  */
  aarch64_insn_r->reg_rec_count++;

  /* Some of these instructions may set bits in the FPSR, so record it
     too.  */
  record_buf[1] = AARCH64_FPSR_REGNUM;
  aarch64_insn_r->reg_rec_count++;

  gdb_assert (aarch64_insn_r->reg_rec_count == 2);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4681
4682/* Decodes insns type and invokes its record handler. */
4683
4684static unsigned int
4748a9be 4685aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4686{
4687 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4688
4689 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4690 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4691 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4692 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4693
4694 /* Data processing - immediate instructions. */
4695 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4696 return aarch64_record_data_proc_imm (aarch64_insn_r);
4697
4698 /* Branch, exception generation and system instructions. */
4699 if (ins_bit26 && !ins_bit27 && ins_bit28)
4700 return aarch64_record_branch_except_sys (aarch64_insn_r);
4701
4702 /* Load and store instructions. */
4703 if (!ins_bit25 && ins_bit27)
4704 return aarch64_record_load_store (aarch64_insn_r);
4705
4706 /* Data processing - register instructions. */
4707 if (ins_bit25 && !ins_bit26 && ins_bit27)
4708 return aarch64_record_data_proc_reg (aarch64_insn_r);
4709
4710 /* Data processing - SIMD and floating point instructions. */
4711 if (ins_bit25 && ins_bit26 && ins_bit27)
4712 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4713
4714 return AARCH64_RECORD_UNSUPPORTED;
4715}
4716
4717/* Cleans up local record registers and memory allocations. */
4718
4719static void
4748a9be 4720deallocate_reg_mem (aarch64_insn_decode_record *record)
99afc88b
OJ
4721{
4722 xfree (record->aarch64_regs);
4723 xfree (record->aarch64_mems);
4724}
4725
1e2b521d
YQ
#if GDB_SELF_TEST
namespace selftests {

/* Verify that the process-record instruction decoder accepts a PRFM
   (prefetch) instruction and records no register or memory changes
   for it.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record record;
  memset (&record, 0, sizeof (record));

  record.regcache = NULL;
  record.this_addr = 0;
  record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  record.aarch64_insn = 0xf9800020;

  uint32_t status = aarch64_record_decode_insn_handler (&record);
  SELF_CHECK (status == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (record.reg_rec_count == 0);
  SELF_CHECK (record.mem_rec_count == 0);

  deallocate_reg_mem (&record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4759
99afc88b
OJ
4760/* Parse the current instruction and record the values of the registers and
4761 memory that will be changed in current instruction to record_arch_list
4762 return -1 if something is wrong. */
4763
4764int
4765aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4766 CORE_ADDR insn_addr)
4767{
4768 uint32_t rec_no = 0;
4769 uint8_t insn_size = 4;
4770 uint32_t ret = 0;
99afc88b 4771 gdb_byte buf[insn_size];
4748a9be 4772 aarch64_insn_decode_record aarch64_record;
99afc88b
OJ
4773
4774 memset (&buf[0], 0, insn_size);
4748a9be 4775 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
99afc88b
OJ
4776 target_read_memory (insn_addr, &buf[0], insn_size);
4777 aarch64_record.aarch64_insn
4778 = (uint32_t) extract_unsigned_integer (&buf[0],
4779 insn_size,
4780 gdbarch_byte_order (gdbarch));
4781 aarch64_record.regcache = regcache;
4782 aarch64_record.this_addr = insn_addr;
4783 aarch64_record.gdbarch = gdbarch;
4784
4785 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4786 if (ret == AARCH64_RECORD_UNSUPPORTED)
4787 {
6cb06a8c
TT
4788 gdb_printf (gdb_stderr,
4789 _("Process record does not support instruction "
4790 "0x%0x at address %s.\n"),
4791 aarch64_record.aarch64_insn,
4792 paddress (gdbarch, insn_addr));
99afc88b
OJ
4793 ret = -1;
4794 }
4795
4796 if (0 == ret)
4797 {
4798 /* Record registers. */
4799 record_full_arch_list_add_reg (aarch64_record.regcache,
4800 AARCH64_PC_REGNUM);
4801 /* Always record register CPSR. */
4802 record_full_arch_list_add_reg (aarch64_record.regcache,
4803 AARCH64_CPSR_REGNUM);
4804 if (aarch64_record.aarch64_regs)
4805 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4806 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4807 aarch64_record.aarch64_regs[rec_no]))
4808 ret = -1;
4809
4810 /* Record memories. */
4811 if (aarch64_record.aarch64_mems)
4812 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4813 if (record_full_arch_list_add_mem
4814 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4815 aarch64_record.aarch64_mems[rec_no].len))
4816 ret = -1;
4817
4818 if (record_full_arch_list_add_end ())
4819 ret = -1;
4820 }
4821
4822 deallocate_reg_mem (&aarch64_record);
4823 return ret;
4824}