/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21#include "defs.h"
22
23#include "frame.h"
07b287a0
MS
24#include "gdbcmd.h"
25#include "gdbcore.h"
4de283e4 26#include "dis-asm.h"
d55e5aa6
TT
27#include "regcache.h"
28#include "reggroups.h"
4de283e4
TT
29#include "value.h"
30#include "arch-utils.h"
31#include "osabi.h"
32#include "frame-unwind.h"
33#include "frame-base.h"
d55e5aa6 34#include "trad-frame.h"
4de283e4
TT
35#include "objfiles.h"
36#include "dwarf2.h"
82ca8957 37#include "dwarf2/frame.h"
4de283e4
TT
38#include "gdbtypes.h"
39#include "prologue-value.h"
40#include "target-descriptions.h"
07b287a0 41#include "user-regs.h"
4de283e4 42#include "ax-gdb.h"
268a13a5 43#include "gdbsupport/selftest.h"
4de283e4
TT
44
45#include "aarch64-tdep.h"
46#include "aarch64-ravenscar-thread.h"
47
4de283e4
TT
48#include "record.h"
49#include "record-full.h"
50#include "arch/aarch64-insn.h"
0d12e84c 51#include "gdbarch.h"
4de283e4
TT
52
53#include "opcode/aarch64.h"
54#include <algorithm>
0ee6b1c5 55#include <unordered_map>
f77ee802 56
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  Used when deciding how aggregates are passed/returned in
   FP/SIMD registers per the AArch64 PCS.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors, keyed by the feature set they
   were built for, so each distinct description is only created once.  */
static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
/* The standard register names, and all the valid aliases for them.
   Each entry maps a user-visible alias to a GDB register number.  */
static const struct
{
  const char *const name;	/* Alias as the user types it.  */
  int regnum;			/* GDB register number it resolves to.  */
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},
  /* specials: ip0/ip1 are aliases for x16 and x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
79
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  /* Program counter and status register follow the GPRs and SP.  */
  "pc", "cpsr"
};
95
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  /* FP status and control registers follow v31.  */
  "fpsr",
  "fpcr"
};
112
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  /* Predicate registers.  */
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  /* First-fault register and vector granule (vector length) register.  */
  "ffr", "vg"
};
133
/* The pointer authentication mask registers.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};
141
/* The MTE (memory tagging) registers.  */
static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};
147
/* AArch64 prologue cache structure.  Filled in by the prologue analyzer
   and consumed by the frame unwinders.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  Allocated with trad_frame_alloc_saved_regs;
     offsets are relative until fixed up against prev_sp.  */
  trad_frame_saved_reg *saved_regs;
};
179
/* Show-hook for the "set/show debug aarch64" command: report whether
   AArch64 debugging output is enabled.  VALUE is the printable form of
   the current setting.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}
186
namespace {

/* Abstract instruction reader.  The indirection lets the prologue
   analyzer run either against the live target or (in the selftests
   below) against a hand-cooked instruction array.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction of LEN bytes at MEMADDR, using BYTE_ORDER,
     and return it as an unsigned value.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    /* Reads through the code cache.  */
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
212
3d31bc39
AH
213/* If address signing is enabled, mask off the signature bits from the link
214 register, which is passed by value in ADDR, using the register values in
215 THIS_FRAME. */
11e1b75f
AH
216
217static CORE_ADDR
345bd07c 218aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
bd2b40ac 219 frame_info_ptr this_frame, CORE_ADDR addr)
11e1b75f
AH
220{
221 if (tdep->has_pauth ()
222 && frame_unwind_register_unsigned (this_frame,
c9cd8ca4 223 tdep->ra_sign_state_regnum))
11e1b75f
AH
224 {
225 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
226 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
227 addr = addr & ~cmask;
3d31bc39
AH
228
229 /* Record in the frame that the link register required unmasking. */
230 set_frame_previous_pc_masked (this_frame);
11e1b75f
AH
231 }
232
233 return addr;
234}
235
aa7ca1bb
AH
236/* Implement the "get_pc_address_flags" gdbarch method. */
237
238static std::string
bd2b40ac 239aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
aa7ca1bb
AH
240{
241 if (pc != 0 && get_frame_pc_masked (frame))
242 return "PAC";
243
244 return "";
245}
246
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Scan instructions in [START, LIMIT) as supplied by READER.  If CACHE
   is non-NULL, record the discovered frame register, frame size and
   saved-register locations there.  Return the address of the first
   instruction that was not recognized as part of the prologue.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  Symbolic values:
     each register starts out as "itself plus 0".  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  /* Symbolic model of the stack area touched by the prologue.  */
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      /* Undecodable instruction ends the prologue scan.  */
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      /* ADD/SUB (immediate): typically SP adjustment or FP setup.  */
      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      /* SUB (extended register), e.g. variable stack allocation.  */
      else if (inst.opcode->iclass == addsub_ext
	       && strcmp ("sub", inst.opcode->name) == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);

	  regs[rd] = pv_subtract (regs[rn], regs[rm]);
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  unsigned rd = inst.operands[0].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
	  gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[rd] = pv_constant (inst.operands[1].imm.value
				  << inst.operands[1].shifter.amount);
	}
      /* ORR with zero shift can be a register-to-register move.  */
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      /* STUR: unscaled-offset store of a single register.  */
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* FP/SIMD registers are tracked after the X registers.  */
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      /* System instructions: recognize PAC/BTI markers, give up on the
	 rest.  */
      else if (inst.opcode->iclass == ic_system)
	{
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff  /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      /* Remember the RA signing state as a literal value.  */
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record where each X register was saved, as an offset from the
     frame base (fixed up to an address by the caller).  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  /* Likewise for the D registers, which live past the raw registers in
     the saved_regs array.  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}
567
4d9a9006
YQ
568static CORE_ADDR
569aarch64_analyze_prologue (struct gdbarch *gdbarch,
570 CORE_ADDR start, CORE_ADDR limit,
571 struct aarch64_prologue_cache *cache)
572{
573 instruction_reader reader;
574
575 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
576 reader);
577}
578
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.
   Serves a fixed array of 32-bit instructions indexed by address.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    /* Only whole, aligned, in-range instruction reads are valid.  */
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;	/* Borrowed; owned by the caller.  */
  size_t m_insns_size;		/* Number of instructions in m_insns.  */
};

/* Unit tests for aarch64_analyze_prologue, each driven by a
   hand-assembled instruction sequence.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    /* Analysis stops at the bl, after two prologue instructions.  */
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;


	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  /* paciasp must have recorded a literal RA_STATE value.  */
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
891
/* Implement the "skip_prologue" gdbarch method.  Return the address of
   the first instruction past the prologue of the function containing
   PC, or PC itself if that is further along.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
925
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (frame_info_ptr this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      /* Never scan past the frame's current PC.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      /* No symbol information: fall back to the conventional frame
	 layout, with x29 (FP) and x30 (LR) saved at the frame base.  */
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}
978
7dfa3edc
PL
979/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
980 function may throw an exception if the inferior's registers or memory is
981 not available. */
07b287a0 982
7dfa3edc 983static void
bd2b40ac 984aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
7dfa3edc 985 struct aarch64_prologue_cache *cache)
07b287a0 986{
07b287a0
MS
987 CORE_ADDR unwound_fp;
988 int reg;
989
07b287a0
MS
990 aarch64_scan_prologue (this_frame, cache);
991
992 if (cache->framereg == -1)
7dfa3edc 993 return;
07b287a0
MS
994
995 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
996 if (unwound_fp == 0)
7dfa3edc 997 return;
07b287a0
MS
998
999 cache->prev_sp = unwound_fp + cache->framesize;
1000
1001 /* Calculate actual addresses of saved registers using offsets
1002 determined by aarch64_analyze_prologue. */
1003 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
a9a87d35 1004 if (cache->saved_regs[reg].is_addr ())
098caef4
LM
1005 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
1006 + cache->prev_sp);
07b287a0 1007
db634143
PL
1008 cache->func = get_frame_func (this_frame);
1009
7dfa3edc
PL
1010 cache->available_p = 1;
1011}
1012
1013/* Allocate and fill in *THIS_CACHE with information about the prologue of
1014 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1015 Return a pointer to the current aarch64_prologue_cache in
1016 *THIS_CACHE. */
1017
1018static struct aarch64_prologue_cache *
bd2b40ac 1019aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
7dfa3edc
PL
1020{
1021 struct aarch64_prologue_cache *cache;
1022
1023 if (*this_cache != NULL)
9a3c8263 1024 return (struct aarch64_prologue_cache *) *this_cache;
7dfa3edc
PL
1025
1026 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1027 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1028 *this_cache = cache;
1029
a70b8144 1030 try
7dfa3edc
PL
1031 {
1032 aarch64_make_prologue_cache_1 (this_frame, cache);
1033 }
230d2906 1034 catch (const gdb_exception_error &ex)
7dfa3edc
PL
1035 {
1036 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1037 throw;
7dfa3edc 1038 }
7dfa3edc 1039
07b287a0
MS
1040 return cache;
1041}
1042
7dfa3edc
PL
1043/* Implement the "stop_reason" frame_unwind method. */
1044
1045static enum unwind_stop_reason
bd2b40ac 1046aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
7dfa3edc
PL
1047 void **this_cache)
1048{
1049 struct aarch64_prologue_cache *cache
1050 = aarch64_make_prologue_cache (this_frame, this_cache);
1051
1052 if (!cache->available_p)
1053 return UNWIND_UNAVAILABLE;
1054
1055 /* Halt the backtrace at "_start". */
345bd07c 1056 gdbarch *arch = get_frame_arch (this_frame);
08106042 1057 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
345bd07c 1058 if (cache->prev_pc <= tdep->lowest_pc)
7dfa3edc
PL
1059 return UNWIND_OUTERMOST;
1060
1061 /* We've hit a wall, stop. */
1062 if (cache->prev_sp == 0)
1063 return UNWIND_OUTERMOST;
1064
1065 return UNWIND_NO_REASON;
1066}
1067
07b287a0
MS
1068/* Our frame ID for a normal frame is the current function's starting
1069 PC and the caller's SP when we were called. */
1070
1071static void
bd2b40ac 1072aarch64_prologue_this_id (frame_info_ptr this_frame,
07b287a0
MS
1073 void **this_cache, struct frame_id *this_id)
1074{
7c8edfae
PL
1075 struct aarch64_prologue_cache *cache
1076 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 1077
7dfa3edc
PL
1078 if (!cache->available_p)
1079 *this_id = frame_id_build_unavailable_stack (cache->func);
1080 else
1081 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
1082}
1083
1084/* Implement the "prev_register" frame_unwind method. */
1085
1086static struct value *
bd2b40ac 1087aarch64_prologue_prev_register (frame_info_ptr this_frame,
07b287a0
MS
1088 void **this_cache, int prev_regnum)
1089{
7c8edfae
PL
1090 struct aarch64_prologue_cache *cache
1091 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1092
1093 /* If we are asked to unwind the PC, then we need to return the LR
1094 instead. The prologue may save PC, but it will point into this
1095 frame's prologue, not the next frame's resume location. */
1096 if (prev_regnum == AARCH64_PC_REGNUM)
1097 {
1098 CORE_ADDR lr;
17e116a7 1099 struct gdbarch *gdbarch = get_frame_arch (this_frame);
345bd07c 1100 aarch64_gdbarch_tdep *tdep
08106042 1101 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
1102
1103 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
17e116a7
AH
1104
1105 if (tdep->has_pauth ()
c9cd8ca4 1106 && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
3d31bc39 1107 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
17e116a7 1108
07b287a0
MS
1109 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1110 }
1111
1112 /* SP is generally not saved to the stack, but this frame is
1113 identified by the next frame's stack pointer at the time of the
1114 call. The value was already reconstructed into PREV_SP. */
1115 /*
dda83cd7
SM
1116 +----------+ ^
1117 | saved lr | |
07b287a0
MS
1118 +->| saved fp |--+
1119 | | |
1120 | | | <- Previous SP
1121 | +----------+
1122 | | saved lr |
1123 +--| saved fp |<- FP
dda83cd7
SM
1124 | |
1125 | |<- SP
1126 +----------+ */
07b287a0
MS
1127 if (prev_regnum == AARCH64_SP_REGNUM)
1128 return frame_unwind_got_constant (this_frame, prev_regnum,
1129 cache->prev_sp);
1130
1131 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1132 prev_regnum);
1133}
1134
/* AArch64 prologue unwinder.  Uses the prologue analyzer above to build
   frame IDs and recover saved registers.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
1146
8b61f75d
PL
1147/* Allocate and fill in *THIS_CACHE with information about the prologue of
1148 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1149 Return a pointer to the current aarch64_prologue_cache in
1150 *THIS_CACHE. */
07b287a0
MS
1151
1152static struct aarch64_prologue_cache *
bd2b40ac 1153aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
07b287a0 1154{
07b287a0 1155 struct aarch64_prologue_cache *cache;
8b61f75d
PL
1156
1157 if (*this_cache != NULL)
9a3c8263 1158 return (struct aarch64_prologue_cache *) *this_cache;
07b287a0
MS
1159
1160 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1161 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
8b61f75d 1162 *this_cache = cache;
07b287a0 1163
a70b8144 1164 try
02a2a705
PL
1165 {
1166 cache->prev_sp = get_frame_register_unsigned (this_frame,
1167 AARCH64_SP_REGNUM);
1168 cache->prev_pc = get_frame_pc (this_frame);
1169 cache->available_p = 1;
1170 }
230d2906 1171 catch (const gdb_exception_error &ex)
02a2a705
PL
1172 {
1173 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1174 throw;
02a2a705 1175 }
07b287a0
MS
1176
1177 return cache;
1178}
1179
02a2a705
PL
1180/* Implement the "stop_reason" frame_unwind method. */
1181
1182static enum unwind_stop_reason
bd2b40ac 1183aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
02a2a705
PL
1184 void **this_cache)
1185{
1186 struct aarch64_prologue_cache *cache
1187 = aarch64_make_stub_cache (this_frame, this_cache);
1188
1189 if (!cache->available_p)
1190 return UNWIND_UNAVAILABLE;
1191
1192 return UNWIND_NO_REASON;
1193}
1194
07b287a0
MS
1195/* Our frame ID for a stub frame is the current SP and LR. */
1196
1197static void
bd2b40ac 1198aarch64_stub_this_id (frame_info_ptr this_frame,
07b287a0
MS
1199 void **this_cache, struct frame_id *this_id)
1200{
8b61f75d
PL
1201 struct aarch64_prologue_cache *cache
1202 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1203
02a2a705
PL
1204 if (cache->available_p)
1205 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1206 else
1207 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1208}
1209
1210/* Implement the "sniffer" frame_unwind method. */
1211
1212static int
1213aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
bd2b40ac 1214 frame_info_ptr this_frame,
07b287a0
MS
1215 void **this_prologue_cache)
1216{
1217 CORE_ADDR addr_in_block;
1218 gdb_byte dummy[4];
1219
1220 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1221 if (in_plt_section (addr_in_block)
07b287a0
MS
1222 /* We also use the stub winder if the target memory is unreadable
1223 to avoid having the prologue unwinder trying to read it. */
1224 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1225 return 1;
1226
1227 return 0;
1228}
1229
/* AArch64 stub unwinder.  Handles PLT stubs and frames whose code is
   unreadable; reuses the prologue unwinder's prev_register method.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
1241
1242/* Return the frame base address of *THIS_FRAME. */
1243
1244static CORE_ADDR
bd2b40ac 1245aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
07b287a0 1246{
7c8edfae
PL
1247 struct aarch64_prologue_cache *cache
1248 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1249
1250 return cache->prev_sp - cache->framesize;
1251}
1252
/* AArch64 default frame base information.  The same address is used
   for the base, locals and arguments.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
1261
07b287a0
MS
1262/* Return the value of the REGNUM register in the previous frame of
1263 *THIS_FRAME. */
1264
1265static struct value *
bd2b40ac 1266aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
07b287a0
MS
1267 void **this_cache, int regnum)
1268{
345bd07c 1269 gdbarch *arch = get_frame_arch (this_frame);
08106042 1270 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
07b287a0
MS
1271 CORE_ADDR lr;
1272
1273 switch (regnum)
1274 {
1275 case AARCH64_PC_REGNUM:
1276 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
3d31bc39 1277 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
07b287a0
MS
1278 return frame_unwind_got_constant (this_frame, regnum, lr);
1279
1280 default:
f34652de 1281 internal_error (_("Unexpected register %d"), regnum);
07b287a0
MS
1282 }
1283}
1284
11e1b75f
AH
/* Single-byte DWARF expressions used as the saved value of the
   RA_SIGN_STATE pseudo register: op_lit0 means LR is not signed,
   op_lit1 means it is (toggled by DW_CFA_AARCH64_negate_ra_state).  */
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
1287
07b287a0
MS
1288/* Implement the "init_reg" dwarf2_frame_ops method. */
1289
1290static void
1291aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1292 struct dwarf2_frame_state_reg *reg,
bd2b40ac 1293 frame_info_ptr this_frame)
07b287a0 1294{
08106042 1295 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
11e1b75f 1296
07b287a0
MS
1297 switch (regnum)
1298 {
1299 case AARCH64_PC_REGNUM:
1300 reg->how = DWARF2_FRAME_REG_FN;
1301 reg->loc.fn = aarch64_dwarf2_prev_register;
11e1b75f
AH
1302 return;
1303
07b287a0
MS
1304 case AARCH64_SP_REGNUM:
1305 reg->how = DWARF2_FRAME_REG_CFA;
11e1b75f
AH
1306 return;
1307 }
1308
1309 /* Init pauth registers. */
1310 if (tdep->has_pauth ())
1311 {
c9cd8ca4 1312 if (regnum == tdep->ra_sign_state_regnum)
11e1b75f
AH
1313 {
1314 /* Initialize RA_STATE to zero. */
1315 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1316 reg->loc.exp.start = &op_lit0;
1317 reg->loc.exp.len = 1;
1318 return;
1319 }
1320 else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1321 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1322 {
1323 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1324 return;
1325 }
07b287a0
MS
1326 }
1327}
1328
11e1b75f
AH
1329/* Implement the execute_dwarf_cfa_vendor_op method. */
1330
1331static bool
1332aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1333 struct dwarf2_frame_state *fs)
1334{
08106042 1335 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
11e1b75f
AH
1336 struct dwarf2_frame_state_reg *ra_state;
1337
8fca4da0 1338 if (op == DW_CFA_AARCH64_negate_ra_state)
11e1b75f 1339 {
8fca4da0
AH
1340 /* On systems without pauth, treat as a nop. */
1341 if (!tdep->has_pauth ())
1342 return true;
1343
11e1b75f 1344 /* Allocate RA_STATE column if it's not allocated yet. */
c9cd8ca4 1345 fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);
11e1b75f
AH
1346
1347 /* Toggle the status of RA_STATE between 0 and 1. */
c9cd8ca4 1348 ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
11e1b75f
AH
1349 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1350
1351 if (ra_state->loc.exp.start == nullptr
1352 || ra_state->loc.exp.start == &op_lit0)
1353 ra_state->loc.exp.start = &op_lit1;
1354 else
1355 ra_state->loc.exp.start = &op_lit0;
1356
1357 ra_state->loc.exp.len = 1;
1358
1359 return true;
1360 }
1361
1362 return false;
1363}
1364
5133a315
LM
1365/* Used for matching BRK instructions for AArch64. */
1366static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
1367static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1368
1369/* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1370
1371static bool
1372aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1373{
1374 const uint32_t insn_len = 4;
1375 gdb_byte target_mem[4];
1376
1377 /* Enable the automatic memory restoration from breakpoints while
1378 we read the memory. Otherwise we may find temporary breakpoints, ones
1379 inserted by GDB, and flag them as permanent breakpoints. */
1380 scoped_restore restore_memory
1381 = make_scoped_restore_show_memory_breakpoints (0);
1382
1383 if (target_read_memory (address, target_mem, insn_len) == 0)
1384 {
1385 uint32_t insn =
1386 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
1387 gdbarch_byte_order_for_code (gdbarch));
1388
1389 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1390 of such instructions with different immediate values. Different OS'
1391 may use a different variation, but they have the same outcome. */
1392 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1393 }
1394
1395 return false;
1396}
1397
07b287a0
MS
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
07b287a0 1410
b907456c
AB
1411/* Implement the gdbarch type alignment method, overrides the generic
1412 alignment algorithm for anything that is aarch64 specific. */
07b287a0 1413
b907456c
AB
1414static ULONGEST
1415aarch64_type_align (gdbarch *gdbarch, struct type *t)
07b287a0 1416{
07b287a0 1417 t = check_typedef (t);
bd63c870 1418 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
07b287a0 1419 {
b907456c
AB
1420 /* Use the natural alignment for vector types (the same for
1421 scalar type), but the maximum alignment is 128-bit. */
df86565b 1422 if (t->length () > 16)
b907456c 1423 return 16;
238f2452 1424 else
df86565b 1425 return t->length ();
07b287a0 1426 }
b907456c
AB
1427
1428 /* Allow the common code to calculate the alignment. */
1429 return 0;
07b287a0
MS
1430}
1431
ea92689a
AH
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      /* A floating-point base element; no wider than 16 bytes.  */
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex value counts as two base elements of its
	   component type.  */
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    /* A short vector (8 or 16 bytes) is itself a base
	       element.  */
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    /* An ordinary array needs (element count) times the
	       registers of one element.  */
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	/* Recursively sum the registers needed by each non-static
	   member; all members must share one fundamental type.  */
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
1542
1543/* Return true if an argument, whose type is described by TYPE, can be passed or
1544 returned in simd/fp registers, providing enough parameter passing registers
1545 are available. This is as described in the AAPCS64.
1546
1547 Upon successful return, *COUNT returns the number of needed registers,
1548 *FUNDAMENTAL_TYPE contains the type of those registers.
1549
1550 Candidate as per the AAPCS64 5.4.2.C is either a:
1551 - float.
1552 - short-vector.
1553 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1554 all the members are floats and has at most 4 members.
1555 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1556 all the members are short vectors and has at most 4 members.
1557 - Complex (7.1.1)
1558
1559 Note that HFAs and HVAs can include nested structures and arrays. */
1560
0e745c60 1561static bool
ea92689a
AH
1562aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1563 struct type **fundamental_type)
1564{
1565 if (type == nullptr)
1566 return false;
1567
1568 *fundamental_type = nullptr;
1569
1570 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1571 fundamental_type);
1572
1573 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1574 {
1575 *count = ag_count;
1576 return true;
1577 }
1578 else
1579 return false;
1580}
1581
07b287a0
MS
1582/* AArch64 function call information structure. */
1583struct aarch64_call_info
1584{
1585 /* the current argument number. */
89055eaa 1586 unsigned argnum = 0;
07b287a0
MS
1587
1588 /* The next general purpose register number, equivalent to NGRN as
1589 described in the AArch64 Procedure Call Standard. */
89055eaa 1590 unsigned ngrn = 0;
07b287a0
MS
1591
1592 /* The next SIMD and floating point register number, equivalent to
1593 NSRN as described in the AArch64 Procedure Call Standard. */
89055eaa 1594 unsigned nsrn = 0;
07b287a0
MS
1595
1596 /* The next stacked argument address, equivalent to NSAA as
1597 described in the AArch64 Procedure Call Standard. */
89055eaa 1598 unsigned nsaa = 0;
07b287a0
MS
1599
1600 /* Stack item vector. */
89055eaa 1601 std::vector<stack_item_t> si;
07b287a0
MS
1602};
1603
1604/* Pass a value in a sequence of consecutive X registers. The caller
30baf67b 1605 is responsible for ensuring sufficient registers are available. */
07b287a0
MS
1606
1607static void
1608pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1609 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1610 struct value *arg)
07b287a0
MS
1611{
1612 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
df86565b 1613 int len = type->length ();
78134374 1614 enum type_code typecode = type->code ();
07b287a0 1615 int regnum = AARCH64_X0_REGNUM + info->ngrn;
50888e42 1616 const bfd_byte *buf = value_contents (arg).data ();
07b287a0
MS
1617
1618 info->argnum++;
1619
1620 while (len > 0)
1621 {
1622 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1623 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1624 byte_order);
1625
1626
1627 /* Adjust sub-word struct/union args when big-endian. */
1628 if (byte_order == BFD_ENDIAN_BIG
1629 && partial_len < X_REGISTER_SIZE
1630 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1631 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1632
c6185dce
SM
1633 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
1634 gdbarch_register_name (gdbarch, regnum),
1635 phex (regval, X_REGISTER_SIZE));
1636
07b287a0
MS
1637 regcache_cooked_write_unsigned (regcache, regnum, regval);
1638 len -= partial_len;
1639 buf += partial_len;
1640 regnum++;
1641 }
1642}
1643
1644/* Attempt to marshall a value in a V register. Return 1 if
1645 successful, or 0 if insufficient registers are available. This
1646 function, unlike the equivalent pass_in_x() function does not
1647 handle arguments spread across multiple registers. */
1648
1649static int
1650pass_in_v (struct gdbarch *gdbarch,
1651 struct regcache *regcache,
1652 struct aarch64_call_info *info,
0735fddd 1653 int len, const bfd_byte *buf)
07b287a0
MS
1654{
1655 if (info->nsrn < 8)
1656 {
07b287a0 1657 int regnum = AARCH64_V0_REGNUM + info->nsrn;
3ff2c72e
AH
1658 /* Enough space for a full vector register. */
1659 gdb_byte reg[register_size (gdbarch, regnum)];
1660 gdb_assert (len <= sizeof (reg));
07b287a0
MS
1661
1662 info->argnum++;
1663 info->nsrn++;
1664
0735fddd
YQ
1665 memset (reg, 0, sizeof (reg));
1666 /* PCS C.1, the argument is allocated to the least significant
1667 bits of V register. */
1668 memcpy (reg, buf, len);
b66f5587 1669 regcache->cooked_write (regnum, reg);
0735fddd 1670
c6185dce
SM
1671 aarch64_debug_printf ("arg %d in %s", info->argnum,
1672 gdbarch_register_name (gdbarch, regnum));
1673
07b287a0
MS
1674 return 1;
1675 }
1676 info->nsrn = 8;
1677 return 0;
1678}
1679
1680/* Marshall an argument onto the stack. */
1681
1682static void
1683pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1684 struct value *arg)
07b287a0 1685{
50888e42 1686 const bfd_byte *buf = value_contents (arg).data ();
df86565b 1687 int len = type->length ();
07b287a0
MS
1688 int align;
1689 stack_item_t item;
1690
1691 info->argnum++;
1692
b907456c 1693 align = type_align (type);
07b287a0
MS
1694
1695 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1696 Natural alignment of the argument's type. */
1697 align = align_up (align, 8);
1698
1699 /* The AArch64 PCS requires at most doubleword alignment. */
1700 if (align > 16)
1701 align = 16;
1702
c6185dce
SM
1703 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1704 info->nsaa);
07b287a0
MS
1705
1706 item.len = len;
1707 item.data = buf;
89055eaa 1708 info->si.push_back (item);
07b287a0
MS
1709
1710 info->nsaa += len;
1711 if (info->nsaa & (align - 1))
1712 {
1713 /* Push stack alignment padding. */
1714 int pad = align - (info->nsaa & (align - 1));
1715
1716 item.len = pad;
c3c87445 1717 item.data = NULL;
07b287a0 1718
89055eaa 1719 info->si.push_back (item);
07b287a0
MS
1720 info->nsaa += pad;
1721 }
1722}
1723
1724/* Marshall an argument into a sequence of one or more consecutive X
1725 registers or, if insufficient X registers are available then onto
1726 the stack. */
1727
1728static void
1729pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1730 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1731 struct value *arg)
07b287a0 1732{
df86565b 1733 int len = type->length ();
07b287a0
MS
1734 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1735
1736 /* PCS C.13 - Pass in registers if we have enough spare */
1737 if (info->ngrn + nregs <= 8)
1738 {
8e80f9d1 1739 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1740 info->ngrn += nregs;
1741 }
1742 else
1743 {
1744 info->ngrn = 8;
8e80f9d1 1745 pass_on_stack (info, type, arg);
07b287a0
MS
1746 }
1747}
1748
0e745c60
AH
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			value_contents (arg).data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex value occupies two consecutive V registers: the
	   real part followed by the imaginary part.  */
	const bfd_byte *buf = value_contents (arg).data ();
	struct type *target_type = check_typedef (arg_type->target_type ());

	if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, target_type->length (),
			  buf + target_type->length ());
      }

    case TYPE_CODE_ARRAY:
      /* A short vector fits in a single V register.  */
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			  value_contents (arg).data ());
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* HFA/HVA (and non-vector arrays): recursively pass each member
	 in its own register(s).  */
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&arg_type->field (i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
1806
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = arg_type->length ();

      /* If arg can be passed in v registers as per the AAPCS64, then do so if
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (arg_type->code ())
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4 && !is_fixed_point_type (arg_type))
	    {
	      /* Promote to 32 bit integer.  */
	      if (arg_type->is_unsigned ())
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg).data (), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  /* Pop the stack items in reverse order so the first argument ends up
     at the lowest address.  */
  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
1961
1962/* Implement the "frame_align" gdbarch method. */
1963
1964static CORE_ADDR
1965aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1966{
1967 /* Align the stack to sixteen bytes. */
1968 return sp & ~(CORE_ADDR) 15;
1969}
1970
1971/* Return the type for an AdvSISD Q register. */
1972
1973static struct type *
1974aarch64_vnq_type (struct gdbarch *gdbarch)
1975{
08106042 1976 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
1977
1978 if (tdep->vnq_type == NULL)
1979 {
1980 struct type *t;
1981 struct type *elem;
1982
1983 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1984 TYPE_CODE_UNION);
1985
1986 elem = builtin_type (gdbarch)->builtin_uint128;
1987 append_composite_type_field (t, "u", elem);
1988
1989 elem = builtin_type (gdbarch)->builtin_int128;
1990 append_composite_type_field (t, "s", elem);
1991
1992 tdep->vnq_type = t;
1993 }
1994
1995 return tdep->vnq_type;
1996}
1997
1998/* Return the type for an AdvSISD D register. */
1999
2000static struct type *
2001aarch64_vnd_type (struct gdbarch *gdbarch)
2002{
08106042 2003 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2004
2005 if (tdep->vnd_type == NULL)
2006 {
2007 struct type *t;
2008 struct type *elem;
2009
2010 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2011 TYPE_CODE_UNION);
2012
2013 elem = builtin_type (gdbarch)->builtin_double;
2014 append_composite_type_field (t, "f", elem);
2015
2016 elem = builtin_type (gdbarch)->builtin_uint64;
2017 append_composite_type_field (t, "u", elem);
2018
2019 elem = builtin_type (gdbarch)->builtin_int64;
2020 append_composite_type_field (t, "s", elem);
2021
2022 tdep->vnd_type = t;
2023 }
2024
2025 return tdep->vnd_type;
2026}
2027
2028/* Return the type for an AdvSISD S register. */
2029
2030static struct type *
2031aarch64_vns_type (struct gdbarch *gdbarch)
2032{
08106042 2033 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2034
2035 if (tdep->vns_type == NULL)
2036 {
2037 struct type *t;
2038 struct type *elem;
2039
2040 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2041 TYPE_CODE_UNION);
2042
2043 elem = builtin_type (gdbarch)->builtin_float;
2044 append_composite_type_field (t, "f", elem);
2045
2046 elem = builtin_type (gdbarch)->builtin_uint32;
2047 append_composite_type_field (t, "u", elem);
2048
2049 elem = builtin_type (gdbarch)->builtin_int32;
2050 append_composite_type_field (t, "s", elem);
2051
2052 tdep->vns_type = t;
2053 }
2054
2055 return tdep->vns_type;
2056}
2057
2058/* Return the type for an AdvSISD H register. */
2059
2060static struct type *
2061aarch64_vnh_type (struct gdbarch *gdbarch)
2062{
08106042 2063 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2064
2065 if (tdep->vnh_type == NULL)
2066 {
2067 struct type *t;
2068 struct type *elem;
2069
2070 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2071 TYPE_CODE_UNION);
2072
5291fe3c
SP
2073 elem = builtin_type (gdbarch)->builtin_bfloat16;
2074 append_composite_type_field (t, "bf", elem);
2075
a6d0f249
AH
2076 elem = builtin_type (gdbarch)->builtin_half;
2077 append_composite_type_field (t, "f", elem);
2078
07b287a0
MS
2079 elem = builtin_type (gdbarch)->builtin_uint16;
2080 append_composite_type_field (t, "u", elem);
2081
2082 elem = builtin_type (gdbarch)->builtin_int16;
2083 append_composite_type_field (t, "s", elem);
2084
2085 tdep->vnh_type = t;
2086 }
2087
2088 return tdep->vnh_type;
2089}
2090
2091/* Return the type for an AdvSISD B register. */
2092
2093static struct type *
2094aarch64_vnb_type (struct gdbarch *gdbarch)
2095{
08106042 2096 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2097
2098 if (tdep->vnb_type == NULL)
2099 {
2100 struct type *t;
2101 struct type *elem;
2102
2103 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2104 TYPE_CODE_UNION);
2105
2106 elem = builtin_type (gdbarch)->builtin_uint8;
2107 append_composite_type_field (t, "u", elem);
2108
2109 elem = builtin_type (gdbarch)->builtin_int8;
2110 append_composite_type_field (t, "s", elem);
2111
2112 tdep->vnb_type = t;
2113 }
2114
2115 return tdep->vnb_type;
2116}
2117
63bad7b6
AH
2118/* Return the type for an AdvSISD V register. */
2119
2120static struct type *
2121aarch64_vnv_type (struct gdbarch *gdbarch)
2122{
08106042 2123 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6
AH
2124
2125 if (tdep->vnv_type == NULL)
2126 {
09624f1f 2127 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
bffa1015
AH
2128 slice from the non-pseudo vector registers. However NEON V registers
2129 are always vector registers, and need constructing as such. */
2130 const struct builtin_type *bt = builtin_type (gdbarch);
2131
63bad7b6
AH
2132 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2133 TYPE_CODE_UNION);
2134
bffa1015
AH
2135 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2136 TYPE_CODE_UNION);
2137 append_composite_type_field (sub, "f",
2138 init_vector_type (bt->builtin_double, 2));
2139 append_composite_type_field (sub, "u",
2140 init_vector_type (bt->builtin_uint64, 2));
2141 append_composite_type_field (sub, "s",
2142 init_vector_type (bt->builtin_int64, 2));
2143 append_composite_type_field (t, "d", sub);
2144
2145 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2146 TYPE_CODE_UNION);
2147 append_composite_type_field (sub, "f",
2148 init_vector_type (bt->builtin_float, 4));
2149 append_composite_type_field (sub, "u",
2150 init_vector_type (bt->builtin_uint32, 4));
2151 append_composite_type_field (sub, "s",
2152 init_vector_type (bt->builtin_int32, 4));
2153 append_composite_type_field (t, "s", sub);
2154
2155 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2156 TYPE_CODE_UNION);
5291fe3c
SP
2157 append_composite_type_field (sub, "bf",
2158 init_vector_type (bt->builtin_bfloat16, 8));
a6d0f249
AH
2159 append_composite_type_field (sub, "f",
2160 init_vector_type (bt->builtin_half, 8));
bffa1015
AH
2161 append_composite_type_field (sub, "u",
2162 init_vector_type (bt->builtin_uint16, 8));
2163 append_composite_type_field (sub, "s",
2164 init_vector_type (bt->builtin_int16, 8));
2165 append_composite_type_field (t, "h", sub);
2166
2167 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2168 TYPE_CODE_UNION);
2169 append_composite_type_field (sub, "u",
2170 init_vector_type (bt->builtin_uint8, 16));
2171 append_composite_type_field (sub, "s",
2172 init_vector_type (bt->builtin_int8, 16));
2173 append_composite_type_field (t, "b", sub);
2174
2175 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2176 TYPE_CODE_UNION);
2177 append_composite_type_field (sub, "u",
2178 init_vector_type (bt->builtin_uint128, 1));
2179 append_composite_type_field (sub, "s",
2180 init_vector_type (bt->builtin_int128, 1));
2181 append_composite_type_field (t, "q", sub);
63bad7b6
AH
2182
2183 tdep->vnv_type = t;
2184 }
2185
2186 return tdep->vnv_type;
2187}
2188
07b287a0
MS
2189/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2190
2191static int
2192aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2193{
08106042 2194 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
34dcc7cf 2195
07b287a0
MS
2196 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2197 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2198
2199 if (reg == AARCH64_DWARF_SP)
2200 return AARCH64_SP_REGNUM;
2201
1fe84861
YY
2202 if (reg == AARCH64_DWARF_PC)
2203 return AARCH64_PC_REGNUM;
2204
07b287a0
MS
2205 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2206 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2207
65d4cada
AH
2208 if (reg == AARCH64_DWARF_SVE_VG)
2209 return AARCH64_SVE_VG_REGNUM;
2210
2211 if (reg == AARCH64_DWARF_SVE_FFR)
2212 return AARCH64_SVE_FFR_REGNUM;
2213
2214 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2215 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2216
2217 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2218 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2219
34dcc7cf
AH
2220 if (tdep->has_pauth ())
2221 {
c9cd8ca4
LM
2222 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2223 return tdep->ra_sign_state_regnum;
34dcc7cf
AH
2224 }
2225
07b287a0
MS
2226 return -1;
2227}
07b287a0
MS
2228
2229/* Implement the "print_insn" gdbarch method. */
2230
2231static int
2232aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2233{
2234 info->symbols = NULL;
6394c606 2235 return default_print_insn (memaddr, info);
07b287a0
MS
2236}
2237
2238/* AArch64 BRK software debug mode instruction.
2239 Note that AArch64 code is always little-endian.
2240 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 2241constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 2242
04180708 2243typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
07b287a0
MS
2244
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA return: each member comes back in its own consecutive
	 V register, starting at v0.  */
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  aarch64_debug_printf
	    ("read HFA or HVA return value element %d from %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  regs->cooked_read (regno, buf);

	  /* Only the low LEN bytes of the register hold the member.  */
	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      /* Values wider than one X register occupy consecutive X
	 registers starting at x0.  */
      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  /* The last chunk may be shorter than a full register.  */
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2324
2325
2326/* Will a function return an aggregate type in memory or in a
2327 register? Return 0 if an aggregate type can be returned in a
2328 register, 1 if it must be returned in memory. */
2329
2330static int
2331aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2332{
f168693b 2333 type = check_typedef (type);
4f4aedeb
AH
2334 int elements;
2335 struct type *fundamental_type;
07b287a0 2336
4f4aedeb
AH
2337 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2338 &fundamental_type))
07b287a0 2339 {
cd635f74
YQ
2340 /* v0-v7 are used to return values and one register is allocated
2341 for one member. However, HFA or HVA has at most four members. */
07b287a0
MS
2342 return 0;
2343 }
2344
df86565b 2345 if (type->length () > 16
bab22d06 2346 || !language_pass_by_reference (type).trivially_copyable)
07b287a0
MS
2347 {
2348 /* PCS B.6 Aggregates larger than 16 bytes are passed by
dda83cd7 2349 invisible reference. */
07b287a0
MS
2350
2351 return 1;
2352 }
2353
2354 return 0;
2355}
2356
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA return: each member goes into its own consecutive
	 V register, starting at v0.  */
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  aarch64_debug_printf
	    ("write HFA or HVA return value element %d to %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (type->length () <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = type->length ();
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  /* The last chunk may be shorter than a full register.  */
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2443
2444/* Implement the "return_value" gdbarch method. */
2445
2446static enum return_value_convention
2447aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2448 struct type *valtype, struct regcache *regcache,
2449 gdb_byte *readbuf, const gdb_byte *writebuf)
2450{
07b287a0 2451
78134374
SM
2452 if (valtype->code () == TYPE_CODE_STRUCT
2453 || valtype->code () == TYPE_CODE_UNION
2454 || valtype->code () == TYPE_CODE_ARRAY)
07b287a0
MS
2455 {
2456 if (aarch64_return_in_memory (gdbarch, valtype))
2457 {
bab22d06
LM
2458 /* From the AAPCS64's Result Return section:
2459
2460 "Otherwise, the caller shall reserve a block of memory of
2461 sufficient size and alignment to hold the result. The address
2462 of the memory block shall be passed as an additional argument to
2463 the function in x8. */
2464
c6185dce 2465 aarch64_debug_printf ("return value in memory");
bab22d06
LM
2466
2467 if (readbuf)
2468 {
2469 CORE_ADDR addr;
2470
2471 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
df86565b 2472 read_memory (addr, readbuf, valtype->length ());
bab22d06
LM
2473 }
2474
2475 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
07b287a0
MS
2476 }
2477 }
2478
2479 if (writebuf)
2480 aarch64_store_return_value (valtype, regcache, writebuf);
2481
2482 if (readbuf)
2483 aarch64_extract_return_value (valtype, regcache, readbuf);
2484
c6185dce 2485 aarch64_debug_printf ("return value in registers");
07b287a0
MS
2486
2487 return RETURN_VALUE_REGISTER_CONVENTION;
2488}
2489
2490/* Implement the "get_longjmp_target" gdbarch method. */
2491
2492static int
bd2b40ac 2493aarch64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
07b287a0
MS
2494{
2495 CORE_ADDR jb_addr;
2496 gdb_byte buf[X_REGISTER_SIZE];
2497 struct gdbarch *gdbarch = get_frame_arch (frame);
08106042 2498 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2499 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2500
2501 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2502
2503 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2504 X_REGISTER_SIZE))
2505 return 0;
2506
2507 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2508 return 1;
2509}
ea873d8e
PL
2510
2511/* Implement the "gen_return_address" gdbarch method. */
2512
2513static void
2514aarch64_gen_return_address (struct gdbarch *gdbarch,
2515 struct agent_expr *ax, struct axs_value *value,
2516 CORE_ADDR scope)
2517{
2518 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2519 value->kind = axs_lvalue_register;
2520 value->u.reg = AARCH64_LR_REGNUM;
2521}
07b287a0
MS
2522\f
2523
e63ae49b
LM
2524/* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2525 otherwise. */
2526
2527static bool
2528is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2529{
2530 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2531
2532 if (tdep->w_pseudo_base <= regnum
2533 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2534 return true;
2535
2536 return false;
2537}
2538
07b287a0
MS
/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* W pseudo-registers.  Bottom halves of the X registers.  */
  static const char *const w_name[] =
    {
      "w0", "w1", "w2", "w3",
      "w4", "w5", "w6", "w7",
      "w8", "w9", "w10", "w11",
      "w12", "w13", "w14", "w15",
      "w16", "w17", "w18", "w19",
      "w20", "w21", "w22", "w23",
      "w24", "w25", "w26", "w27",
      "w28", "w29", "w30",
    };

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Rebase REGNUM onto the pseudo-register number space.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  /* W pseudo-registers?  Note these are indexed by the raw REGNUM
     relative to the W pseudo base, not by P_REGNUM.  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return w_name[regnum - tdep->w_pseudo_base];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return "";

  internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}
2668
2669/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2670
2671static struct type *
2672aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2673{
08106042 2674 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2675
34dcc7cf 2676 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2677
34dcc7cf 2678 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0
MS
2679 return aarch64_vnq_type (gdbarch);
2680
34dcc7cf 2681 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2682 return aarch64_vnd_type (gdbarch);
2683
34dcc7cf 2684 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2685 return aarch64_vns_type (gdbarch);
2686
34dcc7cf 2687 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0
MS
2688 return aarch64_vnh_type (gdbarch);
2689
34dcc7cf 2690 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0
MS
2691 return aarch64_vnb_type (gdbarch);
2692
34dcc7cf
AH
2693 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2694 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6
AH
2695 return aarch64_vnv_type (gdbarch);
2696
e63ae49b
LM
2697 /* W pseudo-registers are 32-bit. */
2698 if (is_w_pseudo_register (gdbarch, regnum))
2699 return builtin_type (gdbarch)->builtin_uint32;
2700
c9cd8ca4 2701 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf
AH
2702 return builtin_type (gdbarch)->builtin_uint64;
2703
f34652de 2704 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
34dcc7cf 2705 p_regnum);
07b287a0
MS
2706}
2707
2708/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2709
2710static int
2711aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
dbf5d61b 2712 const struct reggroup *group)
07b287a0 2713{
08106042 2714 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2715
34dcc7cf 2716 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2717
34dcc7cf 2718 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0 2719 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2720 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2721 return (group == all_reggroup || group == vector_reggroup
2722 || group == float_reggroup);
34dcc7cf 2723 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2724 return (group == all_reggroup || group == vector_reggroup
2725 || group == float_reggroup);
34dcc7cf 2726 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0 2727 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2728 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0 2729 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
2730 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2731 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6 2732 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2733 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
c9cd8ca4 2734 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf 2735 return 0;
07b287a0
MS
2736
2737 return group == all_reggroup;
2738}
2739
3c5cd5c3
AH
2740/* Helper for aarch64_pseudo_read_value. */
2741
2742static struct value *
63bad7b6
AH
2743aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2744 readable_regcache *regcache, int regnum_offset,
3c5cd5c3
AH
2745 int regsize, struct value *result_value)
2746{
3c5cd5c3
AH
2747 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2748
63bad7b6
AH
2749 /* Enough space for a full vector register. */
2750 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2751 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2752
3c5cd5c3
AH
2753 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2754 mark_value_bytes_unavailable (result_value, 0,
df86565b 2755 value_type (result_value)->length ());
3c5cd5c3 2756 else
50888e42 2757 memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);
63bad7b6 2758
3c5cd5c3
AH
2759 return result_value;
2760 }
2761
07b287a0
MS
/* Implement the "pseudo_register_read_value" gdbarch method.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct value *result_value = allocate_value (register_type (gdbarch, regnum));

  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;

  if (is_w_pseudo_register (gdbarch, regnum))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      /* On big endian the low 32 bits of X sit at byte offset 4.  */
      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
      gdb_byte data[4];

      /* Read the bottom 4 bytes of X.  */
      if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
	mark_value_bytes_unavailable (result_value, 0, 4);
      else
	memcpy (value_contents_raw (result_value).data (), data, 4);

      return result_value;
    }

  /* Rebase REGNUM onto the pseudo-register number space before the
     range dispatch below.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bound");
}
2831
3c5cd5c3 2832/* Helper for aarch64_pseudo_write. */
07b287a0
MS
2833
2834static void
63bad7b6
AH
2835aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2836 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2837{
3c5cd5c3 2838 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2839
63bad7b6
AH
2840 /* Enough space for a full vector register. */
2841 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2842 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2843
07b287a0
MS
2844 /* Ensure the register buffer is zero, we want gdb writes of the
2845 various 'scalar' pseudo registers to behavior like architectural
2846 writes, register width bytes are written the remainder are set to
2847 zero. */
63bad7b6 2848 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2849
3c5cd5c3
AH
2850 memcpy (reg_buf, buf, regsize);
2851 regcache->raw_write (v_regnum, reg_buf);
2852}
2853
2854/* Implement the "pseudo_register_write" gdbarch method. */
2855
2856static void
2857aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2858 int regnum, const gdb_byte *buf)
2859{
08106042 2860 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
e63ae49b
LM
2861
2862 if (is_w_pseudo_register (gdbarch, regnum))
2863 {
2864 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2865 /* Default offset for little endian. */
2866 int offset = 0;
2867
2868 if (byte_order == BFD_ENDIAN_BIG)
2869 offset = 4;
2870
2871 /* Find the correct X register to extract the data from. */
2872 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2873
2874 /* First zero-out the contents of X. */
2875 ULONGEST zero = 0;
2876 regcache->raw_write (x_regnum, zero);
2877 /* Write to the bottom 4 bytes of X. */
2878 regcache->raw_write_part (x_regnum, offset, 4, buf);
2879 return;
2880 }
2881
07b287a0
MS
2882 regnum -= gdbarch_num_regs (gdbarch);
2883
2884 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2885 return aarch64_pseudo_write_1 (gdbarch, regcache,
2886 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2887 buf);
07b287a0
MS
2888
2889 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2890 return aarch64_pseudo_write_1 (gdbarch, regcache,
2891 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2892 buf);
07b287a0
MS
2893
2894 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2895 return aarch64_pseudo_write_1 (gdbarch, regcache,
2896 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2897 buf);
07b287a0
MS
2898
2899 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2900 return aarch64_pseudo_write_1 (gdbarch, regcache,
2901 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2902 buf);
07b287a0
MS
2903
2904 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2905 return aarch64_pseudo_write_1 (gdbarch, regcache,
2906 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2907 buf);
2908
2909 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2910 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2911 return aarch64_pseudo_write_1 (gdbarch, regcache,
2912 regnum - AARCH64_SVE_V0_REGNUM,
2913 V_REGISTER_SIZE, buf);
07b287a0
MS
2914
2915 gdb_assert_not_reached ("regnum out of bound");
2916}
2917
07b287a0
MS
2918/* Callback function for user_reg_add. */
2919
2920static struct value *
bd2b40ac 2921value_of_aarch64_user_reg (frame_info_ptr frame, const void *baton)
07b287a0 2922{
9a3c8263 2923 const int *reg_p = (const int *) baton;
07b287a0
MS
2924
2925 return value_of_register (*reg_p, frame);
2926}
2927\f
2928
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Scans forward from the PC for a Load Exclusive / Store Exclusive
   pair and returns the addresses where breakpoints must be placed to
   step over the whole sequence: one past the Store Exclusive, and
   optionally one at the target of a conditional branch inside the
   sequence.  Returns an empty vector when no atomic sequence is
   found.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  /* breaks[0]: past the sequence; breaks[1]: conditional branch target.  */
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (loc, insn_size,
					  byte_order_for_code,
					  &insn_from_memory))
    {
      /* Assume we don't have a atomic sequence, as we couldn't read the
	 instruction in this location.  */
      return {};
    }

  uint32_t insn = insn_from_memory;
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence
     (iclass ldstexcl with bit 22 set).  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;

      if (!safe_read_memory_unsigned_integer (loc, insn_size,
					      byte_order_for_code,
					      &insn_from_memory))
	{
	  /* Assume we don't have a atomic sequence, as we couldn't read the
	     instruction in this location.  */
	  return {};
	}

      insn = insn_from_memory;
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* Only one conditional branch is supported in the sequence.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence
	 (iclass ldstexcl with bit 22 clear).  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
3030
1152d984
SM
/* Per-instruction state shared between displaced_step_copy_insn and
   displaced_step_fixup.  */

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};
3042
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Common visitor state.  Placed first: the struct is passed to the
     visitor as a struct aarch64_insn_data * and cast back in each
     callback.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure handed to the fixup phase; visitors record the required
     PC adjustment (and conditionality) here.  */
  aarch64_displaced_step_copy_insn_closure *dsc;
};
3060
3061/* Implementation of aarch64_insn_visitor method "b". */
3062
3063static void
3064aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3065 struct aarch64_insn_data *data)
3066{
3067 struct aarch64_displaced_step_data *dsd
3068 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 3069 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
3070
3071 if (can_encode_int32 (new_offset, 28))
3072 {
3073 /* Emit B rather than BL, because executing BL on a new address
3074 will get the wrong address into LR. In order to avoid this,
3075 we emit B, and update LR if the instruction is BL. */
3076 emit_b (dsd->insn_buf, 0, new_offset);
3077 dsd->insn_count++;
3078 }
3079 else
3080 {
3081 /* Write NOP. */
3082 emit_nop (dsd->insn_buf);
3083 dsd->insn_count++;
3084 dsd->dsc->pc_adjust = offset;
3085 }
3086
3087 if (is_bl)
3088 {
3089 /* Update LR. */
3090 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3091 data->insn_addr + 4);
3092 }
3093}
3094
3095/* Implementation of aarch64_insn_visitor method "b_cond". */
3096
3097static void
3098aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3099 struct aarch64_insn_data *data)
3100{
3101 struct aarch64_displaced_step_data *dsd
3102 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3103
3104 /* GDB has to fix up PC after displaced step this instruction
3105 differently according to the condition is true or false. Instead
3106 of checking COND against conditional flags, we can use
3107 the following instructions, and GDB can tell how to fix up PC
3108 according to the PC value.
3109
3110 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3111 INSN1 ;
3112 TAKEN:
3113 INSN2
3114 */
3115
3116 emit_bcond (dsd->insn_buf, cond, 8);
f0c702d4 3117 dsd->dsc->cond = true;
b6542f81
YQ
3118 dsd->dsc->pc_adjust = offset;
3119 dsd->insn_count = 1;
3120}
3121
3122/* Dynamically allocate a new register. If we know the register
3123 statically, we should make it a global as above instead of using this
3124 helper function. */
3125
3126static struct aarch64_register
3127aarch64_register (unsigned num, int is64)
3128{
3129 return (struct aarch64_register) { num, is64 };
3130}
3131
3132/* Implementation of aarch64_insn_visitor method "cb". */
3133
3134static void
3135aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3136 const unsigned rn, int is64,
3137 struct aarch64_insn_data *data)
3138{
3139 struct aarch64_displaced_step_data *dsd
3140 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3141
3142 /* The offset is out of range for a compare and branch
3143 instruction. We can use the following instructions instead:
3144
3145 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3146 INSN1 ;
3147 TAKEN:
3148 INSN2
3149 */
3150 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3151 dsd->insn_count = 1;
f0c702d4 3152 dsd->dsc->cond = true;
b6542f81
YQ
3153 dsd->dsc->pc_adjust = offset;
3154}
3155
3156/* Implementation of aarch64_insn_visitor method "tb". */
3157
3158static void
3159aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3160 const unsigned rt, unsigned bit,
3161 struct aarch64_insn_data *data)
3162{
3163 struct aarch64_displaced_step_data *dsd
3164 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3165
3166 /* The offset is out of range for a test bit and branch
3167 instruction We can use the following instructions instead:
3168
3169 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3170 INSN1 ;
3171 TAKEN:
3172 INSN2
3173
3174 */
3175 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3176 dsd->insn_count = 1;
f0c702d4 3177 dsd->dsc->cond = true;
b6542f81
YQ
3178 dsd->dsc->pc_adjust = offset;
3179}
3180
3181/* Implementation of aarch64_insn_visitor method "adr". */
3182
3183static void
3184aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3185 const int is_adrp, struct aarch64_insn_data *data)
3186{
3187 struct aarch64_displaced_step_data *dsd
3188 = (struct aarch64_displaced_step_data *) data;
3189 /* We know exactly the address the ADR{P,} instruction will compute.
3190 We can just write it to the destination register. */
3191 CORE_ADDR address = data->insn_addr + offset;
3192
3193 if (is_adrp)
3194 {
3195 /* Clear the lower 12 bits of the offset to get the 4K page. */
3196 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3197 address & ~0xfff);
3198 }
3199 else
3200 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3201 address);
3202
3203 dsd->dsc->pc_adjust = 4;
3204 emit_nop (dsd->insn_buf);
3205 dsd->insn_count = 1;
3206}
3207
3208/* Implementation of aarch64_insn_visitor method "ldr_literal". */
3209
3210static void
3211aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3212 const unsigned rt, const int is64,
3213 struct aarch64_insn_data *data)
3214{
3215 struct aarch64_displaced_step_data *dsd
3216 = (struct aarch64_displaced_step_data *) data;
3217 CORE_ADDR address = data->insn_addr + offset;
3218 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3219
3220 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3221 address);
3222
3223 if (is_sw)
3224 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3225 aarch64_register (rt, 1), zero);
3226 else
3227 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3228 aarch64_register (rt, 1), zero);
3229
3230 dsd->dsc->pc_adjust = 4;
3231}
3232
3233/* Implementation of aarch64_insn_visitor method "others". */
3234
3235static void
3236aarch64_displaced_step_others (const uint32_t insn,
3237 struct aarch64_insn_data *data)
3238{
3239 struct aarch64_displaced_step_data *dsd
3240 = (struct aarch64_displaced_step_data *) data;
3241
807f647c
MM
3242 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3243 if (masked_insn == BLR)
b6542f81 3244 {
807f647c
MM
3245 /* Emit a BR to the same register and then update LR to the original
3246 address (similar to aarch64_displaced_step_b). */
3247 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3248 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3249 data->insn_addr + 4);
b6542f81 3250 }
807f647c
MM
3251 else
3252 aarch64_emit_insn (dsd->insn_buf, insn);
3253 dsd->insn_count = 1;
3254
3255 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3256 dsd->dsc->pc_adjust = 0;
b6542f81
YQ
3257 else
3258 dsd->dsc->pc_adjust = 4;
3259}
3260
/* Visitor callbacks used by aarch64_relocate_instruction when copying
   an instruction into the displaced-stepping scratch pad.  Entry
   order must match struct aarch64_insn_visitor.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
3271
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Copy (a possibly rewritten form of) the instruction at FROM into the
   scratch pad at TO and return the closure the fixup phase needs.
   Returns NULL when the instruction cannot be displaced-stepped
   (unreadable memory, undecodable insn, start of an atomic sequence,
   or no relocation was produced).  */

displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;
  ULONGEST insn_from_memory;

  if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
					  &insn_from_memory))
    return nullptr;

  uint32_t insn = insn_from_memory;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
    (new aarch64_displaced_step_copy_insn_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  /* Let the visitor callbacks rewrite the instruction into DSD's
     buffer and fill in the closure's fixup data.  */
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  displaced_debug_printf ("writing insn %.8x at %s",
				  dsd.insn_buf[i],
				  paddress (gdbarch, to + i * 4));

	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* No relocated instructions: report failure by returning a null
	 closure.  */
      dsc = NULL;
    }

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
3335
/* Implement the "displaced_step_fixup" gdbarch method.

   After the relocated instruction has executed at TO, decide what the
   real PC should be and write it back, using the closure DSC_ recorded
   by aarch64_displaced_step_copy_insn.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;

  ULONGEST pc;

  regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      /* For a relocated conditional branch the scratch pad was laid
	 out so that PC == TO + 8 means the branch was taken and
	 PC == TO + 4 means it fell through (see
	 aarch64_displaced_step_b_cond and friends).  */
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move.  Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      /* Resume at the original instruction's address plus the recorded
	 adjustment.  */
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
3398
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   AArch64 always executes the relocated instruction with a hardware
   single-step.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
3406
95228a0d
AH
3407/* Get the correct target description for the given VQ value.
3408 If VQ is zero then it is assumed SVE is not supported.
c1bd443b
LM
3409 (It is not possible to set VQ to zero on an SVE system).
3410
414d5848
JB
3411 MTE_P indicates the presence of the Memory Tagging Extension feature.
3412
3413 TLS_P indicates the presence of the Thread Local Storage feature. */
da434ccb
AH
3414
3415const target_desc *
0ee6b1c5 3416aarch64_read_description (const aarch64_features &features)
da434ccb 3417{
0ee6b1c5
JB
3418 if (features.vq > AARCH64_MAX_SVE_VQ)
3419 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
95228a0d
AH
3420 AARCH64_MAX_SVE_VQ);
3421
0ee6b1c5 3422 struct target_desc *tdesc = tdesc_aarch64_map[features];
da434ccb 3423
95228a0d
AH
3424 if (tdesc == NULL)
3425 {
0ee6b1c5
JB
3426 tdesc = aarch64_create_target_description (features);
3427 tdesc_aarch64_map[features] = tdesc;
95228a0d 3428 }
da434ccb 3429
95228a0d 3430 return tdesc;
da434ccb
AH
3431}
3432
ba2d2bb2
AH
3433/* Return the VQ used when creating the target description TDESC. */
3434
1332a140 3435static uint64_t
ba2d2bb2
AH
3436aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3437{
3438 const struct tdesc_feature *feature_sve;
3439
3440 if (!tdesc_has_registers (tdesc))
3441 return 0;
3442
3443 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3444
3445 if (feature_sve == nullptr)
3446 return 0;
3447
12863263
AH
3448 uint64_t vl = tdesc_register_bitsize (feature_sve,
3449 aarch64_sve_register_names[0]) / 8;
ba2d2bb2
AH
3450 return sve_vq_from_vl (vl);
3451}
3452
4f3681cc
TJB
3453/* Get the AArch64 features present in the given target description. */
3454
3455aarch64_features
3456aarch64_features_from_target_desc (const struct target_desc *tdesc)
3457{
3458 aarch64_features features;
3459
3460 if (tdesc == nullptr)
3461 return features;
3462
3463 features.vq = aarch64_get_tdesc_vq (tdesc);
3464 features.pauth
3465 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
3466 features.mte
3467 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
3468 features.tls
3469 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls") != nullptr);
3470
3471 return features;
3472}
3473
76bed0fd
AH
3474/* Implement the "cannot_store_register" gdbarch method. */
3475
3476static int
3477aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3478{
08106042 3479 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
76bed0fd
AH
3480
3481 if (!tdep->has_pauth ())
3482 return 0;
3483
3484 /* Pointer authentication registers are read-only. */
3485 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3486 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3487}
3488
da729c5c
TT
3489/* Implement the stack_frame_destroyed_p gdbarch method. */
3490
3491static int
3492aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3493{
3494 CORE_ADDR func_start, func_end;
3495 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3496 return 0;
3497
3498 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
94355de7
LM
3499
3500 ULONGEST insn_from_memory;
3501 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
3502 &insn_from_memory))
3503 return 0;
3504
3505 uint32_t insn = insn_from_memory;
da729c5c
TT
3506
3507 aarch64_inst inst;
3508 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
3509 return 0;
3510
3511 return streq (inst.opcode->name, "ret");
3512}
3513
07b287a0
MS
3514/* Initialize the current architecture based on INFO. If possible,
3515 re-use an architecture from ARCHES, which is a list of
3516 architectures already created during this debugging session.
3517
3518 Called e.g. at program startup, when reading a core file, and when
3519 reading a binary file. */
3520
3521static struct gdbarch *
3522aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3523{
ccb8d7e8 3524 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
76bed0fd 3525 const struct tdesc_feature *feature_pauth;
ccb8d7e8
AH
3526 bool valid_p = true;
3527 int i, num_regs = 0, num_pseudo_regs = 0;
c9cd8ca4 3528 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
414d5848 3529 int first_mte_regnum = -1, tls_regnum = -1;
4f3681cc 3530 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4da037ef
AH
3531
3532 if (vq > AARCH64_MAX_SVE_VQ)
f34652de 3533 internal_error (_("VQ out of bounds: %s (max %d)"),
596179f7 3534 pulongest (vq), AARCH64_MAX_SVE_VQ);
4da037ef 3535
ccb8d7e8
AH
3536 /* If there is already a candidate, use it. */
3537 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3538 best_arch != nullptr;
3539 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3540 {
345bd07c 3541 aarch64_gdbarch_tdep *tdep
08106042 3542 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4da037ef 3543 if (tdep && tdep->vq == vq)
ccb8d7e8
AH
3544 return best_arch->gdbarch;
3545 }
07b287a0 3546
4da037ef
AH
3547 /* Ensure we always have a target descriptor, and that it is for the given VQ
3548 value. */
ccb8d7e8 3549 const struct target_desc *tdesc = info.target_desc;
4f3681cc
TJB
3550 if (!tdesc_has_registers (tdesc))
3551 tdesc = aarch64_read_description ({});
07b287a0
MS
3552 gdb_assert (tdesc);
3553
ccb8d7e8 3554 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
ba2d2bb2
AH
3555 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3556 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
76bed0fd 3557 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
5e984dbf
LM
3558 const struct tdesc_feature *feature_mte
3559 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
414d5848
JB
3560 const struct tdesc_feature *feature_tls
3561 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
07b287a0 3562
ccb8d7e8
AH
3563 if (feature_core == nullptr)
3564 return nullptr;
07b287a0 3565
c1e1314d 3566 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
07b287a0 3567
ba2d2bb2 3568 /* Validate the description provides the mandatory core R registers
07b287a0
MS
3569 and allocate their numbers. */
3570 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
c1e1314d 3571 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
ba2d2bb2
AH
3572 AARCH64_X0_REGNUM + i,
3573 aarch64_r_register_names[i]);
07b287a0
MS
3574
3575 num_regs = AARCH64_X0_REGNUM + i;
3576
ba2d2bb2 3577 /* Add the V registers. */
ccb8d7e8 3578 if (feature_fpu != nullptr)
07b287a0 3579 {
ccb8d7e8 3580 if (feature_sve != nullptr)
ba2d2bb2
AH
3581 error (_("Program contains both fpu and SVE features."));
3582
3583 /* Validate the description provides the mandatory V registers
3584 and allocate their numbers. */
07b287a0 3585 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
c1e1314d 3586 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
ba2d2bb2
AH
3587 AARCH64_V0_REGNUM + i,
3588 aarch64_v_register_names[i]);
07b287a0
MS
3589
3590 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3591 }
07b287a0 3592
ba2d2bb2 3593 /* Add the SVE registers. */
ccb8d7e8 3594 if (feature_sve != nullptr)
ba2d2bb2
AH
3595 {
3596 /* Validate the description provides the mandatory SVE registers
3597 and allocate their numbers. */
3598 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
c1e1314d 3599 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
ba2d2bb2
AH
3600 AARCH64_SVE_Z0_REGNUM + i,
3601 aarch64_sve_register_names[i]);
3602
3603 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3604 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3605 }
3606
ccb8d7e8 3607 if (feature_fpu != nullptr || feature_sve != nullptr)
ba2d2bb2 3608 {
07b287a0
MS
3609 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3610 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3611 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3612 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3613 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3614 }
3615
414d5848
JB
3616 /* Add the TLS register. */
3617 if (feature_tls != nullptr)
3618 {
3619 tls_regnum = num_regs;
3620 /* Validate the descriptor provides the mandatory TLS register
3621 and allocate its number. */
3622 valid_p = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3623 tls_regnum, "tpidr");
3624
3625 num_regs++;
3626 }
3627
76bed0fd
AH
3628 /* Add the pauth registers. */
3629 if (feature_pauth != NULL)
3630 {
3631 first_pauth_regnum = num_regs;
c9cd8ca4 3632 ra_sign_state_offset = num_pseudo_regs;
76bed0fd
AH
3633 /* Validate the descriptor provides the mandatory PAUTH registers and
3634 allocate their numbers. */
3635 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
c1e1314d 3636 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
76bed0fd
AH
3637 first_pauth_regnum + i,
3638 aarch64_pauth_register_names[i]);
3639
3640 num_regs += i;
34dcc7cf 3641 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
76bed0fd
AH
3642 }
3643
5e984dbf
LM
3644 /* Add the MTE registers. */
3645 if (feature_mte != NULL)
3646 {
3647 first_mte_regnum = num_regs;
3648 /* Validate the descriptor provides the mandatory MTE registers and
3649 allocate their numbers. */
3650 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3651 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3652 first_mte_regnum + i,
3653 aarch64_mte_register_names[i]);
3654
3655 num_regs += i;
3656 }
e63ae49b
LM
3657 /* W pseudo-registers */
3658 int first_w_regnum = num_pseudo_regs;
3659 num_pseudo_regs += 31;
5e984dbf 3660
07b287a0 3661 if (!valid_p)
c1e1314d 3662 return nullptr;
07b287a0
MS
3663
3664 /* AArch64 code is always little-endian. */
3665 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3666
345bd07c 3667 aarch64_gdbarch_tdep *tdep = new aarch64_gdbarch_tdep;
ccb8d7e8 3668 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
07b287a0
MS
3669
3670 /* This should be low enough for everything. */
3671 tdep->lowest_pc = 0x20;
3672 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3673 tdep->jb_elt_size = 8;
4da037ef 3674 tdep->vq = vq;
76bed0fd 3675 tdep->pauth_reg_base = first_pauth_regnum;
1ba3a322 3676 tdep->ra_sign_state_regnum = -1;
5e984dbf 3677 tdep->mte_reg_base = first_mte_regnum;
414d5848 3678 tdep->tls_regnum = tls_regnum;
34dcc7cf 3679
07b287a0
MS
3680 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3681 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3682
07b287a0
MS
3683 /* Advance PC across function entry code. */
3684 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3685
3686 /* The stack grows downward. */
3687 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3688
3689 /* Breakpoint manipulation. */
04180708
YQ
3690 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3691 aarch64_breakpoint::kind_from_pc);
3692 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3693 aarch64_breakpoint::bp_from_kind);
07b287a0 3694 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3695 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
3696
3697 /* Information about registers, etc. */
3698 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3699 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3700 set_gdbarch_num_regs (gdbarch, num_regs);
3701
3702 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3703 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3704 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3705 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3706 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3707 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3708 aarch64_pseudo_register_reggroup_p);
76bed0fd 3709 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
07b287a0
MS
3710
3711 /* ABI */
3712 set_gdbarch_short_bit (gdbarch, 16);
3713 set_gdbarch_int_bit (gdbarch, 32);
3714 set_gdbarch_float_bit (gdbarch, 32);
3715 set_gdbarch_double_bit (gdbarch, 64);
3716 set_gdbarch_long_double_bit (gdbarch, 128);
3717 set_gdbarch_long_bit (gdbarch, 64);
3718 set_gdbarch_long_long_bit (gdbarch, 64);
3719 set_gdbarch_ptr_bit (gdbarch, 64);
3720 set_gdbarch_char_signed (gdbarch, 0);
53375380 3721 set_gdbarch_wchar_signed (gdbarch, 0);
07b287a0
MS
3722 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3723 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
552f1157 3724 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
b907456c 3725 set_gdbarch_type_align (gdbarch, aarch64_type_align);
07b287a0 3726
da729c5c
TT
3727 /* Detect whether PC is at a point where the stack has been destroyed. */
3728 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
3729
07b287a0
MS
3730 /* Internal <-> external register number maps. */
3731 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3732
3733 /* Returning results. */
3734 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3735
3736 /* Disassembly. */
3737 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3738
3739 /* Virtual tables. */
3740 set_gdbarch_vbit_in_delta (gdbarch, 1);
3741
3742 /* Hook in the ABI-specific overrides, if they have been registered. */
3743 info.target_desc = tdesc;
c1e1314d 3744 info.tdesc_data = tdesc_data.get ();
07b287a0
MS
3745 gdbarch_init_osabi (info, gdbarch);
3746
3747 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
11e1b75f
AH
3748 /* Register DWARF CFA vendor handler. */
3749 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3750 aarch64_execute_dwarf_cfa_vendor_op);
07b287a0 3751
5133a315
LM
3752 /* Permanent/Program breakpoint handling. */
3753 set_gdbarch_program_breakpoint_here_p (gdbarch,
3754 aarch64_program_breakpoint_here_p);
3755
07b287a0
MS
3756 /* Add some default predicates. */
3757 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3758 dwarf2_append_unwinders (gdbarch);
3759 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3760
3761 frame_base_set_default (gdbarch, &aarch64_normal_base);
3762
3763 /* Now we have tuned the configuration, set a few final things,
3764 based on what the OS ABI has told us. */
3765
3766 if (tdep->jb_pc >= 0)
3767 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3768
ea873d8e
PL
3769 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3770
aa7ca1bb
AH
3771 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3772
c1e1314d 3773 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
07b287a0 3774
1ba3a322
LM
3775 /* Fetch the updated number of registers after we're done adding all
3776 entries from features we don't explicitly care about. This is the case
3777 for bare metal debugging stubs that include a lot of system registers. */
3778 num_regs = gdbarch_num_regs (gdbarch);
3779
3780 /* With the number of real registers updated, setup the pseudo-registers and
3781 record their numbers. */
3782
e63ae49b
LM
3783 /* Setup W pseudo-register numbers. */
3784 tdep->w_pseudo_base = first_w_regnum + num_regs;
3785 tdep->w_pseudo_count = 31;
3786
1ba3a322
LM
3787 /* Pointer authentication pseudo-registers. */
3788 if (tdep->has_pauth ())
3789 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
3790
07b287a0
MS
3791 /* Add standard register aliases. */
3792 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3793 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3794 value_of_aarch64_user_reg,
3795 &aarch64_register_aliases[i].regnum);
3796
e8bf1ce4
JB
3797 register_aarch64_ravenscar_ops (gdbarch);
3798
07b287a0
MS
3799 return gdbarch;
3800}
3801
3802static void
3803aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3804{
08106042 3805 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3806
3807 if (tdep == NULL)
3808 return;
3809
6cb06a8c
TT
3810 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3811 paddress (gdbarch, tdep->lowest_pc));
07b287a0
MS
3812}
3813
0d4c07af 3814#if GDB_SELF_TEST
1e2b521d
YQ
3815namespace selftests
3816{
3817static void aarch64_process_record_test (void);
3818}
0d4c07af 3819#endif
1e2b521d 3820
6c265988 3821void _initialize_aarch64_tdep ();
07b287a0 3822void
6c265988 3823_initialize_aarch64_tdep ()
07b287a0
MS
3824{
3825 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3826 aarch64_dump_tdep);
3827
07b287a0
MS
3828 /* Debug this file's internals. */
3829 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3830Set AArch64 debugging."), _("\
3831Show AArch64 debugging."), _("\
3832When on, AArch64 specific debugging is enabled."),
3833 NULL,
3834 show_aarch64_debug,
3835 &setdebuglist, &showdebuglist);
4d9a9006
YQ
3836
3837#if GDB_SELF_TEST
1526853e
SM
3838 selftests::register_test ("aarch64-analyze-prologue",
3839 selftests::aarch64_analyze_prologue_test);
3840 selftests::register_test ("aarch64-process-record",
3841 selftests::aarch64_process_record_test);
4d9a9006 3842#endif
07b287a0 3843}
99afc88b
OJ
3844
3845/* AArch64 process record-replay related structures, defines etc. */
3846
99afc88b 3847#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
dda83cd7
SM
3848 do \
3849 { \
3850 unsigned int reg_len = LENGTH; \
3851 if (reg_len) \
3852 { \
3853 REGS = XNEWVEC (uint32_t, reg_len); \
3854 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3855 } \
3856 } \
3857 while (0)
99afc88b
OJ
3858
3859#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
dda83cd7
SM
3860 do \
3861 { \
3862 unsigned int mem_len = LENGTH; \
3863 if (mem_len) \
01add95b
SM
3864 { \
3865 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
91ddba83 3866 memcpy(MEMS, &RECORD_BUF[0], \
01add95b
SM
3867 sizeof(struct aarch64_mem_r) * LENGTH); \
3868 } \
dda83cd7
SM
3869 } \
3870 while (0)
99afc88b
OJ
3871
/* AArch64 record/replay structures and enumerations.  */

/* One memory region whose old contents must be saved before the
   recorded instruction overwrites it.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-instruction record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Working state shared by the record handlers while decoding one
   instruction.  The aarch64_regs/aarch64_mems arrays are allocated by
   the REG_ALLOC/MEM_ALLOC macros; ownership stays with this record.  */
struct aarch64_insn_decode_record
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;			/* Address of insn to be recorded.  */
  uint32_t aarch64_insn;		/* Insn to be recorded.  */
  uint32_t mem_rec_count;		/* Count of memory records.  */
  uint32_t reg_rec_count;		/* Count of register records.  */
  uint32_t *aarch64_regs;		/* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;	/* Memory locations to be recorded.  */
};
99afc88b
OJ
3898
/* Record handler for data processing - register instructions.
   Records the destination register (and CPSR for flag-setting forms)
   touched by the instruction in AARCH64_INSN_R, then returns
   AARCH64_RECORD_SUCCESS, or AARCH64_RECORD_UNKNOWN for encodings this
   decoder does not recognise.  */

static unsigned int
aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  /* Rd is always in bits [4:0] for this instruction class.  */
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register): opc == 0b11 (ANDS/BICS) sets flags.  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract (shifted/extended register): S bit is bit 29.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry); bit 29 is the S (set flags)
		 bit.  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions: only the flags change.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3974
/* Record handler for data processing - immediate instructions.
   Records the destination register (plus CPSR for the flag-setting
   add/subtract and logical forms); returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for unrecognised encodings.  */

static unsigned int
aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      /* Only the destination register is written.  */
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate); bit 29 is the S (set flags) bit.  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate); opc == 0b11 (ANDS) sets flags.  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4019
/* Record handler for branch, exception generation and system
   instructions.  Branches record PC (and LR for the linking variants);
   SVC is delegated to the OS-ABI syscall recorder.  Returns an
   aarch64_record_result code.

   NOTE(review): this handler appends to record_buf starting from the
   current reg_rec_count, so it relies on the caller having
   zero-initialized AARCH64_INSN_R — confirm at the call site.  */

static unsigned int
aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
{

  aarch64_gdbarch_tdep *tdep
    = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
	{
	  /* SVC: opc == 0, op2 == 0, LL == 0b01.  The syscall number is
	     in x8 per the AArch64 Linux ABI.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register): BR/BLR/RET etc.; BLR also
	 writes the link register.  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate): B/BL; bit 31 distinguishes BL.  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate): only PC changes.  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4101
/* Record handler for advanced SIMD load and store instructions.
   For loads, records the destination V registers; for stores, records
   the target memory regions (element size / address pairs); for the
   post-index writeback forms (bit 23 set) also records the base
   register Rn.  Returns an aarch64_record_result code.  */

static unsigned int
aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  /* Memory records are stored as (length, address) pairs, hence two
     slots per record.  */
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* selem = number of structure elements = {opcode<1>, S-bit} + 1.  */
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the encoding and derive the real element scale; see
	 the "Load/store single structure" tables in the Arm ARM.  */
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* LD*R (load and replicate) forms.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;	/* Element size in bits.  */
      if (replicate)
	/* Replicating loads write whole consecutive V registers.  */
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22 is the L (load) bit: loads touch V registers,
		 stores touch memory.  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Q bit (30) selects 128-bit vs 64-bit vector form.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* rpt = register repeat count, selem = structure elements.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23: post-index addressing writes back the base register.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4267
/* Record handler for load and store instructions.  Dispatches on the
   load/store sub-class (exclusive, literal, pair, unsigned-immediate,
   register-offset, immediate/unprivileged, advanced SIMD), recording
   destination registers for loads and (length, address) memory regions
   for stores; writeback forms also record the base register Rn.
   Returns an aarch64_record_result code.  */

static unsigned int
aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  /* Memory records are (length, address) pairs: two slots each.  */
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);	/* L (load) bit.  */
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);	/* V (SIMD&FP) bit.  */
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  /* Load exclusive: Rt (and Rt2 for the pair form, bit 21).  */
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  /* Store exclusive: the pair form (bit 21) writes twice the
	     data size.  */
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs (the exclusive-status result).  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  /* Store pair: compute the effective address from the
	     sign-extended, scaled imm7 field (bits 15-21).  */
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* Bit 6 of imm7 is the sign bit; take two's complement
	     magnitude for negative offsets.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-index forms store at the unmodified base address.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Writeback forms also modify the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate): prefetch hint, no architectural
		 state changes, nothing to record.  */
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate) */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  /* Store: imm12 (bits 10-21) is an unsigned, scaled offset.  */
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  /* Offset comes from Rm (bits 16-20), optionally scaled by
	     the S bit (bit 12).  */
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  /* Store: imm9 (bits 12-20) is a signed offset; bit 8 of the
	     field is its sign bit.  */
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-index (bits10_11 == 0x01) stores at the unmodified
	     base address.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre/post-index forms write back the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4555
/* Record handler for data processing SIMD and floating point
   instructions.  Records exactly one destination register (X, V, or
   CPSR depending on the encoding) plus FPSR, since many of these
   instructions can set floating-point status flags.  Returns an
   aarch64_record_result code.  */

static unsigned int
aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
	{
	  if (record_debug)
	    debug_printf ("FP - fixed point conversion");

	  /* FCVTZ* to integer writes Xd; the SCVTF/UCVTF direction
	     writes Vd.  */
	  if ((opcode >> 1) == 0x0 && rmode == 0x03)
	    record_buf[0] = reg_rd;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
	{
	  if (record_debug)
	    debug_printf ("FP - conditional compare");

	  record_buf[0] = AARCH64_CPSR_REGNUM;
	}
      /* Floating point - data processing (2-source) and
	 conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
	{
	  if (record_debug)
	    debug_printf ("FP - DP (2-source)");

	  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else if (insn_bits10_11 == 0x00)
	{
	  /* Floating point - immediate instructions.  */
	  if ((insn_bits12_15 & 0x01) == 0x01
	      || (insn_bits12_15 & 0x07) == 0x04)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	    }
	  /* Floating point - compare instructions.  */
	  else if ((insn_bits12_15 & 0x03) == 0x02)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	    }
	  /* Floating point - integer conversions instructions.  */
	  else if (insn_bits12_15 == 0x00)
	    {
	      /* Convert float to integer instruction.  */
	      if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
		{
		  if (record_debug)
		    debug_printf ("float to int conversion");

		  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		}
	      /* Convert integer to float instruction.  */
	      else if ((opcode >> 1) == 0x01 && !rmode)
		{
		  if (record_debug)
		    debug_printf ("int to float conversion");

		  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      /* Move float to integer instruction.  */
	      else if ((opcode >> 1) == 0x03)
		{
		  if (record_debug)
		    debug_printf ("move float to int");

		  /* Bit 0 of opcode selects direction: FMOV to X vs
		     FMOV to V.  */
		  if (!(opcode & 0x01))
		    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		  else
		    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
	debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
	  && !bit (aarch64_insn_r->aarch64_insn, 15)
	  && bit (aarch64_insn_r->aarch64_insn, 10))
	{
	  /* SMOV/UMOV extract to a general-purpose register.  */
	  if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
	    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else
	record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
	debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  /* Record the V/X register.  */
  aarch64_insn_r->reg_rec_count++;

  /* Some of these instructions may set bits in the FPSR, so record it
     too.  */
  record_buf[1] = AARCH64_FPSR_REGNUM;
  aarch64_insn_r->reg_rec_count++;

  gdb_assert (aarch64_insn_r->reg_rec_count == 2);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4708
4709/* Decodes insns type and invokes its record handler. */
4710
4711static unsigned int
4748a9be 4712aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4713{
4714 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4715
4716 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4717 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4718 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4719 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4720
4721 /* Data processing - immediate instructions. */
4722 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4723 return aarch64_record_data_proc_imm (aarch64_insn_r);
4724
4725 /* Branch, exception generation and system instructions. */
4726 if (ins_bit26 && !ins_bit27 && ins_bit28)
4727 return aarch64_record_branch_except_sys (aarch64_insn_r);
4728
4729 /* Load and store instructions. */
4730 if (!ins_bit25 && ins_bit27)
4731 return aarch64_record_load_store (aarch64_insn_r);
4732
4733 /* Data processing - register instructions. */
4734 if (ins_bit25 && !ins_bit26 && ins_bit27)
4735 return aarch64_record_data_proc_reg (aarch64_insn_r);
4736
4737 /* Data processing - SIMD and floating point instructions. */
4738 if (ins_bit25 && ins_bit26 && ins_bit27)
4739 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4740
4741 return AARCH64_RECORD_UNSUPPORTED;
4742}
4743
4744/* Cleans up local record registers and memory allocations. */
4745
4746static void
4748a9be 4747deallocate_reg_mem (aarch64_insn_decode_record *record)
99afc88b
OJ
4748{
4749 xfree (record->aarch64_regs);
4750 xfree (record->aarch64_mems);
4751}
4752
1e2b521d
YQ
#if GDB_SELF_TEST
namespace selftests {

/* Exercise the record decoder on one concrete instruction: a PRFM
   (prefetch) hint, which must be decoded successfully while recording
   no register and no memory side effects.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record rec;
  memset (&rec, 0, sizeof (rec));
  rec.regcache = NULL;
  rec.this_addr = 0;
  rec.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  rec.aarch64_insn = 0xf9800020;

  uint32_t ret = aarch64_record_decode_insn_handler (&rec);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (rec.reg_rec_count == 0);
  SELF_CHECK (rec.mem_rec_count == 0);

  deallocate_reg_mem (&rec);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4786
99afc88b
OJ
4787/* Parse the current instruction and record the values of the registers and
4788 memory that will be changed in current instruction to record_arch_list
4789 return -1 if something is wrong. */
4790
4791int
4792aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4793 CORE_ADDR insn_addr)
4794{
4795 uint32_t rec_no = 0;
4796 uint8_t insn_size = 4;
4797 uint32_t ret = 0;
99afc88b 4798 gdb_byte buf[insn_size];
4748a9be 4799 aarch64_insn_decode_record aarch64_record;
99afc88b
OJ
4800
4801 memset (&buf[0], 0, insn_size);
4748a9be 4802 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
99afc88b
OJ
4803 target_read_memory (insn_addr, &buf[0], insn_size);
4804 aarch64_record.aarch64_insn
4805 = (uint32_t) extract_unsigned_integer (&buf[0],
4806 insn_size,
4807 gdbarch_byte_order (gdbarch));
4808 aarch64_record.regcache = regcache;
4809 aarch64_record.this_addr = insn_addr;
4810 aarch64_record.gdbarch = gdbarch;
4811
4812 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4813 if (ret == AARCH64_RECORD_UNSUPPORTED)
4814 {
6cb06a8c
TT
4815 gdb_printf (gdb_stderr,
4816 _("Process record does not support instruction "
4817 "0x%0x at address %s.\n"),
4818 aarch64_record.aarch64_insn,
4819 paddress (gdbarch, insn_addr));
99afc88b
OJ
4820 ret = -1;
4821 }
4822
4823 if (0 == ret)
4824 {
4825 /* Record registers. */
4826 record_full_arch_list_add_reg (aarch64_record.regcache,
4827 AARCH64_PC_REGNUM);
4828 /* Always record register CPSR. */
4829 record_full_arch_list_add_reg (aarch64_record.regcache,
4830 AARCH64_CPSR_REGNUM);
4831 if (aarch64_record.aarch64_regs)
4832 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4833 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4834 aarch64_record.aarch64_regs[rec_no]))
4835 ret = -1;
4836
4837 /* Record memories. */
4838 if (aarch64_record.aarch64_mems)
4839 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4840 if (record_full_arch_list_add_mem
4841 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4842 aarch64_record.aarch64_mems[rec_no].len))
4843 ret = -1;
4844
4845 if (record_full_arch_list_add_end ())
4846 ret = -1;
4847 }
4848
4849 deallocate_reg_mem (&aarch64_record);
4850 return ret;
4851}