]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/aarch64-tdep.c
[gdb/testsuite] Fix gdb.python/py-value-cc.exp for big endian
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
213516ef 3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
07b287a0
MS
24#include "gdbcmd.h"
25#include "gdbcore.h"
4de283e4 26#include "dis-asm.h"
d55e5aa6
TT
27#include "regcache.h"
28#include "reggroups.h"
4de283e4
TT
29#include "value.h"
30#include "arch-utils.h"
31#include "osabi.h"
32#include "frame-unwind.h"
33#include "frame-base.h"
d55e5aa6 34#include "trad-frame.h"
4de283e4
TT
35#include "objfiles.h"
36#include "dwarf2.h"
82ca8957 37#include "dwarf2/frame.h"
4de283e4
TT
38#include "gdbtypes.h"
39#include "prologue-value.h"
40#include "target-descriptions.h"
07b287a0 41#include "user-regs.h"
4de283e4 42#include "ax-gdb.h"
268a13a5 43#include "gdbsupport/selftest.h"
4de283e4
TT
44
45#include "aarch64-tdep.h"
46#include "aarch64-ravenscar-thread.h"
47
4de283e4
TT
48#include "record.h"
49#include "record-full.h"
50#include "arch/aarch64-insn.h"
0d12e84c 51#include "gdbarch.h"
4de283e4
TT
52
53#include "opcode/aarch64.h"
54#include <algorithm>
0ee6b1c5 55#include <unordered_map>
f77ee802 56
ea92689a
AH
57/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
58 four members. */
59#define HA_MAX_NUM_FLDS 4
60
95228a0d 61/* All possible aarch64 target descriptors. */
0ee6b1c5 62static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
95228a0d 63
07b287a0
MS
64/* The standard register names, and all the valid aliases for them. */
65static const struct
66{
67 const char *const name;
68 int regnum;
69} aarch64_register_aliases[] =
70{
71 /* 64-bit register names. */
72 {"fp", AARCH64_FP_REGNUM},
73 {"lr", AARCH64_LR_REGNUM},
74 {"sp", AARCH64_SP_REGNUM},
07b287a0
MS
75 /* specials */
76 {"ip0", AARCH64_X0_REGNUM + 16},
77 {"ip1", AARCH64_X0_REGNUM + 17}
78};
79
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
95
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
112
739e8682
AH
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
133
76bed0fd
AH
/* Names for the pointer-authentication pseudo registers.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};
141
5e984dbf
LM
/* Names for the Memory Tagging Extension pseudo registers.  */
static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};
147
07b287a0
MS
148/* AArch64 prologue cache structure. */
149struct aarch64_prologue_cache
150{
db634143
PL
151 /* The program counter at the start of the function. It is used to
152 identify this frame as a prologue frame. */
153 CORE_ADDR func;
154
155 /* The program counter at the time this frame was created; i.e. where
156 this function was called from. It is used to identify this frame as a
157 stub frame. */
158 CORE_ADDR prev_pc;
159
07b287a0
MS
160 /* The stack pointer at the time this frame was created; i.e. the
161 caller's stack pointer when this function was called. It is used
162 to identify this frame. */
163 CORE_ADDR prev_sp;
164
7dfa3edc
PL
165 /* Is the target available to read from? */
166 int available_p;
167
07b287a0
MS
168 /* The frame base for this frame is just prev_sp - frame size.
169 FRAMESIZE is the distance from the frame pointer to the
170 initial stack pointer. */
171 int framesize;
172
173 /* The register used to hold the frame pointer for this frame. */
174 int framereg;
175
176 /* Saved register offsets. */
098caef4 177 trad_frame_saved_reg *saved_regs;
07b287a0
MS
178};
179
07b287a0
MS
180static void
181show_aarch64_debug (struct ui_file *file, int from_tty,
dda83cd7 182 struct cmd_list_element *c, const char *value)
07b287a0 183{
6cb06a8c 184 gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
07b287a0
MS
185}
186
ffdbe864
YQ
187namespace {
188
4d9a9006
YQ
189/* Abstract instruction reader. */
190
191class abstract_instruction_reader
192{
193public:
194 /* Read in one instruction. */
195 virtual ULONGEST read (CORE_ADDR memaddr, int len,
196 enum bfd_endian byte_order) = 0;
197};
198
199/* Instruction reader from real target. */
200
201class instruction_reader : public abstract_instruction_reader
202{
203 public:
204 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
632e107b 205 override
4d9a9006 206 {
fc2f703e 207 return read_code_unsigned_integer (memaddr, len, byte_order);
4d9a9006
YQ
208 }
209};
210
ffdbe864
YQ
211} // namespace
212
3d31bc39
AH
213/* If address signing is enabled, mask off the signature bits from the link
214 register, which is passed by value in ADDR, using the register values in
215 THIS_FRAME. */
11e1b75f
AH
216
217static CORE_ADDR
345bd07c 218aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
bd2b40ac 219 frame_info_ptr this_frame, CORE_ADDR addr)
11e1b75f
AH
220{
221 if (tdep->has_pauth ()
222 && frame_unwind_register_unsigned (this_frame,
c9cd8ca4 223 tdep->ra_sign_state_regnum))
11e1b75f
AH
224 {
225 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
226 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
227 addr = addr & ~cmask;
3d31bc39
AH
228
229 /* Record in the frame that the link register required unmasking. */
230 set_frame_previous_pc_masked (this_frame);
11e1b75f
AH
231 }
232
233 return addr;
234}
235
aa7ca1bb
AH
236/* Implement the "get_pc_address_flags" gdbarch method. */
237
238static std::string
bd2b40ac 239aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
aa7ca1bb
AH
240{
241 if (pc != 0 && get_frame_pc_masked (frame))
242 return "PAC";
243
244 return "";
245}
246
07b287a0
MS
247/* Analyze a prologue, looking for a recognizable stack frame
248 and frame pointer. Scan until we encounter a store that could
249 clobber the stack frame unexpectedly, or an unknown instruction. */
250
251static CORE_ADDR
252aarch64_analyze_prologue (struct gdbarch *gdbarch,
253 CORE_ADDR start, CORE_ADDR limit,
4d9a9006
YQ
254 struct aarch64_prologue_cache *cache,
255 abstract_instruction_reader& reader)
07b287a0
MS
256{
257 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
258 int i;
f8e3fe0d
LM
259
260 /* Whether the stack has been set. This should be true when we notice a SP
261 to FP move or if we are using the SP as the base register for storing
262 data, in case the FP is ommitted. */
263 bool seen_stack_set = false;
264
187f5d00
YQ
265 /* Track X registers and D registers in prologue. */
266 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
07b287a0 267
187f5d00 268 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
07b287a0 269 regs[i] = pv_register (i, 0);
f7b7ed97 270 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
07b287a0
MS
271
272 for (; start < limit; start += 4)
273 {
274 uint32_t insn;
d9ebcbce 275 aarch64_inst inst;
07b287a0 276
4d9a9006 277 insn = reader.read (start, 4, byte_order_for_code);
07b287a0 278
561a72d4 279 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
d9ebcbce
YQ
280 break;
281
282 if (inst.opcode->iclass == addsub_imm
283 && (inst.opcode->op == OP_ADD
284 || strcmp ("sub", inst.opcode->name) == 0))
07b287a0 285 {
d9ebcbce
YQ
286 unsigned rd = inst.operands[0].reg.regno;
287 unsigned rn = inst.operands[1].reg.regno;
288
289 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
290 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
291 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
292 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
293
294 if (inst.opcode->op == OP_ADD)
295 {
296 regs[rd] = pv_add_constant (regs[rn],
297 inst.operands[2].imm.value);
298 }
299 else
300 {
301 regs[rd] = pv_add_constant (regs[rn],
302 -inst.operands[2].imm.value);
303 }
f8e3fe0d
LM
304
305 /* Did we move SP to FP? */
306 if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
307 seen_stack_set = true;
d9ebcbce 308 }
60adf22c
TV
309 else if (inst.opcode->iclass == addsub_ext
310 && strcmp ("sub", inst.opcode->name) == 0)
311 {
312 unsigned rd = inst.operands[0].reg.regno;
313 unsigned rn = inst.operands[1].reg.regno;
314 unsigned rm = inst.operands[2].reg.regno;
315
316 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
317 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
318 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
319 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);
320
321 regs[rd] = pv_subtract (regs[rn], regs[rm]);
322 }
d9ebcbce 323 else if (inst.opcode->iclass == branch_imm)
07b287a0
MS
324 {
325 /* Stop analysis on branch. */
326 break;
327 }
d9ebcbce 328 else if (inst.opcode->iclass == condbranch)
07b287a0
MS
329 {
330 /* Stop analysis on branch. */
331 break;
332 }
d9ebcbce 333 else if (inst.opcode->iclass == branch_reg)
07b287a0
MS
334 {
335 /* Stop analysis on branch. */
336 break;
337 }
d9ebcbce 338 else if (inst.opcode->iclass == compbranch)
07b287a0
MS
339 {
340 /* Stop analysis on branch. */
341 break;
342 }
d9ebcbce
YQ
343 else if (inst.opcode->op == OP_MOVZ)
344 {
60adf22c
TV
345 unsigned rd = inst.operands[0].reg.regno;
346
347 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
d9ebcbce 348 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
60adf22c
TV
349 gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
350 gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);
f8e3fe0d
LM
351
352 /* If this shows up before we set the stack, keep going. Otherwise
353 stop the analysis. */
354 if (seen_stack_set)
355 break;
356
60adf22c
TV
357 regs[rd] = pv_constant (inst.operands[1].imm.value
358 << inst.operands[1].shifter.amount);
d9ebcbce
YQ
359 }
360 else if (inst.opcode->iclass == log_shift
361 && strcmp (inst.opcode->name, "orr") == 0)
07b287a0 362 {
d9ebcbce
YQ
363 unsigned rd = inst.operands[0].reg.regno;
364 unsigned rn = inst.operands[1].reg.regno;
365 unsigned rm = inst.operands[2].reg.regno;
366
367 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
368 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
369 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
370
371 if (inst.operands[2].shifter.amount == 0
372 && rn == AARCH64_SP_REGNUM)
07b287a0
MS
373 regs[rd] = regs[rm];
374 else
375 {
c6185dce
SM
376 aarch64_debug_printf ("prologue analysis gave up "
377 "addr=%s opcode=0x%x (orr x register)",
378 core_addr_to_string_nz (start), insn);
379
07b287a0
MS
380 break;
381 }
382 }
d9ebcbce 383 else if (inst.opcode->op == OP_STUR)
07b287a0 384 {
d9ebcbce
YQ
385 unsigned rt = inst.operands[0].reg.regno;
386 unsigned rn = inst.operands[1].addr.base_regno;
75faf5c4 387 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
d9ebcbce
YQ
388
389 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
390 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
391 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
392 gdb_assert (!inst.operands[1].addr.offset.is_reg);
393
75faf5c4
AH
394 stack.store
395 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
396 size, regs[rt]);
f8e3fe0d
LM
397
398 /* Are we storing with SP as a base? */
399 if (rn == AARCH64_SP_REGNUM)
400 seen_stack_set = true;
07b287a0 401 }
d9ebcbce 402 else if ((inst.opcode->iclass == ldstpair_off
03bcd739
YQ
403 || (inst.opcode->iclass == ldstpair_indexed
404 && inst.operands[2].addr.preind))
d9ebcbce 405 && strcmp ("stp", inst.opcode->name) == 0)
07b287a0 406 {
03bcd739 407 /* STP with addressing mode Pre-indexed and Base register. */
187f5d00
YQ
408 unsigned rt1;
409 unsigned rt2;
d9ebcbce
YQ
410 unsigned rn = inst.operands[2].addr.base_regno;
411 int32_t imm = inst.operands[2].addr.offset.imm;
75faf5c4 412 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
d9ebcbce 413
187f5d00
YQ
414 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
415 || inst.operands[0].type == AARCH64_OPND_Ft);
416 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
417 || inst.operands[1].type == AARCH64_OPND_Ft2);
d9ebcbce
YQ
418 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
419 gdb_assert (!inst.operands[2].addr.offset.is_reg);
420
07b287a0
MS
421 /* If recording this store would invalidate the store area
422 (perhaps because rn is not known) then we should abandon
423 further prologue analysis. */
f7b7ed97 424 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
07b287a0
MS
425 break;
426
f7b7ed97 427 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
07b287a0
MS
428 break;
429
187f5d00
YQ
430 rt1 = inst.operands[0].reg.regno;
431 rt2 = inst.operands[1].reg.regno;
432 if (inst.operands[0].type == AARCH64_OPND_Ft)
433 {
187f5d00
YQ
434 rt1 += AARCH64_X_REGISTER_COUNT;
435 rt2 += AARCH64_X_REGISTER_COUNT;
436 }
437
75faf5c4
AH
438 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
439 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
14ac654f 440
d9ebcbce 441 if (inst.operands[2].addr.writeback)
93d96012 442 regs[rn] = pv_add_constant (regs[rn], imm);
07b287a0 443
f8e3fe0d
LM
444 /* Ignore the instruction that allocates stack space and sets
445 the SP. */
446 if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
447 seen_stack_set = true;
07b287a0 448 }
432ec081
YQ
449 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
450 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
451 && (inst.opcode->op == OP_STR_POS
452 || inst.opcode->op == OP_STRF_POS)))
453 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
454 && strcmp ("str", inst.opcode->name) == 0)
455 {
456 /* STR (immediate) */
457 unsigned int rt = inst.operands[0].reg.regno;
458 int32_t imm = inst.operands[1].addr.offset.imm;
459 unsigned int rn = inst.operands[1].addr.base_regno;
75faf5c4 460 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
432ec081
YQ
461 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
462 || inst.operands[0].type == AARCH64_OPND_Ft);
463
464 if (inst.operands[0].type == AARCH64_OPND_Ft)
75faf5c4 465 rt += AARCH64_X_REGISTER_COUNT;
432ec081 466
75faf5c4 467 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
432ec081
YQ
468 if (inst.operands[1].addr.writeback)
469 regs[rn] = pv_add_constant (regs[rn], imm);
f8e3fe0d
LM
470
471 /* Are we storing with SP as a base? */
472 if (rn == AARCH64_SP_REGNUM)
473 seen_stack_set = true;
432ec081 474 }
d9ebcbce 475 else if (inst.opcode->iclass == testbranch)
07b287a0
MS
476 {
477 /* Stop analysis on branch. */
478 break;
479 }
17e116a7
AH
480 else if (inst.opcode->iclass == ic_system)
481 {
345bd07c 482 aarch64_gdbarch_tdep *tdep
08106042 483 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
17e116a7
AH
484 int ra_state_val = 0;
485
486 if (insn == 0xd503233f /* paciasp. */
487 || insn == 0xd503237f /* pacibsp. */)
488 {
489 /* Return addresses are mangled. */
490 ra_state_val = 1;
491 }
492 else if (insn == 0xd50323bf /* autiasp. */
493 || insn == 0xd50323ff /* autibsp. */)
494 {
495 /* Return addresses are not mangled. */
496 ra_state_val = 0;
497 }
37989733
LM
498 else if (IS_BTI (insn))
499 /* We don't need to do anything special for a BTI instruction. */
500 continue;
17e116a7
AH
501 else
502 {
c6185dce
SM
503 aarch64_debug_printf ("prologue analysis gave up addr=%s"
504 " opcode=0x%x (iclass)",
505 core_addr_to_string_nz (start), insn);
17e116a7
AH
506 break;
507 }
508
509 if (tdep->has_pauth () && cache != nullptr)
a9a87d35 510 {
c9cd8ca4 511 int regnum = tdep->ra_sign_state_regnum;
a9a87d35
LM
512 cache->saved_regs[regnum].set_value (ra_state_val);
513 }
17e116a7 514 }
07b287a0
MS
515 else
516 {
c6185dce
SM
517 aarch64_debug_printf ("prologue analysis gave up addr=%s"
518 " opcode=0x%x",
519 core_addr_to_string_nz (start), insn);
520
07b287a0
MS
521 break;
522 }
523 }
524
525 if (cache == NULL)
f7b7ed97 526 return start;
07b287a0
MS
527
528 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
529 {
530 /* Frame pointer is fp. Frame size is constant. */
531 cache->framereg = AARCH64_FP_REGNUM;
532 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
533 }
534 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
535 {
536 /* Try the stack pointer. */
537 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
538 cache->framereg = AARCH64_SP_REGNUM;
539 }
540 else
541 {
542 /* We're just out of luck. We don't know where the frame is. */
543 cache->framereg = -1;
544 cache->framesize = 0;
545 }
546
547 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
548 {
549 CORE_ADDR offset;
550
f7b7ed97 551 if (stack.find_reg (gdbarch, i, &offset))
098caef4 552 cache->saved_regs[i].set_addr (offset);
07b287a0
MS
553 }
554
187f5d00
YQ
555 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
556 {
557 int regnum = gdbarch_num_regs (gdbarch);
558 CORE_ADDR offset;
559
f7b7ed97
TT
560 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
561 &offset))
098caef4 562 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
187f5d00
YQ
563 }
564
07b287a0
MS
565 return start;
566}
567
4d9a9006
YQ
568static CORE_ADDR
569aarch64_analyze_prologue (struct gdbarch *gdbarch,
570 CORE_ADDR start, CORE_ADDR limit,
571 struct aarch64_prologue_cache *cache)
572{
573 instruction_reader reader;
574
575 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
576 reader);
577}
578
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Unit tests for aarch64_analyze_prologue, driven by hand-written
   instruction sequences.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
	0x910003fd, /* mov x29, sp */
	0xf801c3f3, /* str x19, [sp, #28] */
	0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
      0x910003fd, /* mov x29, sp */
      0xf801c3f3, /* str x19, [sp, #28] */
      0xb9401fa0, /* ldr x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
891
07b287a0
MS
892/* Implement the "skip_prologue" gdbarch method. */
893
894static CORE_ADDR
895aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
896{
07b287a0 897 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
898
899 /* See if we can determine the end of the prologue via the symbol
900 table. If so, then return either PC, or the PC after the
901 prologue, whichever is greater. */
902 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
903 {
904 CORE_ADDR post_prologue_pc
905 = skip_prologue_using_sal (gdbarch, func_addr);
906
907 if (post_prologue_pc != 0)
325fac50 908 return std::max (pc, post_prologue_pc);
07b287a0
MS
909 }
910
911 /* Can't determine prologue from the symbol table, need to examine
912 instructions. */
913
914 /* Find an upper limit on the function prologue using the debug
915 information. If the debug information could not be used to
916 provide that bound, then use an arbitrary large number as the
917 upper bound. */
918 limit_pc = skip_prologue_using_sal (gdbarch, pc);
919 if (limit_pc == 0)
920 limit_pc = pc + 128; /* Magic. */
921
922 /* Try disassembling prologue. */
923 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
924}
925
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.

   Prefer bounding the prologue with line-table information and handing
   it to aarch64_analyze_prologue; when no symbol information is
   available, fall back to assuming a standard AArch64 frame record
   (FP/LR pair saved at the frame pointer).  */

static void
aarch64_scan_prologue (frame_info_ptr this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      /* Never scan past the PC we are actually stopped at, in case we
	 are stopped mid-prologue.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      /* No symbol information: assume the frame pointer (x29) holds a
	 standard frame record.  */
      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      /* Offsets of the saved FP (x29) and LR (x30) relative to the
	 frame base; made absolute later by the cache builder.  */
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* framereg == -1 means the prologue scan did not identify a frame;
     leave the cache marked unavailable.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Unavailable registers/memory simply leave available_p clear;
	 any other error is propagated to the caller.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
1067
07b287a0
MS
1068/* Our frame ID for a normal frame is the current function's starting
1069 PC and the caller's SP when we were called. */
1070
1071static void
bd2b40ac 1072aarch64_prologue_this_id (frame_info_ptr this_frame,
07b287a0
MS
1073 void **this_cache, struct frame_id *this_id)
1074{
7c8edfae
PL
1075 struct aarch64_prologue_cache *cache
1076 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 1077
7dfa3edc
PL
1078 if (!cache->available_p)
1079 *this_id = frame_id_build_unavailable_stack (cache->func);
1080 else
1081 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
1082}
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (frame_info_ptr this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      /* With pointer authentication, strip the PAC signature from the
	 saved LR when the RA_STATE pseudo register says it is signed.  */
      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else comes from the saved-register table built by the
     prologue analyzer.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",		/* name */
  NORMAL_FRAME,			/* type */
  aarch64_prologue_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_prologue_this_id,	/* this_id */
  aarch64_prologue_prev_register,	/* prev_register */
  NULL,				/* unwind_data */
  default_frame_sniffer		/* sniffer */
};
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      /* Stubs (e.g. PLT entries) have no prologue to analyze: the
	 caller's SP and PC are simply the current values.  */
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      /* Unavailable registers leave available_p clear; any other
	 error is propagated.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}
1179
02a2a705
PL
1180/* Implement the "stop_reason" frame_unwind method. */
1181
1182static enum unwind_stop_reason
bd2b40ac 1183aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
02a2a705
PL
1184 void **this_cache)
1185{
1186 struct aarch64_prologue_cache *cache
1187 = aarch64_make_stub_cache (this_frame, this_cache);
1188
1189 if (!cache->available_p)
1190 return UNWIND_UNAVAILABLE;
1191
1192 return UNWIND_NO_REASON;
1193}
1194
07b287a0
MS
1195/* Our frame ID for a stub frame is the current SP and LR. */
1196
1197static void
bd2b40ac 1198aarch64_stub_this_id (frame_info_ptr this_frame,
07b287a0
MS
1199 void **this_cache, struct frame_id *this_id)
1200{
8b61f75d
PL
1201 struct aarch64_prologue_cache *cache
1202 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1203
02a2a705
PL
1204 if (cache->available_p)
1205 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1206 else
1207 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1208}
1209
1210/* Implement the "sniffer" frame_unwind method. */
1211
1212static int
1213aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
bd2b40ac 1214 frame_info_ptr this_frame,
07b287a0
MS
1215 void **this_prologue_cache)
1216{
1217 CORE_ADDR addr_in_block;
1218 gdb_byte dummy[4];
1219
1220 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1221 if (in_plt_section (addr_in_block)
07b287a0
MS
1222 /* We also use the stub winder if the target memory is unreadable
1223 to avoid having the prologue unwinder trying to read it. */
1224 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1225 return 1;
1226
1227 return 0;
1228}
/* AArch64 stub unwinder.  Shares the prologue unwinder's prev_register
   method, but uses its own cache builder and sniffer.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",		/* name */
  NORMAL_FRAME,			/* type */
  aarch64_stub_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_stub_this_id,		/* this_id */
  aarch64_prologue_prev_register,	/* prev_register */
  NULL,				/* unwind_data */
  aarch64_stub_unwind_sniffer	/* sniffer */
};
1241
1242/* Return the frame base address of *THIS_FRAME. */
1243
1244static CORE_ADDR
bd2b40ac 1245aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
07b287a0 1246{
7c8edfae
PL
1247 struct aarch64_prologue_cache *cache
1248 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1249
1250 return cache->prev_sp - cache->framesize;
1251}
/* AArch64 default frame base information.  The same function serves
   the this_base, this_locals and this_args methods.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  Registered via aarch64_dwarf2_frame_init_reg for the
   PC only; any other register is an internal error.  */

static struct value *
aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
			      void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The saved PC is the LR, with any pointer-authentication
	 signature stripped.  */
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (_("Unexpected register %d"), regnum);
    }
}
/* Single-byte DWARF expressions used as the saved-value expression for
   the RA_SIGN_STATE pseudo register: DW_OP_lit0 means "return address
   not signed", DW_OP_lit1 means "signed".  */
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       frame_info_ptr this_frame)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The PC unwinds via the LR; see aarch64_dwarf2_prev_register.  */
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      /* The previous SP is the CFA by definition.  */
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->ra_sign_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  /* The PAC mask registers are invariant across frames.  */
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}
/* Implement the execute_dwarf_cfa_vendor_op method.  Handles the
   vendor-specific DW_CFA_AARCH64_negate_ra_state opcode; returns true
   if OP was consumed, false to let the generic code handle it.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      /* A null expression means the initial state (unsigned), so the
	 toggle from null or op_lit0 goes to op_lit1 and back.  */
      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
/* Used for matching BRK instructions for AArch64.  The mask leaves the
   16-bit immediate field free so any BRK #imm variant matches.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.
   Returns true if the instruction at ADDRESS is a BRK (a permanent,
   program-inserted breakpoint).  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
					     gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple choices
	 of such instructions with different immediate values.  Different OS'
	 may use a different variation, but they have the same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  /* Unreadable memory: cannot be a permanent breakpoint.  */
  return false;
}
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
b907456c
AB
1411/* Implement the gdbarch type alignment method, overrides the generic
1412 alignment algorithm for anything that is aarch64 specific. */
07b287a0 1413
b907456c
AB
1414static ULONGEST
1415aarch64_type_align (gdbarch *gdbarch, struct type *t)
07b287a0 1416{
07b287a0 1417 t = check_typedef (t);
bd63c870 1418 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
07b287a0 1419 {
b907456c
AB
1420 /* Use the natural alignment for vector types (the same for
1421 scalar type), but the maximum alignment is 128-bit. */
df86565b 1422 if (t->length () > 16)
b907456c 1423 return 16;
238f2452 1424 else
df86565b 1425 return t->length ();
07b287a0 1426 }
b907456c
AB
1427
1428 /* Allow the common code to calculate the alignment. */
1429 return 0;
07b287a0
MS
1430}
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      /* A scalar float no wider than a quadword occupies one register.  */
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex is two base elements of its component type.  */
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    /* Short vectors are 8 or 16 bytes and count as one base
	       element.  */
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    /* An ordinary array contributes its element count times the
	       registers its element needs.  */
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
1542
1543/* Return true if an argument, whose type is described by TYPE, can be passed or
1544 returned in simd/fp registers, providing enough parameter passing registers
1545 are available. This is as described in the AAPCS64.
1546
1547 Upon successful return, *COUNT returns the number of needed registers,
1548 *FUNDAMENTAL_TYPE contains the type of those registers.
1549
1550 Candidate as per the AAPCS64 5.4.2.C is either a:
1551 - float.
1552 - short-vector.
1553 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1554 all the members are floats and has at most 4 members.
1555 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1556 all the members are short vectors and has at most 4 members.
1557 - Complex (7.1.1)
1558
1559 Note that HFAs and HVAs can include nested structures and arrays. */
1560
0e745c60 1561static bool
ea92689a
AH
1562aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1563 struct type **fundamental_type)
1564{
1565 if (type == nullptr)
1566 return false;
1567
1568 *fundamental_type = nullptr;
1569
1570 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1571 fundamental_type);
1572
1573 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1574 {
1575 *count = ag_count;
1576 return true;
1577 }
1578 else
1579 return false;
1580}
/* AArch64 function call information structure.  Tracks the AAPCS64
   argument-marshalling state while building a dummy call.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  Items are written to memory in reverse order
     once all arguments have been processed.  */
  std::vector<stack_item_t> si;
};
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = type->length ();
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg).data ();

  info->argnum++;

  /* Split the value into X_REGISTER_SIZE chunks, one per register.  */
  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  /* PCS C.2: once the V registers are exhausted, NSRN is fixed at 8 so
     later candidates also go on the stack.  */
  info->nsrn = 8;
  return 0;
}
1679
1680/* Marshall an argument onto the stack. */
1681
1682static void
1683pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1684 struct value *arg)
07b287a0 1685{
50888e42 1686 const bfd_byte *buf = value_contents (arg).data ();
df86565b 1687 int len = type->length ();
07b287a0
MS
1688 int align;
1689 stack_item_t item;
1690
1691 info->argnum++;
1692
b907456c 1693 align = type_align (type);
07b287a0
MS
1694
1695 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1696 Natural alignment of the argument's type. */
1697 align = align_up (align, 8);
1698
1699 /* The AArch64 PCS requires at most doubleword alignment. */
1700 if (align > 16)
1701 align = 16;
1702
c6185dce
SM
1703 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1704 info->nsaa);
07b287a0
MS
1705
1706 item.len = len;
1707 item.data = buf;
89055eaa 1708 info->si.push_back (item);
07b287a0
MS
1709
1710 info->nsaa += len;
1711 if (info->nsaa & (align - 1))
1712 {
1713 /* Push stack alignment padding. */
1714 int pad = align - (info->nsaa & (align - 1));
1715
1716 item.len = pad;
c3c87445 1717 item.data = NULL;
07b287a0 1718
89055eaa 1719 info->si.push_back (item);
07b287a0
MS
1720 info->nsaa += pad;
1721 }
1722}
1723
1724/* Marshall an argument into a sequence of one or more consecutive X
1725 registers or, if insufficient X registers are available then onto
1726 the stack. */
1727
1728static void
1729pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1730 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1731 struct value *arg)
07b287a0 1732{
df86565b 1733 int len = type->length ();
07b287a0
MS
1734 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1735
1736 /* PCS C.13 - Pass in registers if we have enough spare */
1737 if (info->ngrn + nregs <= 8)
1738 {
8e80f9d1 1739 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1740 info->ngrn += nregs;
1741 }
1742 else
1743 {
1744 info->ngrn = 8;
8e80f9d1 1745 pass_on_stack (info, type, arg);
07b287a0
MS
1746 }
1747}
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			value_contents (arg).data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
	/* Real part then imaginary part, in consecutive V registers.  */
	const bfd_byte *buf = value_contents (arg).data ();
	struct type *target_type = check_typedef (arg_type->target_type ());

	if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, target_type->length (),
			  buf + target_type->length ());
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, arg_type->length (),
			  value_contents (arg).data ());
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* HFA/HVA: recurse into each member, one V register each.  */
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&arg_type->field (i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
/* Implement the "push_dummy_call" gdbarch method.  Marshals ARGS
   according to the AAPCS64 and returns the final stack pointer.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = arg_type->length ();

      /* If arg can be passed in v registers as per the AAPCS64, then do so if
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (arg_type->code ())
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4 && !is_fixed_point_type (arg_type))
	    {
	      /* Promote to 32 bit integer.  */
	      if (arg_type->is_unsigned ())
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg).data (), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  /* Pop the FILO of stacked items, writing them below SP in reverse
     order; NULL data entries are alignment padding and left as-is.  */
  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
1961
1962/* Implement the "frame_align" gdbarch method. */
1963
1964static CORE_ADDR
1965aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1966{
1967 /* Align the stack to sixteen bytes. */
1968 return sp & ~(CORE_ADDR) 15;
1969}
1970
1971/* Return the type for an AdvSISD Q register. */
1972
1973static struct type *
1974aarch64_vnq_type (struct gdbarch *gdbarch)
1975{
08106042 1976 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
1977
1978 if (tdep->vnq_type == NULL)
1979 {
1980 struct type *t;
1981 struct type *elem;
1982
1983 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1984 TYPE_CODE_UNION);
1985
1986 elem = builtin_type (gdbarch)->builtin_uint128;
1987 append_composite_type_field (t, "u", elem);
1988
1989 elem = builtin_type (gdbarch)->builtin_int128;
1990 append_composite_type_field (t, "s", elem);
1991
1992 tdep->vnq_type = t;
1993 }
1994
1995 return tdep->vnq_type;
1996}
1997
1998/* Return the type for an AdvSISD D register. */
1999
2000static struct type *
2001aarch64_vnd_type (struct gdbarch *gdbarch)
2002{
08106042 2003 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2004
2005 if (tdep->vnd_type == NULL)
2006 {
2007 struct type *t;
2008 struct type *elem;
2009
2010 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2011 TYPE_CODE_UNION);
2012
2013 elem = builtin_type (gdbarch)->builtin_double;
2014 append_composite_type_field (t, "f", elem);
2015
2016 elem = builtin_type (gdbarch)->builtin_uint64;
2017 append_composite_type_field (t, "u", elem);
2018
2019 elem = builtin_type (gdbarch)->builtin_int64;
2020 append_composite_type_field (t, "s", elem);
2021
2022 tdep->vnd_type = t;
2023 }
2024
2025 return tdep->vnd_type;
2026}
2027
2028/* Return the type for an AdvSISD S register. */
2029
2030static struct type *
2031aarch64_vns_type (struct gdbarch *gdbarch)
2032{
08106042 2033 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2034
2035 if (tdep->vns_type == NULL)
2036 {
2037 struct type *t;
2038 struct type *elem;
2039
2040 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2041 TYPE_CODE_UNION);
2042
2043 elem = builtin_type (gdbarch)->builtin_float;
2044 append_composite_type_field (t, "f", elem);
2045
2046 elem = builtin_type (gdbarch)->builtin_uint32;
2047 append_composite_type_field (t, "u", elem);
2048
2049 elem = builtin_type (gdbarch)->builtin_int32;
2050 append_composite_type_field (t, "s", elem);
2051
2052 tdep->vns_type = t;
2053 }
2054
2055 return tdep->vns_type;
2056}
2057
2058/* Return the type for an AdvSISD H register. */
2059
2060static struct type *
2061aarch64_vnh_type (struct gdbarch *gdbarch)
2062{
08106042 2063 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2064
2065 if (tdep->vnh_type == NULL)
2066 {
2067 struct type *t;
2068 struct type *elem;
2069
2070 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2071 TYPE_CODE_UNION);
2072
5291fe3c
SP
2073 elem = builtin_type (gdbarch)->builtin_bfloat16;
2074 append_composite_type_field (t, "bf", elem);
2075
a6d0f249
AH
2076 elem = builtin_type (gdbarch)->builtin_half;
2077 append_composite_type_field (t, "f", elem);
2078
07b287a0
MS
2079 elem = builtin_type (gdbarch)->builtin_uint16;
2080 append_composite_type_field (t, "u", elem);
2081
2082 elem = builtin_type (gdbarch)->builtin_int16;
2083 append_composite_type_field (t, "s", elem);
2084
2085 tdep->vnh_type = t;
2086 }
2087
2088 return tdep->vnh_type;
2089}
2090
2091/* Return the type for an AdvSISD B register. */
2092
2093static struct type *
2094aarch64_vnb_type (struct gdbarch *gdbarch)
2095{
08106042 2096 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2097
2098 if (tdep->vnb_type == NULL)
2099 {
2100 struct type *t;
2101 struct type *elem;
2102
2103 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2104 TYPE_CODE_UNION);
2105
2106 elem = builtin_type (gdbarch)->builtin_uint8;
2107 append_composite_type_field (t, "u", elem);
2108
2109 elem = builtin_type (gdbarch)->builtin_int8;
2110 append_composite_type_field (t, "s", elem);
2111
2112 tdep->vnb_type = t;
2113 }
2114
2115 return tdep->vnb_type;
2116}
2117
63bad7b6
AH
2118/* Return the type for an AdvSISD V register. */
2119
2120static struct type *
2121aarch64_vnv_type (struct gdbarch *gdbarch)
2122{
08106042 2123 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6
AH
2124
2125 if (tdep->vnv_type == NULL)
2126 {
09624f1f 2127 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
bffa1015
AH
2128 slice from the non-pseudo vector registers. However NEON V registers
2129 are always vector registers, and need constructing as such. */
2130 const struct builtin_type *bt = builtin_type (gdbarch);
2131
63bad7b6
AH
2132 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2133 TYPE_CODE_UNION);
2134
bffa1015
AH
2135 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2136 TYPE_CODE_UNION);
2137 append_composite_type_field (sub, "f",
2138 init_vector_type (bt->builtin_double, 2));
2139 append_composite_type_field (sub, "u",
2140 init_vector_type (bt->builtin_uint64, 2));
2141 append_composite_type_field (sub, "s",
2142 init_vector_type (bt->builtin_int64, 2));
2143 append_composite_type_field (t, "d", sub);
2144
2145 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2146 TYPE_CODE_UNION);
2147 append_composite_type_field (sub, "f",
2148 init_vector_type (bt->builtin_float, 4));
2149 append_composite_type_field (sub, "u",
2150 init_vector_type (bt->builtin_uint32, 4));
2151 append_composite_type_field (sub, "s",
2152 init_vector_type (bt->builtin_int32, 4));
2153 append_composite_type_field (t, "s", sub);
2154
2155 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2156 TYPE_CODE_UNION);
5291fe3c
SP
2157 append_composite_type_field (sub, "bf",
2158 init_vector_type (bt->builtin_bfloat16, 8));
a6d0f249
AH
2159 append_composite_type_field (sub, "f",
2160 init_vector_type (bt->builtin_half, 8));
bffa1015
AH
2161 append_composite_type_field (sub, "u",
2162 init_vector_type (bt->builtin_uint16, 8));
2163 append_composite_type_field (sub, "s",
2164 init_vector_type (bt->builtin_int16, 8));
2165 append_composite_type_field (t, "h", sub);
2166
2167 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2168 TYPE_CODE_UNION);
2169 append_composite_type_field (sub, "u",
2170 init_vector_type (bt->builtin_uint8, 16));
2171 append_composite_type_field (sub, "s",
2172 init_vector_type (bt->builtin_int8, 16));
2173 append_composite_type_field (t, "b", sub);
2174
2175 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2176 TYPE_CODE_UNION);
2177 append_composite_type_field (sub, "u",
2178 init_vector_type (bt->builtin_uint128, 1));
2179 append_composite_type_field (sub, "s",
2180 init_vector_type (bt->builtin_int128, 1));
2181 append_composite_type_field (t, "q", sub);
63bad7b6
AH
2182
2183 tdep->vnv_type = t;
2184 }
2185
2186 return tdep->vnv_type;
2187}
2188
07b287a0
MS
2189/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2190
2191static int
2192aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2193{
08106042 2194 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
34dcc7cf 2195
07b287a0
MS
2196 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2197 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2198
2199 if (reg == AARCH64_DWARF_SP)
2200 return AARCH64_SP_REGNUM;
2201
1fe84861
YY
2202 if (reg == AARCH64_DWARF_PC)
2203 return AARCH64_PC_REGNUM;
2204
07b287a0
MS
2205 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2206 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2207
65d4cada
AH
2208 if (reg == AARCH64_DWARF_SVE_VG)
2209 return AARCH64_SVE_VG_REGNUM;
2210
2211 if (reg == AARCH64_DWARF_SVE_FFR)
2212 return AARCH64_SVE_FFR_REGNUM;
2213
2214 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2215 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2216
2217 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2218 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2219
34dcc7cf
AH
2220 if (tdep->has_pauth ())
2221 {
c9cd8ca4
LM
2222 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2223 return tdep->ra_sign_state_regnum;
34dcc7cf
AH
2224 }
2225
07b287a0
MS
2226 return -1;
2227}
07b287a0
MS
2228
2229/* Implement the "print_insn" gdbarch method. */
2230
2231static int
2232aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2233{
2234 info->symbols = NULL;
6394c606 2235 return default_print_insn (memaddr, info);
07b287a0
MS
2236}
2237
2238/* AArch64 BRK software debug mode instruction.
2239 Note that AArch64 code is always little-endian.
2240 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 2241constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 2242
04180708 2243typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
07b287a0
MS
2244
2245/* Extract from an array REGS containing the (raw) register state a
2246 function return value of type TYPE, and copy that, in virtual
2247 format, into VALBUF. */
2248
2249static void
2250aarch64_extract_return_value (struct type *type, struct regcache *regs,
2251 gdb_byte *valbuf)
2252{
ac7936df 2253 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2254 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
2255 int elements;
2256 struct type *fundamental_type;
07b287a0 2257
4f4aedeb
AH
2258 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2259 &fundamental_type))
07b287a0 2260 {
df86565b 2261 int len = fundamental_type->length ();
4f4aedeb
AH
2262
2263 for (int i = 0; i < elements; i++)
2264 {
2265 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
2266 /* Enough space for a full vector register. */
2267 gdb_byte buf[register_size (gdbarch, regno)];
2268 gdb_assert (len <= sizeof (buf));
4f4aedeb 2269
c6185dce
SM
2270 aarch64_debug_printf
2271 ("read HFA or HVA return value element %d from %s",
2272 i + 1, gdbarch_register_name (gdbarch, regno));
2273
4f4aedeb 2274 regs->cooked_read (regno, buf);
07b287a0 2275
4f4aedeb
AH
2276 memcpy (valbuf, buf, len);
2277 valbuf += len;
2278 }
07b287a0 2279 }
78134374
SM
2280 else if (type->code () == TYPE_CODE_INT
2281 || type->code () == TYPE_CODE_CHAR
2282 || type->code () == TYPE_CODE_BOOL
2283 || type->code () == TYPE_CODE_PTR
aa006118 2284 || TYPE_IS_REFERENCE (type)
78134374 2285 || type->code () == TYPE_CODE_ENUM)
07b287a0 2286 {
6471e7d2 2287 /* If the type is a plain integer, then the access is
07b287a0
MS
2288 straight-forward. Otherwise we have to play around a bit
2289 more. */
df86565b 2290 int len = type->length ();
07b287a0
MS
2291 int regno = AARCH64_X0_REGNUM;
2292 ULONGEST tmp;
2293
2294 while (len > 0)
2295 {
2296 /* By using store_unsigned_integer we avoid having to do
2297 anything special for small big-endian values. */
2298 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2299 store_unsigned_integer (valbuf,
2300 (len > X_REGISTER_SIZE
2301 ? X_REGISTER_SIZE : len), byte_order, tmp);
2302 len -= X_REGISTER_SIZE;
2303 valbuf += X_REGISTER_SIZE;
2304 }
2305 }
07b287a0
MS
2306 else
2307 {
2308 /* For a structure or union the behaviour is as if the value had
dda83cd7
SM
2309 been stored to word-aligned memory and then loaded into
2310 registers with 64-bit load instruction(s). */
df86565b 2311 int len = type->length ();
07b287a0
MS
2312 int regno = AARCH64_X0_REGNUM;
2313 bfd_byte buf[X_REGISTER_SIZE];
2314
2315 while (len > 0)
2316 {
dca08e1f 2317 regs->cooked_read (regno++, buf);
07b287a0
MS
2318 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2319 len -= X_REGISTER_SIZE;
2320 valbuf += X_REGISTER_SIZE;
2321 }
2322 }
2323}
2324
2325
2326/* Will a function return an aggregate type in memory or in a
2327 register? Return 0 if an aggregate type can be returned in a
2328 register, 1 if it must be returned in memory. */
2329
2330static int
2331aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2332{
f168693b 2333 type = check_typedef (type);
4f4aedeb
AH
2334 int elements;
2335 struct type *fundamental_type;
07b287a0 2336
911627e7
TT
2337 if (TYPE_HAS_DYNAMIC_LENGTH (type))
2338 return 1;
2339
4f4aedeb
AH
2340 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2341 &fundamental_type))
07b287a0 2342 {
cd635f74
YQ
2343 /* v0-v7 are used to return values and one register is allocated
2344 for one member. However, HFA or HVA has at most four members. */
07b287a0
MS
2345 return 0;
2346 }
2347
df86565b 2348 if (type->length () > 16
bab22d06 2349 || !language_pass_by_reference (type).trivially_copyable)
07b287a0
MS
2350 {
2351 /* PCS B.6 Aggregates larger than 16 bytes are passed by
dda83cd7 2352 invisible reference. */
07b287a0
MS
2353
2354 return 1;
2355 }
2356
2357 return 0;
2358}
2359
2360/* Write into appropriate registers a function return value of type
2361 TYPE, given in virtual format. */
2362
2363static void
2364aarch64_store_return_value (struct type *type, struct regcache *regs,
2365 const gdb_byte *valbuf)
2366{
ac7936df 2367 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2368 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
2369 int elements;
2370 struct type *fundamental_type;
07b287a0 2371
4f4aedeb
AH
2372 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2373 &fundamental_type))
07b287a0 2374 {
df86565b 2375 int len = fundamental_type->length ();
4f4aedeb
AH
2376
2377 for (int i = 0; i < elements; i++)
2378 {
2379 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
2380 /* Enough space for a full vector register. */
2381 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2382 gdb_assert (len <= sizeof (tmpbuf));
4f4aedeb 2383
c6185dce
SM
2384 aarch64_debug_printf
2385 ("write HFA or HVA return value element %d to %s",
2386 i + 1, gdbarch_register_name (gdbarch, regno));
07b287a0 2387
4f4aedeb
AH
2388 memcpy (tmpbuf, valbuf,
2389 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2390 regs->cooked_write (regno, tmpbuf);
2391 valbuf += len;
2392 }
07b287a0 2393 }
78134374
SM
2394 else if (type->code () == TYPE_CODE_INT
2395 || type->code () == TYPE_CODE_CHAR
2396 || type->code () == TYPE_CODE_BOOL
2397 || type->code () == TYPE_CODE_PTR
aa006118 2398 || TYPE_IS_REFERENCE (type)
78134374 2399 || type->code () == TYPE_CODE_ENUM)
07b287a0 2400 {
df86565b 2401 if (type->length () <= X_REGISTER_SIZE)
07b287a0
MS
2402 {
2403 /* Values of one word or less are zero/sign-extended and
2404 returned in r0. */
2405 bfd_byte tmpbuf[X_REGISTER_SIZE];
2406 LONGEST val = unpack_long (type, valbuf);
2407
2408 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
b66f5587 2409 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
07b287a0
MS
2410 }
2411 else
2412 {
2413 /* Integral values greater than one word are stored in
2414 consecutive registers starting with r0. This will always
2415 be a multiple of the regiser size. */
df86565b 2416 int len = type->length ();
07b287a0
MS
2417 int regno = AARCH64_X0_REGNUM;
2418
2419 while (len > 0)
2420 {
b66f5587 2421 regs->cooked_write (regno++, valbuf);
07b287a0
MS
2422 len -= X_REGISTER_SIZE;
2423 valbuf += X_REGISTER_SIZE;
2424 }
2425 }
2426 }
07b287a0
MS
2427 else
2428 {
2429 /* For a structure or union the behaviour is as if the value had
2430 been stored to word-aligned memory and then loaded into
2431 registers with 64-bit load instruction(s). */
df86565b 2432 int len = type->length ();
07b287a0
MS
2433 int regno = AARCH64_X0_REGNUM;
2434 bfd_byte tmpbuf[X_REGISTER_SIZE];
2435
2436 while (len > 0)
2437 {
2438 memcpy (tmpbuf, valbuf,
2439 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
b66f5587 2440 regs->cooked_write (regno++, tmpbuf);
07b287a0
MS
2441 len -= X_REGISTER_SIZE;
2442 valbuf += X_REGISTER_SIZE;
2443 }
2444 }
2445}
2446
2447/* Implement the "return_value" gdbarch method. */
2448
2449static enum return_value_convention
2450aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2451 struct type *valtype, struct regcache *regcache,
5cb0f2d5 2452 struct value **read_value, const gdb_byte *writebuf)
07b287a0 2453{
78134374
SM
2454 if (valtype->code () == TYPE_CODE_STRUCT
2455 || valtype->code () == TYPE_CODE_UNION
2456 || valtype->code () == TYPE_CODE_ARRAY)
07b287a0
MS
2457 {
2458 if (aarch64_return_in_memory (gdbarch, valtype))
2459 {
bab22d06
LM
2460 /* From the AAPCS64's Result Return section:
2461
2462 "Otherwise, the caller shall reserve a block of memory of
2463 sufficient size and alignment to hold the result. The address
2464 of the memory block shall be passed as an additional argument to
2465 the function in x8. */
2466
c6185dce 2467 aarch64_debug_printf ("return value in memory");
bab22d06 2468
911627e7 2469 if (read_value != nullptr)
bab22d06
LM
2470 {
2471 CORE_ADDR addr;
2472
2473 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
911627e7 2474 *read_value = value_at_non_lval (valtype, addr);
bab22d06
LM
2475 }
2476
2477 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
07b287a0
MS
2478 }
2479 }
2480
2481 if (writebuf)
2482 aarch64_store_return_value (valtype, regcache, writebuf);
2483
911627e7
TT
2484 if (read_value)
2485 {
2486 *read_value = allocate_value (valtype);
2487 aarch64_extract_return_value (valtype, regcache,
2488 value_contents_raw (*read_value).data ());
2489 }
07b287a0 2490
c6185dce 2491 aarch64_debug_printf ("return value in registers");
07b287a0
MS
2492
2493 return RETURN_VALUE_REGISTER_CONVENTION;
2494}
2495
2496/* Implement the "get_longjmp_target" gdbarch method. */
2497
2498static int
bd2b40ac 2499aarch64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
07b287a0
MS
2500{
2501 CORE_ADDR jb_addr;
2502 gdb_byte buf[X_REGISTER_SIZE];
2503 struct gdbarch *gdbarch = get_frame_arch (frame);
08106042 2504 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2505 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2506
2507 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2508
2509 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2510 X_REGISTER_SIZE))
2511 return 0;
2512
2513 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2514 return 1;
2515}
ea873d8e
PL
2516
2517/* Implement the "gen_return_address" gdbarch method. */
2518
2519static void
2520aarch64_gen_return_address (struct gdbarch *gdbarch,
2521 struct agent_expr *ax, struct axs_value *value,
2522 CORE_ADDR scope)
2523{
2524 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2525 value->kind = axs_lvalue_register;
2526 value->u.reg = AARCH64_LR_REGNUM;
2527}
07b287a0
MS
2528\f
2529
e63ae49b
LM
2530/* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2531 otherwise. */
2532
2533static bool
2534is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2535{
2536 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2537
2538 if (tdep->w_pseudo_base <= regnum
2539 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2540 return true;
2541
2542 return false;
2543}
2544
07b287a0
MS
2545/* Return the pseudo register name corresponding to register regnum. */
2546
2547static const char *
2548aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2549{
08106042 2550 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2551
e63ae49b
LM
2552 /* W pseudo-registers. Bottom halves of the X registers. */
2553 static const char *const w_name[] =
2554 {
2555 "w0", "w1", "w2", "w3",
2556 "w4", "w5", "w6", "w7",
2557 "w8", "w9", "w10", "w11",
2558 "w12", "w13", "w14", "w15",
2559 "w16", "w17", "w18", "w19",
2560 "w20", "w21", "w22", "w23",
2561 "w24", "w25", "w26", "w27",
2562 "w28", "w29", "w30",
2563 };
2564
07b287a0
MS
2565 static const char *const q_name[] =
2566 {
2567 "q0", "q1", "q2", "q3",
2568 "q4", "q5", "q6", "q7",
2569 "q8", "q9", "q10", "q11",
2570 "q12", "q13", "q14", "q15",
2571 "q16", "q17", "q18", "q19",
2572 "q20", "q21", "q22", "q23",
2573 "q24", "q25", "q26", "q27",
2574 "q28", "q29", "q30", "q31",
2575 };
2576
2577 static const char *const d_name[] =
2578 {
2579 "d0", "d1", "d2", "d3",
2580 "d4", "d5", "d6", "d7",
2581 "d8", "d9", "d10", "d11",
2582 "d12", "d13", "d14", "d15",
2583 "d16", "d17", "d18", "d19",
2584 "d20", "d21", "d22", "d23",
2585 "d24", "d25", "d26", "d27",
2586 "d28", "d29", "d30", "d31",
2587 };
2588
2589 static const char *const s_name[] =
2590 {
2591 "s0", "s1", "s2", "s3",
2592 "s4", "s5", "s6", "s7",
2593 "s8", "s9", "s10", "s11",
2594 "s12", "s13", "s14", "s15",
2595 "s16", "s17", "s18", "s19",
2596 "s20", "s21", "s22", "s23",
2597 "s24", "s25", "s26", "s27",
2598 "s28", "s29", "s30", "s31",
2599 };
2600
2601 static const char *const h_name[] =
2602 {
2603 "h0", "h1", "h2", "h3",
2604 "h4", "h5", "h6", "h7",
2605 "h8", "h9", "h10", "h11",
2606 "h12", "h13", "h14", "h15",
2607 "h16", "h17", "h18", "h19",
2608 "h20", "h21", "h22", "h23",
2609 "h24", "h25", "h26", "h27",
2610 "h28", "h29", "h30", "h31",
2611 };
2612
2613 static const char *const b_name[] =
2614 {
2615 "b0", "b1", "b2", "b3",
2616 "b4", "b5", "b6", "b7",
2617 "b8", "b9", "b10", "b11",
2618 "b12", "b13", "b14", "b15",
2619 "b16", "b17", "b18", "b19",
2620 "b20", "b21", "b22", "b23",
2621 "b24", "b25", "b26", "b27",
2622 "b28", "b29", "b30", "b31",
2623 };
2624
34dcc7cf 2625 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2626
34dcc7cf
AH
2627 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2628 return q_name[p_regnum - AARCH64_Q0_REGNUM];
07b287a0 2629
34dcc7cf
AH
2630 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2631 return d_name[p_regnum - AARCH64_D0_REGNUM];
07b287a0 2632
34dcc7cf
AH
2633 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2634 return s_name[p_regnum - AARCH64_S0_REGNUM];
07b287a0 2635
34dcc7cf
AH
2636 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2637 return h_name[p_regnum - AARCH64_H0_REGNUM];
07b287a0 2638
34dcc7cf
AH
2639 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2640 return b_name[p_regnum - AARCH64_B0_REGNUM];
07b287a0 2641
e63ae49b
LM
2642 /* W pseudo-registers? */
2643 if (is_w_pseudo_register (gdbarch, regnum))
2644 return w_name[regnum - tdep->w_pseudo_base];
2645
63bad7b6
AH
2646 if (tdep->has_sve ())
2647 {
2648 static const char *const sve_v_name[] =
2649 {
2650 "v0", "v1", "v2", "v3",
2651 "v4", "v5", "v6", "v7",
2652 "v8", "v9", "v10", "v11",
2653 "v12", "v13", "v14", "v15",
2654 "v16", "v17", "v18", "v19",
2655 "v20", "v21", "v22", "v23",
2656 "v24", "v25", "v26", "v27",
2657 "v28", "v29", "v30", "v31",
2658 };
2659
34dcc7cf
AH
2660 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2661 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2662 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
63bad7b6
AH
2663 }
2664
34dcc7cf
AH
2665 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2666 prevents it from being read by methods such as
2667 mi_cmd_trace_frame_collected. */
c9cd8ca4 2668 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf
AH
2669 return "";
2670
f34652de 2671 internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
34dcc7cf 2672 p_regnum);
07b287a0
MS
2673}
2674
2675/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2676
2677static struct type *
2678aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2679{
08106042 2680 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2681
34dcc7cf 2682 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2683
34dcc7cf 2684 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0
MS
2685 return aarch64_vnq_type (gdbarch);
2686
34dcc7cf 2687 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2688 return aarch64_vnd_type (gdbarch);
2689
34dcc7cf 2690 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2691 return aarch64_vns_type (gdbarch);
2692
34dcc7cf 2693 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0
MS
2694 return aarch64_vnh_type (gdbarch);
2695
34dcc7cf 2696 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0
MS
2697 return aarch64_vnb_type (gdbarch);
2698
34dcc7cf
AH
2699 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2700 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6
AH
2701 return aarch64_vnv_type (gdbarch);
2702
e63ae49b
LM
2703 /* W pseudo-registers are 32-bit. */
2704 if (is_w_pseudo_register (gdbarch, regnum))
2705 return builtin_type (gdbarch)->builtin_uint32;
2706
c9cd8ca4 2707 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf
AH
2708 return builtin_type (gdbarch)->builtin_uint64;
2709
f34652de 2710 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
34dcc7cf 2711 p_regnum);
07b287a0
MS
2712}
2713
2714/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2715
2716static int
2717aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
dbf5d61b 2718 const struct reggroup *group)
07b287a0 2719{
08106042 2720 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2721
34dcc7cf 2722 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2723
34dcc7cf 2724 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0 2725 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2726 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2727 return (group == all_reggroup || group == vector_reggroup
2728 || group == float_reggroup);
34dcc7cf 2729 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2730 return (group == all_reggroup || group == vector_reggroup
2731 || group == float_reggroup);
34dcc7cf 2732 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0 2733 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2734 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0 2735 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
2736 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2737 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6 2738 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2739 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
c9cd8ca4 2740 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf 2741 return 0;
07b287a0
MS
2742
2743 return group == all_reggroup;
2744}
2745
3c5cd5c3
AH
2746/* Helper for aarch64_pseudo_read_value. */
2747
2748static struct value *
63bad7b6
AH
2749aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2750 readable_regcache *regcache, int regnum_offset,
3c5cd5c3
AH
2751 int regsize, struct value *result_value)
2752{
3c5cd5c3
AH
2753 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2754
63bad7b6
AH
2755 /* Enough space for a full vector register. */
2756 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2757 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2758
3c5cd5c3
AH
2759 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2760 mark_value_bytes_unavailable (result_value, 0,
df86565b 2761 value_type (result_value)->length ());
3c5cd5c3 2762 else
50888e42 2763 memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);
63bad7b6 2764
3c5cd5c3
AH
2765 return result_value;
2766 }
2767
07b287a0
MS
2768/* Implement the "pseudo_register_read_value" gdbarch method. */
2769
2770static struct value *
3c5cd5c3 2771aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
07b287a0
MS
2772 int regnum)
2773{
08106042 2774 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3c5cd5c3 2775 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
07b287a0 2776
07b287a0
MS
2777 VALUE_LVAL (result_value) = lval_register;
2778 VALUE_REGNUM (result_value) = regnum;
07b287a0 2779
e63ae49b
LM
2780 if (is_w_pseudo_register (gdbarch, regnum))
2781 {
2782 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2783 /* Default offset for little endian. */
2784 int offset = 0;
2785
2786 if (byte_order == BFD_ENDIAN_BIG)
2787 offset = 4;
2788
2789 /* Find the correct X register to extract the data from. */
2790 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2791 gdb_byte data[4];
2792
2793 /* Read the bottom 4 bytes of X. */
2794 if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
2795 mark_value_bytes_unavailable (result_value, 0, 4);
2796 else
2797 memcpy (value_contents_raw (result_value).data (), data, 4);
2798
2799 return result_value;
2800 }
2801
07b287a0
MS
2802 regnum -= gdbarch_num_regs (gdbarch);
2803
2804 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2805 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2806 regnum - AARCH64_Q0_REGNUM,
3c5cd5c3 2807 Q_REGISTER_SIZE, result_value);
07b287a0
MS
2808
2809 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2810 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2811 regnum - AARCH64_D0_REGNUM,
3c5cd5c3 2812 D_REGISTER_SIZE, result_value);
07b287a0
MS
2813
2814 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2815 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2816 regnum - AARCH64_S0_REGNUM,
3c5cd5c3 2817 S_REGISTER_SIZE, result_value);
07b287a0
MS
2818
2819 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2820 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2821 regnum - AARCH64_H0_REGNUM,
3c5cd5c3 2822 H_REGISTER_SIZE, result_value);
07b287a0
MS
2823
2824 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2825 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2826 regnum - AARCH64_B0_REGNUM,
3c5cd5c3 2827 B_REGISTER_SIZE, result_value);
07b287a0 2828
63bad7b6
AH
2829 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2830 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2831 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2832 regnum - AARCH64_SVE_V0_REGNUM,
2833 V_REGISTER_SIZE, result_value);
2834
07b287a0
MS
2835 gdb_assert_not_reached ("regnum out of bound");
2836}
2837
3c5cd5c3 2838/* Helper for aarch64_pseudo_write. */
07b287a0
MS
2839
2840static void
63bad7b6
AH
2841aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2842 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2843{
3c5cd5c3 2844 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2845
63bad7b6
AH
2846 /* Enough space for a full vector register. */
2847 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2848 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2849
07b287a0
MS
2850 /* Ensure the register buffer is zero, we want gdb writes of the
2851 various 'scalar' pseudo registers to behavior like architectural
2852 writes, register width bytes are written the remainder are set to
2853 zero. */
63bad7b6 2854 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2855
3c5cd5c3
AH
2856 memcpy (reg_buf, buf, regsize);
2857 regcache->raw_write (v_regnum, reg_buf);
2858}
2859
2860/* Implement the "pseudo_register_write" gdbarch method. */
2861
2862static void
2863aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2864 int regnum, const gdb_byte *buf)
2865{
08106042 2866 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
e63ae49b
LM
2867
2868 if (is_w_pseudo_register (gdbarch, regnum))
2869 {
2870 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2871 /* Default offset for little endian. */
2872 int offset = 0;
2873
2874 if (byte_order == BFD_ENDIAN_BIG)
2875 offset = 4;
2876
2877 /* Find the correct X register to extract the data from. */
2878 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2879
2880 /* First zero-out the contents of X. */
2881 ULONGEST zero = 0;
2882 regcache->raw_write (x_regnum, zero);
2883 /* Write to the bottom 4 bytes of X. */
2884 regcache->raw_write_part (x_regnum, offset, 4, buf);
2885 return;
2886 }
2887
07b287a0
MS
2888 regnum -= gdbarch_num_regs (gdbarch);
2889
2890 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2891 return aarch64_pseudo_write_1 (gdbarch, regcache,
2892 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2893 buf);
07b287a0
MS
2894
2895 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2896 return aarch64_pseudo_write_1 (gdbarch, regcache,
2897 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2898 buf);
07b287a0
MS
2899
2900 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2901 return aarch64_pseudo_write_1 (gdbarch, regcache,
2902 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2903 buf);
07b287a0
MS
2904
2905 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2906 return aarch64_pseudo_write_1 (gdbarch, regcache,
2907 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2908 buf);
07b287a0
MS
2909
2910 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2911 return aarch64_pseudo_write_1 (gdbarch, regcache,
2912 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2913 buf);
2914
2915 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2916 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2917 return aarch64_pseudo_write_1 (gdbarch, regcache,
2918 regnum - AARCH64_SVE_V0_REGNUM,
2919 V_REGISTER_SIZE, buf);
07b287a0
MS
2920
2921 gdb_assert_not_reached ("regnum out of bound");
2922}
2923
07b287a0
MS
2924/* Callback function for user_reg_add. */
2925
2926static struct value *
bd2b40ac 2927value_of_aarch64_user_reg (frame_info_ptr frame, const void *baton)
07b287a0 2928{
9a3c8263 2929 const int *reg_p = (const int *) baton;
07b287a0
MS
2930
2931 return value_of_register (*reg_p, frame);
2932}
2933\f
2934
9404b58f
KM
2935/* Implement the "software_single_step" gdbarch method, needed to
2936 single step through atomic sequences on AArch64. */
2937
a0ff9e1a 2938static std::vector<CORE_ADDR>
f5ea389a 2939aarch64_software_single_step (struct regcache *regcache)
9404b58f 2940{
ac7936df 2941 struct gdbarch *gdbarch = regcache->arch ();
9404b58f
KM
2942 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2943 const int insn_size = 4;
2944 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2945 CORE_ADDR pc = regcache_read_pc (regcache);
70ab8ccd 2946 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
9404b58f
KM
2947 CORE_ADDR loc = pc;
2948 CORE_ADDR closing_insn = 0;
94355de7
LM
2949
2950 ULONGEST insn_from_memory;
2951 if (!safe_read_memory_unsigned_integer (loc, insn_size,
2952 byte_order_for_code,
2953 &insn_from_memory))
2954 {
2955 /* Assume we don't have a atomic sequence, as we couldn't read the
2956 instruction in this location. */
2957 return {};
2958 }
2959
2960 uint32_t insn = insn_from_memory;
9404b58f
KM
2961 int index;
2962 int insn_count;
2963 int bc_insn_count = 0; /* Conditional branch instruction count. */
2964 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
f77ee802
YQ
2965 aarch64_inst inst;
2966
561a72d4 2967 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2968 return {};
9404b58f
KM
2969
2970 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2971 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
a0ff9e1a 2972 return {};
9404b58f
KM
2973
2974 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2975 {
9404b58f 2976 loc += insn_size;
9404b58f 2977
94355de7
LM
2978 if (!safe_read_memory_unsigned_integer (loc, insn_size,
2979 byte_order_for_code,
2980 &insn_from_memory))
2981 {
2982 /* Assume we don't have a atomic sequence, as we couldn't read the
2983 instruction in this location. */
2984 return {};
2985 }
2986
2987 insn = insn_from_memory;
561a72d4 2988 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2989 return {};
9404b58f 2990 /* Check if the instruction is a conditional branch. */
f77ee802 2991 if (inst.opcode->iclass == condbranch)
9404b58f 2992 {
f77ee802
YQ
2993 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2994
9404b58f 2995 if (bc_insn_count >= 1)
a0ff9e1a 2996 return {};
9404b58f
KM
2997
2998 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2999 breaks[1] = loc + inst.operands[0].imm.value;
9404b58f
KM
3000
3001 bc_insn_count++;
3002 last_breakpoint++;
3003 }
3004
3005 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 3006 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
9404b58f
KM
3007 {
3008 closing_insn = loc;
3009 break;
3010 }
3011 }
3012
3013 /* We didn't find a closing Store Exclusive instruction, fall back. */
3014 if (!closing_insn)
a0ff9e1a 3015 return {};
9404b58f
KM
3016
3017 /* Insert breakpoint after the end of the atomic sequence. */
3018 breaks[0] = loc + insn_size;
3019
3020 /* Check for duplicated breakpoints, and also check that the second
3021 breakpoint is not within the atomic sequence. */
3022 if (last_breakpoint
3023 && (breaks[1] == breaks[0]
3024 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
3025 last_breakpoint = 0;
3026
a0ff9e1a
SM
3027 std::vector<CORE_ADDR> next_pcs;
3028
9404b58f
KM
3029 /* Insert the breakpoint at the end of the sequence, and one at the
3030 destination of the conditional branch, if it exists. */
3031 for (index = 0; index <= last_breakpoint; index++)
a0ff9e1a 3032 next_pcs.push_back (breaks[index]);
9404b58f 3033
93f9a11f 3034 return next_pcs;
9404b58f
KM
3035}
3036
1152d984
SM
3037struct aarch64_displaced_step_copy_insn_closure
3038 : public displaced_step_copy_insn_closure
b6542f81
YQ
3039{
3040 /* It is true when condition instruction, such as B.CON, TBZ, etc,
3041 is being displaced stepping. */
f0c702d4 3042 bool cond = false;
b6542f81 3043
0c271889
LM
3044 /* PC adjustment offset after displaced stepping. If 0, then we don't
3045 write the PC back, assuming the PC is already the right address. */
cfba9872 3046 int32_t pc_adjust = 0;
b6542f81
YQ
3047};
3048
3049/* Data when visiting instructions for displaced stepping. */
3050
3051struct aarch64_displaced_step_data
3052{
3053 struct aarch64_insn_data base;
3054
3055 /* The address where the instruction will be executed at. */
3056 CORE_ADDR new_addr;
3057 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
e935475c 3058 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
b6542f81
YQ
3059 /* Number of instructions in INSN_BUF. */
3060 unsigned insn_count;
3061 /* Registers when doing displaced stepping. */
3062 struct regcache *regs;
3063
1152d984 3064 aarch64_displaced_step_copy_insn_closure *dsc;
b6542f81
YQ
3065};
3066
3067/* Implementation of aarch64_insn_visitor method "b". */
3068
3069static void
3070aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3071 struct aarch64_insn_data *data)
3072{
3073 struct aarch64_displaced_step_data *dsd
3074 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 3075 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
3076
3077 if (can_encode_int32 (new_offset, 28))
3078 {
3079 /* Emit B rather than BL, because executing BL on a new address
3080 will get the wrong address into LR. In order to avoid this,
3081 we emit B, and update LR if the instruction is BL. */
3082 emit_b (dsd->insn_buf, 0, new_offset);
3083 dsd->insn_count++;
3084 }
3085 else
3086 {
3087 /* Write NOP. */
3088 emit_nop (dsd->insn_buf);
3089 dsd->insn_count++;
3090 dsd->dsc->pc_adjust = offset;
3091 }
3092
3093 if (is_bl)
3094 {
3095 /* Update LR. */
3096 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3097 data->insn_addr + 4);
3098 }
3099}
3100
3101/* Implementation of aarch64_insn_visitor method "b_cond". */
3102
3103static void
3104aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3105 struct aarch64_insn_data *data)
3106{
3107 struct aarch64_displaced_step_data *dsd
3108 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3109
3110 /* GDB has to fix up PC after displaced step this instruction
3111 differently according to the condition is true or false. Instead
3112 of checking COND against conditional flags, we can use
3113 the following instructions, and GDB can tell how to fix up PC
3114 according to the PC value.
3115
3116 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3117 INSN1 ;
3118 TAKEN:
3119 INSN2
3120 */
3121
3122 emit_bcond (dsd->insn_buf, cond, 8);
f0c702d4 3123 dsd->dsc->cond = true;
b6542f81
YQ
3124 dsd->dsc->pc_adjust = offset;
3125 dsd->insn_count = 1;
3126}
3127
3128/* Dynamically allocate a new register. If we know the register
3129 statically, we should make it a global as above instead of using this
3130 helper function. */
3131
3132static struct aarch64_register
3133aarch64_register (unsigned num, int is64)
3134{
3135 return (struct aarch64_register) { num, is64 };
3136}
3137
3138/* Implementation of aarch64_insn_visitor method "cb". */
3139
3140static void
3141aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3142 const unsigned rn, int is64,
3143 struct aarch64_insn_data *data)
3144{
3145 struct aarch64_displaced_step_data *dsd
3146 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3147
3148 /* The offset is out of range for a compare and branch
3149 instruction. We can use the following instructions instead:
3150
3151 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3152 INSN1 ;
3153 TAKEN:
3154 INSN2
3155 */
3156 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3157 dsd->insn_count = 1;
f0c702d4 3158 dsd->dsc->cond = true;
b6542f81
YQ
3159 dsd->dsc->pc_adjust = offset;
3160}
3161
3162/* Implementation of aarch64_insn_visitor method "tb". */
3163
3164static void
3165aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3166 const unsigned rt, unsigned bit,
3167 struct aarch64_insn_data *data)
3168{
3169 struct aarch64_displaced_step_data *dsd
3170 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3171
3172 /* The offset is out of range for a test bit and branch
3173 instruction We can use the following instructions instead:
3174
3175 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3176 INSN1 ;
3177 TAKEN:
3178 INSN2
3179
3180 */
3181 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3182 dsd->insn_count = 1;
f0c702d4 3183 dsd->dsc->cond = true;
b6542f81
YQ
3184 dsd->dsc->pc_adjust = offset;
3185}
3186
3187/* Implementation of aarch64_insn_visitor method "adr". */
3188
3189static void
3190aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3191 const int is_adrp, struct aarch64_insn_data *data)
3192{
3193 struct aarch64_displaced_step_data *dsd
3194 = (struct aarch64_displaced_step_data *) data;
3195 /* We know exactly the address the ADR{P,} instruction will compute.
3196 We can just write it to the destination register. */
3197 CORE_ADDR address = data->insn_addr + offset;
3198
3199 if (is_adrp)
3200 {
3201 /* Clear the lower 12 bits of the offset to get the 4K page. */
3202 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3203 address & ~0xfff);
3204 }
3205 else
3206 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3207 address);
3208
3209 dsd->dsc->pc_adjust = 4;
3210 emit_nop (dsd->insn_buf);
3211 dsd->insn_count = 1;
3212}
3213
3214/* Implementation of aarch64_insn_visitor method "ldr_literal". */
3215
3216static void
3217aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3218 const unsigned rt, const int is64,
3219 struct aarch64_insn_data *data)
3220{
3221 struct aarch64_displaced_step_data *dsd
3222 = (struct aarch64_displaced_step_data *) data;
3223 CORE_ADDR address = data->insn_addr + offset;
3224 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3225
3226 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3227 address);
3228
3229 if (is_sw)
3230 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3231 aarch64_register (rt, 1), zero);
3232 else
3233 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3234 aarch64_register (rt, 1), zero);
3235
3236 dsd->dsc->pc_adjust = 4;
3237}
3238
3239/* Implementation of aarch64_insn_visitor method "others". */
3240
3241static void
3242aarch64_displaced_step_others (const uint32_t insn,
3243 struct aarch64_insn_data *data)
3244{
3245 struct aarch64_displaced_step_data *dsd
3246 = (struct aarch64_displaced_step_data *) data;
3247
807f647c
MM
3248 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3249 if (masked_insn == BLR)
b6542f81 3250 {
807f647c
MM
3251 /* Emit a BR to the same register and then update LR to the original
3252 address (similar to aarch64_displaced_step_b). */
3253 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3254 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3255 data->insn_addr + 4);
b6542f81 3256 }
807f647c
MM
3257 else
3258 aarch64_emit_insn (dsd->insn_buf, insn);
3259 dsd->insn_count = 1;
3260
3261 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3262 dsd->dsc->pc_adjust = 0;
b6542f81
YQ
3263 else
3264 dsd->dsc->pc_adjust = 4;
3265}
3266
3267static const struct aarch64_insn_visitor visitor =
3268{
3269 aarch64_displaced_step_b,
3270 aarch64_displaced_step_b_cond,
3271 aarch64_displaced_step_cb,
3272 aarch64_displaced_step_tb,
3273 aarch64_displaced_step_adr,
3274 aarch64_displaced_step_ldr_literal,
3275 aarch64_displaced_step_others,
3276};
3277
3278/* Implement the "displaced_step_copy_insn" gdbarch method. */
3279
1152d984 3280displaced_step_copy_insn_closure_up
b6542f81
YQ
3281aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3282 CORE_ADDR from, CORE_ADDR to,
3283 struct regcache *regs)
3284{
b6542f81 3285 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
b6542f81 3286 struct aarch64_displaced_step_data dsd;
c86a40c6 3287 aarch64_inst inst;
94355de7
LM
3288 ULONGEST insn_from_memory;
3289
3290 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
3291 &insn_from_memory))
3292 return nullptr;
3293
3294 uint32_t insn = insn_from_memory;
c86a40c6 3295
561a72d4 3296 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
c86a40c6 3297 return NULL;
b6542f81
YQ
3298
3299 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 3300 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
b6542f81
YQ
3301 {
3302 /* We can't displaced step atomic sequences. */
3303 return NULL;
3304 }
3305
1152d984
SM
3306 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3307 (new aarch64_displaced_step_copy_insn_closure);
b6542f81
YQ
3308 dsd.base.insn_addr = from;
3309 dsd.new_addr = to;
3310 dsd.regs = regs;
cfba9872 3311 dsd.dsc = dsc.get ();
034f1a81 3312 dsd.insn_count = 0;
b6542f81
YQ
3313 aarch64_relocate_instruction (insn, &visitor,
3314 (struct aarch64_insn_data *) &dsd);
e935475c 3315 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
b6542f81
YQ
3316
3317 if (dsd.insn_count != 0)
3318 {
3319 int i;
3320
3321 /* Instruction can be relocated to scratch pad. Copy
3322 relocated instruction(s) there. */
3323 for (i = 0; i < dsd.insn_count; i++)
3324 {
136821d9
SM
3325 displaced_debug_printf ("writing insn %.8x at %s",
3326 dsd.insn_buf[i],
3327 paddress (gdbarch, to + i * 4));
3328
b6542f81
YQ
3329 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3330 (ULONGEST) dsd.insn_buf[i]);
3331 }
3332 }
3333 else
3334 {
b6542f81
YQ
3335 dsc = NULL;
3336 }
3337
6d0cf446 3338 /* This is a work around for a problem with g++ 4.8. */
1152d984 3339 return displaced_step_copy_insn_closure_up (dsc.release ());
b6542f81
YQ
3340}
3341
3342/* Implement the "displaced_step_fixup" gdbarch method. */
3343
3344void
3345aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
1152d984 3346 struct displaced_step_copy_insn_closure *dsc_,
b6542f81
YQ
3347 CORE_ADDR from, CORE_ADDR to,
3348 struct regcache *regs)
3349{
1152d984
SM
3350 aarch64_displaced_step_copy_insn_closure *dsc
3351 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
cfba9872 3352
0c271889
LM
3353 ULONGEST pc;
3354
3355 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3356
136821d9
SM
3357 displaced_debug_printf ("PC after stepping: %s (was %s).",
3358 paddress (gdbarch, pc), paddress (gdbarch, to));
1ab139e5 3359
b6542f81
YQ
3360 if (dsc->cond)
3361 {
136821d9
SM
3362 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3363 dsc->pc_adjust);
1ab139e5 3364
b6542f81
YQ
3365 if (pc - to == 8)
3366 {
3367 /* Condition is true. */
3368 }
3369 else if (pc - to == 4)
3370 {
3371 /* Condition is false. */
3372 dsc->pc_adjust = 4;
3373 }
3374 else
3375 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
1ab139e5 3376
136821d9
SM
3377 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3378 dsc->pc_adjust);
b6542f81
YQ
3379 }
3380
136821d9
SM
3381 displaced_debug_printf ("%s PC by %d",
3382 dsc->pc_adjust ? "adjusting" : "not adjusting",
3383 dsc->pc_adjust);
1ab139e5 3384
b6542f81
YQ
3385 if (dsc->pc_adjust != 0)
3386 {
0c271889
LM
3387 /* Make sure the previous instruction was executed (that is, the PC
3388 has changed). If the PC didn't change, then discard the adjustment
3389 offset. Otherwise we may skip an instruction before its execution
3390 took place. */
3391 if ((pc - to) == 0)
1ab139e5 3392 {
136821d9 3393 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
1ab139e5
LM
3394 dsc->pc_adjust = 0;
3395 }
0c271889 3396
136821d9
SM
3397 displaced_debug_printf ("fixup: set PC to %s:%d",
3398 paddress (gdbarch, from), dsc->pc_adjust);
3399
b6542f81
YQ
3400 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3401 from + dsc->pc_adjust);
3402 }
3403}
3404
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   AArch64 always hardware-single-steps over the relocated copy.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
3412
95228a0d
AH
3413/* Get the correct target description for the given VQ value.
3414 If VQ is zero then it is assumed SVE is not supported.
c1bd443b
LM
3415 (It is not possible to set VQ to zero on an SVE system).
3416
414d5848
JB
3417 MTE_P indicates the presence of the Memory Tagging Extension feature.
3418
3419 TLS_P indicates the presence of the Thread Local Storage feature. */
da434ccb
AH
3420
3421const target_desc *
0ee6b1c5 3422aarch64_read_description (const aarch64_features &features)
da434ccb 3423{
0ee6b1c5
JB
3424 if (features.vq > AARCH64_MAX_SVE_VQ)
3425 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
95228a0d
AH
3426 AARCH64_MAX_SVE_VQ);
3427
0ee6b1c5 3428 struct target_desc *tdesc = tdesc_aarch64_map[features];
da434ccb 3429
95228a0d
AH
3430 if (tdesc == NULL)
3431 {
0ee6b1c5
JB
3432 tdesc = aarch64_create_target_description (features);
3433 tdesc_aarch64_map[features] = tdesc;
95228a0d 3434 }
da434ccb 3435
95228a0d 3436 return tdesc;
da434ccb
AH
3437}
3438
ba2d2bb2
AH
3439/* Return the VQ used when creating the target description TDESC. */
3440
1332a140 3441static uint64_t
ba2d2bb2
AH
3442aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3443{
3444 const struct tdesc_feature *feature_sve;
3445
3446 if (!tdesc_has_registers (tdesc))
3447 return 0;
3448
3449 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3450
3451 if (feature_sve == nullptr)
3452 return 0;
3453
12863263
AH
3454 uint64_t vl = tdesc_register_bitsize (feature_sve,
3455 aarch64_sve_register_names[0]) / 8;
ba2d2bb2
AH
3456 return sve_vq_from_vl (vl);
3457}
3458
4f3681cc
TJB
3459/* Get the AArch64 features present in the given target description. */
3460
3461aarch64_features
3462aarch64_features_from_target_desc (const struct target_desc *tdesc)
3463{
3464 aarch64_features features;
3465
3466 if (tdesc == nullptr)
3467 return features;
3468
3469 features.vq = aarch64_get_tdesc_vq (tdesc);
3470 features.pauth
3471 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
3472 features.mte
3473 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
ba60b963
LM
3474
3475 const struct tdesc_feature *tls_feature
3476 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
3477
3478 if (tls_feature != nullptr)
3479 {
3480 /* We have TLS registers. Find out how many. */
3481 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
3482 features.tls = 2;
3483 else
3484 features.tls = 1;
3485 }
4f3681cc
TJB
3486
3487 return features;
3488}
3489
76bed0fd
AH
3490/* Implement the "cannot_store_register" gdbarch method. */
3491
3492static int
3493aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3494{
08106042 3495 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
76bed0fd
AH
3496
3497 if (!tdep->has_pauth ())
3498 return 0;
3499
3500 /* Pointer authentication registers are read-only. */
3501 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3502 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3503}
3504
da729c5c
TT
3505/* Implement the stack_frame_destroyed_p gdbarch method. */
3506
3507static int
3508aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3509{
3510 CORE_ADDR func_start, func_end;
3511 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3512 return 0;
3513
3514 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
94355de7
LM
3515
3516 ULONGEST insn_from_memory;
3517 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
3518 &insn_from_memory))
3519 return 0;
3520
3521 uint32_t insn = insn_from_memory;
da729c5c
TT
3522
3523 aarch64_inst inst;
3524 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
3525 return 0;
3526
3527 return streq (inst.opcode->name, "ret");
3528}
3529
07b287a0
MS
3530/* Initialize the current architecture based on INFO. If possible,
3531 re-use an architecture from ARCHES, which is a list of
3532 architectures already created during this debugging session.
3533
3534 Called e.g. at program startup, when reading a core file, and when
3535 reading a binary file. */
3536
3537static struct gdbarch *
3538aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3539{
ccb8d7e8 3540 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
76bed0fd 3541 const struct tdesc_feature *feature_pauth;
ccb8d7e8
AH
3542 bool valid_p = true;
3543 int i, num_regs = 0, num_pseudo_regs = 0;
c9cd8ca4 3544 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
ba60b963 3545 int first_mte_regnum = -1, first_tls_regnum = -1;
4f3681cc 3546 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4da037ef
AH
3547
3548 if (vq > AARCH64_MAX_SVE_VQ)
f34652de 3549 internal_error (_("VQ out of bounds: %s (max %d)"),
596179f7 3550 pulongest (vq), AARCH64_MAX_SVE_VQ);
4da037ef 3551
ccb8d7e8
AH
3552 /* If there is already a candidate, use it. */
3553 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3554 best_arch != nullptr;
3555 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3556 {
345bd07c 3557 aarch64_gdbarch_tdep *tdep
08106042 3558 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4da037ef 3559 if (tdep && tdep->vq == vq)
ccb8d7e8
AH
3560 return best_arch->gdbarch;
3561 }
07b287a0 3562
4da037ef
AH
3563 /* Ensure we always have a target descriptor, and that it is for the given VQ
3564 value. */
ccb8d7e8 3565 const struct target_desc *tdesc = info.target_desc;
4f3681cc
TJB
3566 if (!tdesc_has_registers (tdesc))
3567 tdesc = aarch64_read_description ({});
07b287a0
MS
3568 gdb_assert (tdesc);
3569
ccb8d7e8 3570 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
ba2d2bb2
AH
3571 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3572 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
76bed0fd 3573 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
5e984dbf
LM
3574 const struct tdesc_feature *feature_mte
3575 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
414d5848
JB
3576 const struct tdesc_feature *feature_tls
3577 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
07b287a0 3578
ccb8d7e8
AH
3579 if (feature_core == nullptr)
3580 return nullptr;
07b287a0 3581
c1e1314d 3582 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
07b287a0 3583
ba2d2bb2 3584 /* Validate the description provides the mandatory core R registers
07b287a0
MS
3585 and allocate their numbers. */
3586 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
c1e1314d 3587 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
ba2d2bb2
AH
3588 AARCH64_X0_REGNUM + i,
3589 aarch64_r_register_names[i]);
07b287a0
MS
3590
3591 num_regs = AARCH64_X0_REGNUM + i;
3592
ba2d2bb2 3593 /* Add the V registers. */
ccb8d7e8 3594 if (feature_fpu != nullptr)
07b287a0 3595 {
ccb8d7e8 3596 if (feature_sve != nullptr)
ba2d2bb2
AH
3597 error (_("Program contains both fpu and SVE features."));
3598
3599 /* Validate the description provides the mandatory V registers
3600 and allocate their numbers. */
07b287a0 3601 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
c1e1314d 3602 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
ba2d2bb2
AH
3603 AARCH64_V0_REGNUM + i,
3604 aarch64_v_register_names[i]);
07b287a0
MS
3605
3606 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3607 }
07b287a0 3608
ba2d2bb2 3609 /* Add the SVE registers. */
ccb8d7e8 3610 if (feature_sve != nullptr)
ba2d2bb2
AH
3611 {
3612 /* Validate the description provides the mandatory SVE registers
3613 and allocate their numbers. */
3614 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
c1e1314d 3615 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
ba2d2bb2
AH
3616 AARCH64_SVE_Z0_REGNUM + i,
3617 aarch64_sve_register_names[i]);
3618
3619 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3620 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3621 }
3622
ccb8d7e8 3623 if (feature_fpu != nullptr || feature_sve != nullptr)
ba2d2bb2 3624 {
07b287a0
MS
3625 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3626 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3627 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3628 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3629 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3630 }
3631
414d5848 3632 /* Add the TLS register. */
ba60b963 3633 int tls_register_count = 0;
414d5848
JB
3634 if (feature_tls != nullptr)
3635 {
ba60b963 3636 first_tls_regnum = num_regs;
414d5848 3637
ba60b963
LM
3638 /* Look for the TLS registers. tpidr is required, but tpidr2 is
3639 optional. */
3640 valid_p
3641 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3642 first_tls_regnum, "tpidr");
3643
3644 if (valid_p)
3645 {
3646 tls_register_count++;
3647
3648 bool has_tpidr2
3649 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3650 first_tls_regnum + tls_register_count,
3651 "tpidr2");
3652
3653 /* Figure out how many TLS registers we have. */
3654 if (has_tpidr2)
3655 tls_register_count++;
3656
3657 num_regs += tls_register_count;
3658 }
3659 else
3660 {
3661 warning (_("Provided TLS register feature doesn't contain "
3662 "required tpidr register."));
3663 return nullptr;
3664 }
414d5848
JB
3665 }
3666
76bed0fd
AH
3667 /* Add the pauth registers. */
3668 if (feature_pauth != NULL)
3669 {
3670 first_pauth_regnum = num_regs;
c9cd8ca4 3671 ra_sign_state_offset = num_pseudo_regs;
76bed0fd
AH
3672 /* Validate the descriptor provides the mandatory PAUTH registers and
3673 allocate their numbers. */
3674 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
c1e1314d 3675 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
76bed0fd
AH
3676 first_pauth_regnum + i,
3677 aarch64_pauth_register_names[i]);
3678
3679 num_regs += i;
34dcc7cf 3680 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
76bed0fd
AH
3681 }
3682
5e984dbf
LM
3683 /* Add the MTE registers. */
3684 if (feature_mte != NULL)
3685 {
3686 first_mte_regnum = num_regs;
3687 /* Validate the descriptor provides the mandatory MTE registers and
3688 allocate their numbers. */
3689 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3690 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3691 first_mte_regnum + i,
3692 aarch64_mte_register_names[i]);
3693
3694 num_regs += i;
3695 }
e63ae49b
LM
3696 /* W pseudo-registers */
3697 int first_w_regnum = num_pseudo_regs;
3698 num_pseudo_regs += 31;
5e984dbf 3699
07b287a0 3700 if (!valid_p)
c1e1314d 3701 return nullptr;
07b287a0
MS
3702
3703 /* AArch64 code is always little-endian. */
3704 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3705
2b16913c
SM
3706 gdbarch *gdbarch
3707 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
3708 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3709
3710 /* This should be low enough for everything. */
3711 tdep->lowest_pc = 0x20;
3712 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3713 tdep->jb_elt_size = 8;
4da037ef 3714 tdep->vq = vq;
76bed0fd 3715 tdep->pauth_reg_base = first_pauth_regnum;
1ba3a322 3716 tdep->ra_sign_state_regnum = -1;
5e984dbf 3717 tdep->mte_reg_base = first_mte_regnum;
ba60b963
LM
3718 tdep->tls_regnum_base = first_tls_regnum;
3719 tdep->tls_register_count = tls_register_count;
34dcc7cf 3720
07b287a0
MS
3721 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3722 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3723
07b287a0
MS
3724 /* Advance PC across function entry code. */
3725 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3726
3727 /* The stack grows downward. */
3728 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3729
3730 /* Breakpoint manipulation. */
04180708
YQ
3731 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3732 aarch64_breakpoint::kind_from_pc);
3733 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3734 aarch64_breakpoint::bp_from_kind);
07b287a0 3735 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3736 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
3737
3738 /* Information about registers, etc. */
3739 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3740 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3741 set_gdbarch_num_regs (gdbarch, num_regs);
3742
3743 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3744 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3745 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3746 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3747 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3748 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3749 aarch64_pseudo_register_reggroup_p);
76bed0fd 3750 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
07b287a0
MS
3751
3752 /* ABI */
3753 set_gdbarch_short_bit (gdbarch, 16);
3754 set_gdbarch_int_bit (gdbarch, 32);
3755 set_gdbarch_float_bit (gdbarch, 32);
3756 set_gdbarch_double_bit (gdbarch, 64);
3757 set_gdbarch_long_double_bit (gdbarch, 128);
3758 set_gdbarch_long_bit (gdbarch, 64);
3759 set_gdbarch_long_long_bit (gdbarch, 64);
3760 set_gdbarch_ptr_bit (gdbarch, 64);
3761 set_gdbarch_char_signed (gdbarch, 0);
53375380 3762 set_gdbarch_wchar_signed (gdbarch, 0);
07b287a0
MS
3763 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3764 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
552f1157 3765 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
b907456c 3766 set_gdbarch_type_align (gdbarch, aarch64_type_align);
07b287a0 3767
da729c5c
TT
3768 /* Detect whether PC is at a point where the stack has been destroyed. */
3769 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
3770
07b287a0
MS
3771 /* Internal <-> external register number maps. */
3772 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3773
3774 /* Returning results. */
5cb0f2d5 3775 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);
07b287a0
MS
3776
3777 /* Disassembly. */
3778 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3779
3780 /* Virtual tables. */
3781 set_gdbarch_vbit_in_delta (gdbarch, 1);
3782
3783 /* Hook in the ABI-specific overrides, if they have been registered. */
3784 info.target_desc = tdesc;
c1e1314d 3785 info.tdesc_data = tdesc_data.get ();
07b287a0
MS
3786 gdbarch_init_osabi (info, gdbarch);
3787
3788 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
11e1b75f
AH
3789 /* Register DWARF CFA vendor handler. */
3790 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3791 aarch64_execute_dwarf_cfa_vendor_op);
07b287a0 3792
5133a315
LM
3793 /* Permanent/Program breakpoint handling. */
3794 set_gdbarch_program_breakpoint_here_p (gdbarch,
3795 aarch64_program_breakpoint_here_p);
3796
07b287a0
MS
3797 /* Add some default predicates. */
3798 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3799 dwarf2_append_unwinders (gdbarch);
3800 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3801
3802 frame_base_set_default (gdbarch, &aarch64_normal_base);
3803
3804 /* Now we have tuned the configuration, set a few final things,
3805 based on what the OS ABI has told us. */
3806
3807 if (tdep->jb_pc >= 0)
3808 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3809
ea873d8e
PL
3810 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3811
aa7ca1bb
AH
3812 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3813
c1e1314d 3814 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
07b287a0 3815
1ba3a322
LM
3816 /* Fetch the updated number of registers after we're done adding all
3817 entries from features we don't explicitly care about. This is the case
3818 for bare metal debugging stubs that include a lot of system registers. */
3819 num_regs = gdbarch_num_regs (gdbarch);
3820
3821 /* With the number of real registers updated, setup the pseudo-registers and
3822 record their numbers. */
3823
e63ae49b
LM
3824 /* Setup W pseudo-register numbers. */
3825 tdep->w_pseudo_base = first_w_regnum + num_regs;
3826 tdep->w_pseudo_count = 31;
3827
1ba3a322
LM
3828 /* Pointer authentication pseudo-registers. */
3829 if (tdep->has_pauth ())
3830 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
3831
07b287a0
MS
3832 /* Add standard register aliases. */
3833 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3834 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3835 value_of_aarch64_user_reg,
3836 &aarch64_register_aliases[i].regnum);
3837
e8bf1ce4
JB
3838 register_aarch64_ravenscar_ops (gdbarch);
3839
07b287a0
MS
3840 return gdbarch;
3841}
3842
3843static void
3844aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3845{
08106042 3846 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3847
3848 if (tdep == NULL)
3849 return;
3850
09a5d200 3851 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
6cb06a8c 3852 paddress (gdbarch, tdep->lowest_pc));
07b287a0
MS
3853}
3854
#if GDB_SELF_TEST
/* Forward declaration: the body of this unit test is defined later in
   this file, but _initialize_aarch64_tdep (below) needs to reference it
   when registering the self tests.  */
namespace selftests
{
static void aarch64_process_record_test (void);
}
#endif
1e2b521d 3861
/* Module initializer; the declaration silences -Wmissing-declarations.  */
void _initialize_aarch64_tdep ();
void
_initialize_aarch64_tdep ()
{
  /* Hook the AArch64 architecture into gdbarch: aarch64_gdbarch_init
     builds (or reuses) a gdbarch for a given target description, and
     aarch64_dump_tdep prints the tdep for "maint print architecture".  */
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  /* Register the unit tests run by "maint selftest".  */
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
99afc88b
OJ
3885
3886/* AArch64 process record-replay related structures, defines etc. */
3887
/* Allocate the register record buffer: copy the LENGTH register numbers
   accumulated in RECORD_BUF into a freshly heap-allocated array assigned
   to REGS.  Nothing is allocated when LENGTH is zero.  LENGTH is now
   evaluated exactly once (the old body re-expanded it inside the memcpy
   size, a multiple-evaluation hazard for side-effecting arguments and an
   inconsistency with the cached reg_len).  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
  do \
    { \
      unsigned int reg_len = (LENGTH); \
      if (reg_len) \
	{ \
	  REGS = XNEWVEC (uint32_t, reg_len); \
	  memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * reg_len); \
	} \
    } \
  while (0)
99afc88b
OJ
3899
/* Allocate the memory record buffer: copy LENGTH (length, address) records
   accumulated in RECORD_BUF into a freshly heap-allocated array of
   struct aarch64_mem_r assigned to MEMS.  Nothing is allocated when
   LENGTH is zero.  LENGTH is now evaluated exactly once (the old body
   re-expanded it inside the memcpy size, a multiple-evaluation hazard
   and an inconsistency with the cached mem_len).  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
  do \
    { \
      unsigned int mem_len = (LENGTH); \
      if (mem_len) \
	{ \
	  MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
	  memcpy (MEMS, &RECORD_BUF[0], \
		  sizeof (struct aarch64_mem_r) * mem_len); \
	} \
    } \
  while (0)
99afc88b
OJ
3912
3913/* AArch64 record/replay structures and enumerations. */
3914
/* One memory region modified by the instruction being recorded: LEN
   bytes starting at ADDR.  Instances are built pairwise from the
   uint64_t record_buf_mem arrays via MEM_ALLOC, so the field order
   (len first, then addr) must match how those arrays are filled.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
3920
/* Status codes returned by the per-group record handlers below.  */

enum aarch64_record_result
{
  /* Instruction decoded and its side effects were recorded.  */
  AARCH64_RECORD_SUCCESS,
  /* Instruction recognized but not supported by record/replay.  */
  AARCH64_RECORD_UNSUPPORTED,
  /* Instruction encoding could not be decoded.  */
  AARCH64_RECORD_UNKNOWN
};
3927
/* Working state for decoding a single instruction during process
   record.  The handlers fill in the register/memory arrays (heap
   allocated via REG_ALLOC / MEM_ALLOC); the caller owns and releases
   them — presumably after copying into the record target, TODO confirm
   against the caller outside this chunk.  */
struct aarch64_insn_decode_record
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
};
99afc88b
OJ
3939
3940/* Record handler for data processing - register instructions. */
3941
3942static unsigned int
4748a9be 3943aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
3944{
3945 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3946 uint32_t record_buf[4];
3947
3948 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3949 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3950 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3951
3952 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3953 {
3954 uint8_t setflags;
3955
3956 /* Logical (shifted register). */
3957 if (insn_bits24_27 == 0x0a)
3958 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3959 /* Add/subtract. */
3960 else if (insn_bits24_27 == 0x0b)
3961 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3962 else
3963 return AARCH64_RECORD_UNKNOWN;
3964
3965 record_buf[0] = reg_rd;
3966 aarch64_insn_r->reg_rec_count = 1;
3967 if (setflags)
3968 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3969 }
3970 else
3971 {
3972 if (insn_bits24_27 == 0x0b)
3973 {
3974 /* Data-processing (3 source). */
3975 record_buf[0] = reg_rd;
3976 aarch64_insn_r->reg_rec_count = 1;
3977 }
3978 else if (insn_bits24_27 == 0x0a)
3979 {
3980 if (insn_bits21_23 == 0x00)
3981 {
3982 /* Add/subtract (with carry). */
3983 record_buf[0] = reg_rd;
3984 aarch64_insn_r->reg_rec_count = 1;
3985 if (bit (aarch64_insn_r->aarch64_insn, 29))
3986 {
3987 record_buf[1] = AARCH64_CPSR_REGNUM;
3988 aarch64_insn_r->reg_rec_count = 2;
3989 }
3990 }
3991 else if (insn_bits21_23 == 0x02)
3992 {
3993 /* Conditional compare (register) and conditional compare
3994 (immediate) instructions. */
3995 record_buf[0] = AARCH64_CPSR_REGNUM;
3996 aarch64_insn_r->reg_rec_count = 1;
3997 }
3998 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3999 {
85102364 4000 /* Conditional select. */
99afc88b
OJ
4001 /* Data-processing (2 source). */
4002 /* Data-processing (1 source). */
4003 record_buf[0] = reg_rd;
4004 aarch64_insn_r->reg_rec_count = 1;
4005 }
4006 else
4007 return AARCH64_RECORD_UNKNOWN;
4008 }
4009 }
4010
4011 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4012 record_buf);
4013 return AARCH64_RECORD_SUCCESS;
4014}
4015
4016/* Record handler for data processing - immediate instructions. */
4017
4018static unsigned int
4748a9be 4019aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b 4020{
78cc6c2d 4021 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
99afc88b
OJ
4022 uint32_t record_buf[4];
4023
4024 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
99afc88b
OJ
4025 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4026 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4027
4028 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
4029 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
4030 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
4031 {
4032 record_buf[0] = reg_rd;
4033 aarch64_insn_r->reg_rec_count = 1;
4034 }
4035 else if (insn_bits24_27 == 0x01)
4036 {
4037 /* Add/Subtract (immediate). */
4038 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4039 record_buf[0] = reg_rd;
4040 aarch64_insn_r->reg_rec_count = 1;
4041 if (setflags)
4042 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4043 }
4044 else if (insn_bits24_27 == 0x02 && !insn_bit23)
4045 {
4046 /* Logical (immediate). */
4047 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
4048 record_buf[0] = reg_rd;
4049 aarch64_insn_r->reg_rec_count = 1;
4050 if (setflags)
4051 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4052 }
4053 else
4054 return AARCH64_RECORD_UNKNOWN;
4055
4056 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4057 record_buf);
4058 return AARCH64_RECORD_SUCCESS;
4059}
4060
/* Record handler for branch, exception generation and system
   instructions.  Queues the registers an executed instance would modify
   (PC, possibly LR/CPSR/Rt) and returns an aarch64_record_result code.
   NOTE(review): the branch paths post-increment reg_rec_count without
   initializing it — assumes the caller zeroed it; confirm in caller.  */

static unsigned int
aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
{

  aarch64_gdbarch_tdep *tdep
    = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Bits [4:2] == 0, [23:21] == 0 and [1:0] == 01 select SVC;
	     delegate to the OS-specific syscall recorder with the
	     syscall number read from register 8 (x8).  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions. */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions. */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Other system instructions (e.g. barriers) record nothing.  */
	}
      /* Unconditional branch (register). */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate). */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes BL (writes LR) from B.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate). */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4142
/* Record handler for advanced SIMD load and store instructions.
   Memory records are pushed as (length, address) pairs into
   record_buf_mem, hence mem_rec_count = mem_index / 2 below.  Loads
   record the written V registers; stores record the memory they
   overwrite.  */

static unsigned int
aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      /* scale selects the element size; selem the number of structure
	 elements (consecutive V registers) accessed.  */
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the encoding; some scale/size combinations are
	 reserved, and scale 3 encodes the LD1R-style replicate forms.  */
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  /* scale == 0: byte element, always valid.  */
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	/* Load-and-replicate: every accessed V register is written.  */
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22 is the L (load) bit: loads touch registers,
		 stores touch memory.  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 (Q) selects 128-bit vs 64-bit vector registers.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* rpt = register-list repetitions, selem = structure elements.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23: post-index writeback modifies the base register too.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4308
/* Record handler for load and store instructions.  Dispatches on the
   major encoding groups (exclusive, literal, pair, unsigned-immediate,
   register-offset, immediate/unprivileged) and delegates everything
   else to the advanced SIMD handler.  Loads record the destination
   register(s); stores record the memory about to be overwritten.  */

static unsigned int
aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  /* Bit 21: pair form also loads Rt2.  */
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* Manual sign-extension of the 7-bit immediate (bit 6 is the
	     sign); the magnitude is then scaled by the access size.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed forms use the unmodified base address.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Writeback forms also modify the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate) */
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate) */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* imm12 is an unsigned offset scaled by the access size.  */
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  /* Offset comes from Rm, optionally shifted by the access
	     size when the S bit (12) is set.  */
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  /* Manual sign-extension of the 9-bit immediate (bit 8 is the
	     sign).  */
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed (bits [11:10] == 01) uses the unmodified base.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre/post-indexed forms write back to the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
4596
4597/* Record handler for data processing SIMD and floating point instructions. */
4598
4599static unsigned int
4748a9be 4600aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4601{
4602 uint8_t insn_bit21, opcode, rmode, reg_rd;
4603 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4604 uint8_t insn_bits11_14;
4605 uint32_t record_buf[2];
4606
4607 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4608 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4609 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4610 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4611 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4612 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4613 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4614 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4615 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4616
4617 if (record_debug)
b277c936 4618 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
4619
4620 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4621 {
4622 /* Floating point - fixed point conversion instructions. */
4623 if (!insn_bit21)
4624 {
4625 if (record_debug)
b277c936 4626 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
4627
4628 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4629 record_buf[0] = reg_rd;
4630 else
4631 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4632 }
4633 /* Floating point - conditional compare instructions. */
4634 else if (insn_bits10_11 == 0x01)
4635 {
4636 if (record_debug)
b277c936 4637 debug_printf ("FP - conditional compare");
99afc88b
OJ
4638
4639 record_buf[0] = AARCH64_CPSR_REGNUM;
4640 }
4641 /* Floating point - data processing (2-source) and
dda83cd7 4642 conditional select instructions. */
99afc88b
OJ
4643 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4644 {
4645 if (record_debug)
b277c936 4646 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
4647
4648 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4649 }
4650 else if (insn_bits10_11 == 0x00)
4651 {
4652 /* Floating point - immediate instructions. */
4653 if ((insn_bits12_15 & 0x01) == 0x01
4654 || (insn_bits12_15 & 0x07) == 0x04)
4655 {
4656 if (record_debug)
b277c936 4657 debug_printf ("FP - immediate");
99afc88b
OJ
4658 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4659 }
4660 /* Floating point - compare instructions. */
4661 else if ((insn_bits12_15 & 0x03) == 0x02)
4662 {
4663 if (record_debug)
b277c936 4664 debug_printf ("FP - immediate");
99afc88b
OJ
4665 record_buf[0] = AARCH64_CPSR_REGNUM;
4666 }
4667 /* Floating point - integer conversions instructions. */
f62fce35 4668 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
4669 {
4670 /* Convert float to integer instruction. */
4671 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4672 {
4673 if (record_debug)
b277c936 4674 debug_printf ("float to int conversion");
99afc88b
OJ
4675
4676 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4677 }
4678 /* Convert integer to float instruction. */
4679 else if ((opcode >> 1) == 0x01 && !rmode)
4680 {
4681 if (record_debug)
b277c936 4682 debug_printf ("int to float conversion");
99afc88b
OJ
4683
4684 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4685 }
4686 /* Move float to integer instruction. */
4687 else if ((opcode >> 1) == 0x03)
4688 {
4689 if (record_debug)
b277c936 4690 debug_printf ("move float to int");
99afc88b
OJ
4691
4692 if (!(opcode & 0x01))
4693 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4694 else
4695 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4696 }
f62fce35
YQ
4697 else
4698 return AARCH64_RECORD_UNKNOWN;
dda83cd7 4699 }
f62fce35
YQ
4700 else
4701 return AARCH64_RECORD_UNKNOWN;
dda83cd7 4702 }
f62fce35
YQ
4703 else
4704 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
4705 }
4706 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4707 {
4708 if (record_debug)
b277c936 4709 debug_printf ("SIMD copy");
99afc88b
OJ
4710
4711 /* Advanced SIMD copy instructions. */
4712 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4713 && !bit (aarch64_insn_r->aarch64_insn, 15)
4714 && bit (aarch64_insn_r->aarch64_insn, 10))
4715 {
4716 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4717 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4718 else
4719 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4720 }
4721 else
4722 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4723 }
4724 /* All remaining floating point or advanced SIMD instructions. */
4725 else
4726 {
4727 if (record_debug)
b277c936 4728 debug_printf ("all remain");
99afc88b
OJ
4729
4730 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4731 }
4732
4733 if (record_debug)
b277c936 4734 debug_printf ("\n");
99afc88b 4735
bfbe4b84 4736 /* Record the V/X register. */
99afc88b 4737 aarch64_insn_r->reg_rec_count++;
bfbe4b84
LM
4738
4739 /* Some of these instructions may set bits in the FPSR, so record it
4740 too. */
4741 record_buf[1] = AARCH64_FPSR_REGNUM;
4742 aarch64_insn_r->reg_rec_count++;
4743
4744 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
99afc88b
OJ
4745 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4746 record_buf);
4747 return AARCH64_RECORD_SUCCESS;
4748}
4749
4750/* Decodes insns type and invokes its record handler. */
4751
4752static unsigned int
4748a9be 4753aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4754{
4755 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4756
4757 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4758 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4759 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4760 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4761
4762 /* Data processing - immediate instructions. */
4763 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4764 return aarch64_record_data_proc_imm (aarch64_insn_r);
4765
4766 /* Branch, exception generation and system instructions. */
4767 if (ins_bit26 && !ins_bit27 && ins_bit28)
4768 return aarch64_record_branch_except_sys (aarch64_insn_r);
4769
4770 /* Load and store instructions. */
4771 if (!ins_bit25 && ins_bit27)
4772 return aarch64_record_load_store (aarch64_insn_r);
4773
4774 /* Data processing - register instructions. */
4775 if (ins_bit25 && !ins_bit26 && ins_bit27)
4776 return aarch64_record_data_proc_reg (aarch64_insn_r);
4777
4778 /* Data processing - SIMD and floating point instructions. */
4779 if (ins_bit25 && ins_bit26 && ins_bit27)
4780 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4781
4782 return AARCH64_RECORD_UNSUPPORTED;
4783}
4784
4785/* Cleans up local record registers and memory allocations. */
4786
4787static void
4748a9be 4788deallocate_reg_mem (aarch64_insn_decode_record *record)
99afc88b
OJ
4789{
4790 xfree (record->aarch64_regs);
4791 xfree (record->aarch64_mems);
4792}
4793
1e2b521d
YQ
#if GDB_SELF_TEST
namespace selftests {

/* Check that the decoder classifies a PRFM (prefetch) instruction as a
   successful record that touches no registers and no memory.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record aarch64_record;
  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;

  uint32_t ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4827
99afc88b
OJ
4828/* Parse the current instruction and record the values of the registers and
4829 memory that will be changed in current instruction to record_arch_list
4830 return -1 if something is wrong. */
4831
4832int
4833aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4834 CORE_ADDR insn_addr)
4835{
4836 uint32_t rec_no = 0;
4837 uint8_t insn_size = 4;
4838 uint32_t ret = 0;
99afc88b 4839 gdb_byte buf[insn_size];
4748a9be 4840 aarch64_insn_decode_record aarch64_record;
99afc88b
OJ
4841
4842 memset (&buf[0], 0, insn_size);
4748a9be 4843 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
99afc88b
OJ
4844 target_read_memory (insn_addr, &buf[0], insn_size);
4845 aarch64_record.aarch64_insn
4846 = (uint32_t) extract_unsigned_integer (&buf[0],
4847 insn_size,
4848 gdbarch_byte_order (gdbarch));
4849 aarch64_record.regcache = regcache;
4850 aarch64_record.this_addr = insn_addr;
4851 aarch64_record.gdbarch = gdbarch;
4852
4853 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4854 if (ret == AARCH64_RECORD_UNSUPPORTED)
4855 {
6cb06a8c
TT
4856 gdb_printf (gdb_stderr,
4857 _("Process record does not support instruction "
4858 "0x%0x at address %s.\n"),
4859 aarch64_record.aarch64_insn,
4860 paddress (gdbarch, insn_addr));
99afc88b
OJ
4861 ret = -1;
4862 }
4863
4864 if (0 == ret)
4865 {
4866 /* Record registers. */
4867 record_full_arch_list_add_reg (aarch64_record.regcache,
4868 AARCH64_PC_REGNUM);
4869 /* Always record register CPSR. */
4870 record_full_arch_list_add_reg (aarch64_record.regcache,
4871 AARCH64_CPSR_REGNUM);
4872 if (aarch64_record.aarch64_regs)
4873 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4874 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4875 aarch64_record.aarch64_regs[rec_no]))
4876 ret = -1;
4877
4878 /* Record memories. */
4879 if (aarch64_record.aarch64_mems)
4880 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4881 if (record_full_arch_list_add_mem
4882 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4883 aarch64_record.aarch64_mems[rec_no].len))
4884 ret = -1;
4885
4886 if (record_full_arch_list_add_end ())
4887 ret = -1;
4888 }
4889
4890 deallocate_reg_mem (&aarch64_record);
4891 return ret;
4892}