/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2023 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "language.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>
#include <unordered_map>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4

/* All possible aarch64 target descriptors.  */
static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;

/* The standard register names, and all the valid aliases for them.
   We're not adding fp here, that name is already taken, see
   _initialize_frame_reg.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* Link register alias for x30.  */
  {"lr", AARCH64_LR_REGNUM},
  /* SP is the canonical name for x31 according to aarch64_r_register_names,
     so we're adding an x31 alias for sp.  */
  {"x31", AARCH64_SP_REGNUM},
  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM! */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};

static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
			 frame_info_ptr this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->ra_sign_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

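/* Illustrative sketch of the unmasking arithmetic above (example values,
   assuming a 48-bit virtual address space where pauth_cmask reads back as
   0x007f000000000000).  Stripping the PAC signature is a single AND with
   the complement of the mask:

     CORE_ADDR cmask     = 0x007f000000000000;   // from pauth_cmask
     CORE_ADDR signed_lr = 0x002b0000004005a4;   // LR as signed by paciasp
     CORE_ADDR plain_lr  = signed_lr & ~cmask;   // 0x00000000004005a4

   The concrete mask depends on the target's address-space configuration and
   is always read from the pauth_cmask register rather than hard-coded.  */
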
/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == addsub_ext
	       && strcmp ("sub", inst.opcode->name) == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);

	  regs[rd] = pv_subtract (regs[rn], regs[rm]);
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  unsigned rd = inst.operands[0].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
	  gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[rd] = pv_constant (inst.operands[1].imm.value
				  << inst.operands[1].shifter.amount);
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff  /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

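/* The selftest above is registered elsewhere in this file (from the tdep
   initialization code) and can then be run from a live GDB session, for
   example (illustrative invocation; the exact test name is whatever is
   passed at registration time):

     (gdb) maintenance selftest aarch64-analyze-prologue  */
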
/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (frame_info_ptr this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp;
  if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					cache->prev_pc))
    cache->prev_sp += cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (frame_info_ptr this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (frame_info_ptr this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (frame_info_ptr this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     frame_info_ptr this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

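/* Illustrative example of the frame-base calculation above (values invented
   for the sketch): if prologue analysis found framereg == x29 with
   framesize == 48 and the caller's SP unwound to 0x7ffffff0a0, the base
   returned for this frame is

     prev_sp - framesize = 0x7ffffff0a0 - 48 = 0x7ffffff070

   and the same address is handed back for the base, args and locals
   queries, since aarch64_normal_base wires all three to the one
   function.  */
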
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
			      void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (_("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       frame_info_ptr this_frame)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->ra_sign_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

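/* Illustrative sketch of the toggling above for a typical pauth function
   (instruction sequence invented for the example):

     paciasp                    ; CFI: DW_CFA_AARCH64_negate_ra_state -> 1
     stp  x29, x30, [sp, #-32]!
     ...
     ldp  x29, x30, [sp], #32
     autiasp                    ; CFI: DW_CFA_AARCH64_negate_ra_state -> 0
     ret

   Each DW_CFA_AARCH64_negate_ra_state flips the saved-value expression
   between DW_OP_lit0 and DW_OP_lit1, so the unwinder knows whether the
   saved LR still carries a PAC signature at any given PC.  */
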
/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
					     gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple choices
	 of such instructions with different immediate values.  Different OS'
	 may use a different variation, but they have the same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}

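/* Illustrative check of the mask above (example encodings, not tied to a
   particular OS): BRK #imm16 is encoded as 0xd4200000 | (imm16 << 5), so

     0xd4200000  (brk #0)       & 0xffe0001f == 0xd4200000
     0xd43e0000  (brk #0xf000)  & 0xffe0001f == 0xd4200000

   i.e. a BRK with any immediate is recognised as a permanent program
   breakpoint.  */
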
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (t->length () > 16)
	return 16;
      else
	return t->length ();
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

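/* Illustrative results of the rule above (hypothetical types): a 16-byte
   Advanced SIMD vector such as 'int32x4_t' keeps its natural 16-byte
   alignment, while a hypothetical 32-byte vector is still capped at 16.
   Non-vector types return 0 here and fall through to the generic
   type_align code.  */
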
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

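/* Illustrative classifications under the rules above (example types, not
   taken from the original sources):

     struct hfa2 { double a; double b; };   // candidate: count 2, double
     struct hfa4 { float a, b, c, d; };     // candidate: count 4, float
     struct mixed { float a; double b; };   // rejected: mixed base types
     struct five { float v[5]; };           // rejected: needs 5 > 4 registers
     _Complex double z;                     // candidate: count 2, double
*/
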
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = type->length ();
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = arg->contents ().data ();

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

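/* Illustrative example of the loop above (invented argument): a 12-byte
   struct passed with info->ngrn == 2 is split into an 8-byte chunk written
   to x2 and a 4-byte chunk written to x3; on a big-endian target that final
   4-byte chunk is shifted left by 32 bits so it lands in the most
   significant bytes of x3, which is the sub-word adjustment implemented
   above.  */
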
1653/* Attempt to marshall a value in a V register. Return 1 if
1654 successful, or 0 if insufficient registers are available. This
1655 function, unlike the equivalent pass_in_x() function does not
1656 handle arguments spread across multiple registers. */
1657
1658static int
1659pass_in_v (struct gdbarch *gdbarch,
1660 struct regcache *regcache,
1661 struct aarch64_call_info *info,
0735fddd 1662 int len, const bfd_byte *buf)
07b287a0
MS
1663{
1664 if (info->nsrn < 8)
1665 {
07b287a0 1666 int regnum = AARCH64_V0_REGNUM + info->nsrn;
3ff2c72e
AH
1667 /* Enough space for a full vector register. */
1668 gdb_byte reg[register_size (gdbarch, regnum)];
1669 gdb_assert (len <= sizeof (reg));
07b287a0
MS
1670
1671 info->argnum++;
1672 info->nsrn++;
1673
0735fddd
YQ
1674 memset (reg, 0, sizeof (reg));
1675 /* PCS C.1, the argument is allocated to the least significant
1676 bits of V register. */
1677 memcpy (reg, buf, len);
b66f5587 1678 regcache->cooked_write (regnum, reg);
0735fddd 1679
c6185dce
SM
1680 aarch64_debug_printf ("arg %d in %s", info->argnum,
1681 gdbarch_register_name (gdbarch, regnum));
1682
07b287a0
MS
1683 return 1;
1684 }
1685 info->nsrn = 8;
1686 return 0;
1687}
1688
1689/* Marshall an argument onto the stack. */
1690
1691static void
1692pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1693 struct value *arg)
07b287a0 1694{
efaf1ae0 1695 const bfd_byte *buf = arg->contents ().data ();
df86565b 1696 int len = type->length ();
07b287a0
MS
1697 int align;
1698 stack_item_t item;
1699
1700 info->argnum++;
1701
b907456c 1702 align = type_align (type);
07b287a0
MS
1703
1704 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1705 natural alignment of the argument's type. */
1706 align = align_up (align, 8);
1707
1708 /* The AArch64 PCS requires at most doubleword alignment. */
1709 if (align > 16)
1710 align = 16;
1711
c6185dce
SM
1712 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1713 info->nsaa);
07b287a0
MS
1714
1715 item.len = len;
1716 item.data = buf;
89055eaa 1717 info->si.push_back (item);
07b287a0
MS
1718
1719 info->nsaa += len;
1720 if (info->nsaa & (align - 1))
1721 {
1722 /* Push stack alignment padding. */
1723 int pad = align - (info->nsaa & (align - 1));
1724
1725 item.len = pad;
c3c87445 1726 item.data = NULL;
07b287a0 1727
89055eaa 1728 info->si.push_back (item);
07b287a0
MS
1729 info->nsaa += pad;
1730 }
1731}
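/* A minimal hypothetical sketch (not part of the GDB sources) of the NSAA
   padding rule implemented above: a 12-byte argument whose type requires
   16-byte alignment is followed by 4 bytes of padding so the next stacked
   argument stays aligned.  */
static int
example_nsaa_after_arg (int nsaa, int len, int align)
{
  nsaa += len;
  if (nsaa & (align - 1))
    nsaa += align - (nsaa & (align - 1));
  return nsaa;	/* example_nsaa_after_arg (0, 12, 16) == 16.  */
}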
1732
1733/* Marshall an argument into a sequence of one or more consecutive X
1734 registers or, if insufficient X registers are available then onto
1735 the stack. */
1736
1737static void
1738pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1739 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1740 struct value *arg)
07b287a0 1741{
df86565b 1742 int len = type->length ();
07b287a0
MS
1743 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1744
1745 /* PCS C.13 - Pass in registers if we have enough spare. */
1746 if (info->ngrn + nregs <= 8)
1747 {
8e80f9d1 1748 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1749 info->ngrn += nregs;
1750 }
1751 else
1752 {
1753 info->ngrn = 8;
8e80f9d1 1754 pass_on_stack (info, type, arg);
07b287a0
MS
1755 }
1756}
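/* Hypothetical helper (not part of the GDB sources) making the register
   count rule above explicit: an argument needs ceil (len / X_REGISTER_SIZE)
   X registers, so a 12-byte struct takes two and is only passed in
   registers while NGRN is 6 or less.  */
static int
example_x_regs_needed (int len)
{
  return (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;	/* 12 -> 2 */
}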
1757
0e745c60
AH
1758/* Pass a value, which is of type arg_type, in a V register. Assumes the value
1759 satisfies aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1760 registers. A return value of false is an error state as the value will have
1761 been partially passed to the stack. */
1762static bool
1763pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1764 struct aarch64_call_info *info, struct type *arg_type,
1765 struct value *arg)
07b287a0 1766{
78134374 1767 switch (arg_type->code ())
0e745c60
AH
1768 {
1769 case TYPE_CODE_FLT:
81657e58 1770 case TYPE_CODE_DECFLOAT:
df86565b 1771 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
efaf1ae0 1772 arg->contents ().data ());
0e745c60
AH
1773 break;
1774
1775 case TYPE_CODE_COMPLEX:
1776 {
efaf1ae0 1777 const bfd_byte *buf = arg->contents ().data ();
27710edb 1778 struct type *target_type = check_typedef (arg_type->target_type ());
0e745c60 1779
df86565b 1780 if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
0e745c60
AH
1781 buf))
1782 return false;
1783
df86565b
SM
1784 return pass_in_v (gdbarch, regcache, info, target_type->length (),
1785 buf + target_type->length ());
0e745c60
AH
1786 }
1787
1788 case TYPE_CODE_ARRAY:
bd63c870 1789 if (arg_type->is_vector ())
df86565b 1790 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
efaf1ae0 1791 arg->contents ().data ());
0e745c60
AH
1792 /* fall through. */
1793
1794 case TYPE_CODE_STRUCT:
1795 case TYPE_CODE_UNION:
1f704f76 1796 for (int i = 0; i < arg_type->num_fields (); i++)
0e745c60 1797 {
353229bf 1798 /* Don't include static fields. */
ceacbf6e 1799 if (field_is_static (&arg_type->field (i)))
353229bf
AH
1800 continue;
1801
0e745c60 1802 struct value *field = value_primitive_field (arg, 0, i, arg_type);
d0c97917 1803 struct type *field_type = check_typedef (field->type ());
0e745c60
AH
1804
1805 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1806 field))
1807 return false;
1808 }
1809 return true;
1810
1811 default:
1812 return false;
1813 }
07b287a0
MS
1814}
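/* Hypothetical argument type (not from the GDB sources) handled by the
   function above: each float field of this HFA is recursed into and lands
   in its own V register, consuming three of the eight NSRN slots.  */
struct example_hfa_triple { float a, b, c; };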
1815
1816/* Implement the "push_dummy_call" gdbarch method. */
1817
1818static CORE_ADDR
1819aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1820 struct regcache *regcache, CORE_ADDR bp_addr,
1821 int nargs,
cf84fa6b
AH
1822 struct value **args, CORE_ADDR sp,
1823 function_call_return_method return_method,
07b287a0
MS
1824 CORE_ADDR struct_addr)
1825{
07b287a0 1826 int argnum;
07b287a0 1827 struct aarch64_call_info info;
07b287a0 1828
07b287a0
MS
1829 /* We need to know what the type of the called function is in order
1830 to determine the number of named/anonymous arguments for the
1831 actual argument placement, and the return type in order to handle
1832 return value correctly.
1833
1834 The generic code above us views the decision of return in memory
1835 or return in registers as a two-stage process. The language
1836 handler is consulted first and may decide to return in memory (e.g. a
1837 class with a copy constructor returned by value); this will cause
1838 the generic code to allocate space AND insert an initial leading
1839 argument.
1840
1841 If the language code does not decide to pass in memory then the
1842 target code is consulted.
1843
1844 If the language code decides to pass in memory we want to move
1845 the pointer inserted as the initial argument from the argument
1846 list and into X8, the conventional AArch64 struct return pointer
38a72da0 1847 register. */
07b287a0
MS
1848
1849 /* Set the return address. For the AArch64, the return breakpoint
1850 is always at BP_ADDR. */
1851 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1852
38a72da0
AH
1853 /* If we were given an initial argument for the return slot, lose it. */
1854 if (return_method == return_method_hidden_param)
07b287a0
MS
1855 {
1856 args++;
1857 nargs--;
1858 }
1859
1860 /* The struct_return pointer occupies X8. */
38a72da0 1861 if (return_method != return_method_normal)
07b287a0 1862 {
c6185dce
SM
1863 aarch64_debug_printf ("struct return in %s = 0x%s",
1864 gdbarch_register_name
1865 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1866 paddress (gdbarch, struct_addr));
1867
07b287a0
MS
1868 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1869 struct_addr);
1870 }
1871
1872 for (argnum = 0; argnum < nargs; argnum++)
1873 {
1874 struct value *arg = args[argnum];
0e745c60
AH
1875 struct type *arg_type, *fundamental_type;
1876 int len, elements;
07b287a0 1877
d0c97917 1878 arg_type = check_typedef (arg->type ());
df86565b 1879 len = arg_type->length ();
07b287a0 1880
0e745c60
AH
1881 /* If arg can be passed in V registers as per the AAPCS64, then do so
1882 if there are enough spare registers. */
1883 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1884 &fundamental_type))
1885 {
1886 if (info.nsrn + elements <= 8)
1887 {
1888 /* We know that we have sufficient registers available, therefore
1889 this will never need to fall back to the stack. */
1890 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1891 arg))
1892 gdb_assert_not_reached ("Failed to push args");
1893 }
1894 else
1895 {
1896 info.nsrn = 8;
1897 pass_on_stack (&info, arg_type, arg);
1898 }
1899 continue;
1900 }
1901
78134374 1902 switch (arg_type->code ())
07b287a0
MS
1903 {
1904 case TYPE_CODE_INT:
1905 case TYPE_CODE_BOOL:
1906 case TYPE_CODE_CHAR:
1907 case TYPE_CODE_RANGE:
1908 case TYPE_CODE_ENUM:
28397ae7 1909 if (len < 4 && !is_fixed_point_type (arg_type))
07b287a0
MS
1910 {
1911 /* Promote to 32 bit integer. */
c6d940a9 1912 if (arg_type->is_unsigned ())
07b287a0
MS
1913 arg_type = builtin_type (gdbarch)->builtin_uint32;
1914 else
1915 arg_type = builtin_type (gdbarch)->builtin_int32;
1916 arg = value_cast (arg_type, arg);
1917 }
8e80f9d1 1918 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1919 break;
1920
07b287a0
MS
1921 case TYPE_CODE_STRUCT:
1922 case TYPE_CODE_ARRAY:
1923 case TYPE_CODE_UNION:
0e745c60 1924 if (len > 16)
07b287a0
MS
1925 {
1926 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1927 invisible reference. */
1928
1929 /* Allocate aligned storage. */
1930 sp = align_down (sp - len, 16);
1931
1932 /* Write the real data into the stack. */
efaf1ae0 1933 write_memory (sp, arg->contents ().data (), len);
07b287a0
MS
1934
1935 /* Construct the indirection. */
1936 arg_type = lookup_pointer_type (arg_type);
1937 arg = value_from_pointer (arg_type, sp);
8e80f9d1 1938 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1939 }
1940 else
1941 /* PCS C.15 / C.18 - pass the value in one or more registers or on the stack. */
8e80f9d1 1942 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1943 break;
1944
1945 default:
8e80f9d1 1946 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1947 break;
1948 }
1949 }
1950
1951 /* Make sure stack retains 16 byte alignment. */
1952 if (info.nsaa & 15)
1953 sp -= 16 - (info.nsaa & 15);
1954
89055eaa 1955 while (!info.si.empty ())
07b287a0 1956 {
89055eaa 1957 const stack_item_t &si = info.si.back ();
07b287a0 1958
89055eaa
TT
1959 sp -= si.len;
1960 if (si.data != NULL)
1961 write_memory (sp, si.data, si.len);
1962 info.si.pop_back ();
07b287a0
MS
1963 }
1964
07b287a0
MS
1965 /* Finally, update the SP register. */
1966 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1967
1968 return sp;
1969}
1970
1971/* Implement the "frame_align" gdbarch method. */
1972
1973static CORE_ADDR
1974aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1975{
1976 /* Align the stack to sixteen bytes. */
1977 return sp & ~(CORE_ADDR) 15;
1978}
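/* A minimal hypothetical example of the rounding above (not from the GDB
   sources): an unaligned SP is always rounded down to the previous 16-byte
   boundary, never up.  */
static const CORE_ADDR example_unaligned_sp = 0x7ffffff9;
static const CORE_ADDR example_aligned_sp
  = example_unaligned_sp & ~(CORE_ADDR) 15;	/* == 0x7ffffff0 */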
1979
1980/* Return the type for an AdvSIMD Q register. */
1981
1982static struct type *
1983aarch64_vnq_type (struct gdbarch *gdbarch)
1984{
08106042 1985 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
1986
1987 if (tdep->vnq_type == NULL)
1988 {
1989 struct type *t;
1990 struct type *elem;
1991
1992 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1993 TYPE_CODE_UNION);
1994
1995 elem = builtin_type (gdbarch)->builtin_uint128;
1996 append_composite_type_field (t, "u", elem);
1997
1998 elem = builtin_type (gdbarch)->builtin_int128;
1999 append_composite_type_field (t, "s", elem);
2000
2001 tdep->vnq_type = t;
2002 }
2003
2004 return tdep->vnq_type;
2005}
2006
2007/* Return the type for an AdvSIMD D register. */
2008
2009static struct type *
2010aarch64_vnd_type (struct gdbarch *gdbarch)
2011{
08106042 2012 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2013
2014 if (tdep->vnd_type == NULL)
2015 {
2016 struct type *t;
2017 struct type *elem;
2018
2019 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2020 TYPE_CODE_UNION);
2021
2022 elem = builtin_type (gdbarch)->builtin_double;
2023 append_composite_type_field (t, "f", elem);
2024
2025 elem = builtin_type (gdbarch)->builtin_uint64;
2026 append_composite_type_field (t, "u", elem);
2027
2028 elem = builtin_type (gdbarch)->builtin_int64;
2029 append_composite_type_field (t, "s", elem);
2030
2031 tdep->vnd_type = t;
2032 }
2033
2034 return tdep->vnd_type;
2035}
2036
2037/* Return the type for an AdvSIMD S register. */
2038
2039static struct type *
2040aarch64_vns_type (struct gdbarch *gdbarch)
2041{
08106042 2042 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2043
2044 if (tdep->vns_type == NULL)
2045 {
2046 struct type *t;
2047 struct type *elem;
2048
2049 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2050 TYPE_CODE_UNION);
2051
2052 elem = builtin_type (gdbarch)->builtin_float;
2053 append_composite_type_field (t, "f", elem);
2054
2055 elem = builtin_type (gdbarch)->builtin_uint32;
2056 append_composite_type_field (t, "u", elem);
2057
2058 elem = builtin_type (gdbarch)->builtin_int32;
2059 append_composite_type_field (t, "s", elem);
2060
2061 tdep->vns_type = t;
2062 }
2063
2064 return tdep->vns_type;
2065}
2066
2067/* Return the type for an AdvSIMD H register. */
2068
2069static struct type *
2070aarch64_vnh_type (struct gdbarch *gdbarch)
2071{
08106042 2072 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2073
2074 if (tdep->vnh_type == NULL)
2075 {
2076 struct type *t;
2077 struct type *elem;
2078
2079 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2080 TYPE_CODE_UNION);
2081
5291fe3c
SP
2082 elem = builtin_type (gdbarch)->builtin_bfloat16;
2083 append_composite_type_field (t, "bf", elem);
2084
a6d0f249
AH
2085 elem = builtin_type (gdbarch)->builtin_half;
2086 append_composite_type_field (t, "f", elem);
2087
07b287a0
MS
2088 elem = builtin_type (gdbarch)->builtin_uint16;
2089 append_composite_type_field (t, "u", elem);
2090
2091 elem = builtin_type (gdbarch)->builtin_int16;
2092 append_composite_type_field (t, "s", elem);
2093
2094 tdep->vnh_type = t;
2095 }
2096
2097 return tdep->vnh_type;
2098}
2099
2100/* Return the type for an AdvSIMD B register. */
2101
2102static struct type *
2103aarch64_vnb_type (struct gdbarch *gdbarch)
2104{
08106042 2105 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2106
2107 if (tdep->vnb_type == NULL)
2108 {
2109 struct type *t;
2110 struct type *elem;
2111
2112 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2113 TYPE_CODE_UNION);
2114
2115 elem = builtin_type (gdbarch)->builtin_uint8;
2116 append_composite_type_field (t, "u", elem);
2117
2118 elem = builtin_type (gdbarch)->builtin_int8;
2119 append_composite_type_field (t, "s", elem);
2120
2121 tdep->vnb_type = t;
2122 }
2123
2124 return tdep->vnb_type;
2125}
2126
63bad7b6
AH
2127/* Return the type for an AdvSIMD V register. */
2128
2129static struct type *
2130aarch64_vnv_type (struct gdbarch *gdbarch)
2131{
08106042 2132 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6
AH
2133
2134 if (tdep->vnv_type == NULL)
2135 {
09624f1f 2136 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
bffa1015
AH
2137 slice from the non-pseudo vector registers. However, NEON V registers
2138 are always vector registers, and need constructing as such. */
2139 const struct builtin_type *bt = builtin_type (gdbarch);
2140
63bad7b6
AH
2141 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2142 TYPE_CODE_UNION);
2143
bffa1015
AH
2144 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2145 TYPE_CODE_UNION);
2146 append_composite_type_field (sub, "f",
2147 init_vector_type (bt->builtin_double, 2));
2148 append_composite_type_field (sub, "u",
2149 init_vector_type (bt->builtin_uint64, 2));
2150 append_composite_type_field (sub, "s",
2151 init_vector_type (bt->builtin_int64, 2));
2152 append_composite_type_field (t, "d", sub);
2153
2154 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2155 TYPE_CODE_UNION);
2156 append_composite_type_field (sub, "f",
2157 init_vector_type (bt->builtin_float, 4));
2158 append_composite_type_field (sub, "u",
2159 init_vector_type (bt->builtin_uint32, 4));
2160 append_composite_type_field (sub, "s",
2161 init_vector_type (bt->builtin_int32, 4));
2162 append_composite_type_field (t, "s", sub);
2163
2164 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2165 TYPE_CODE_UNION);
5291fe3c
SP
2166 append_composite_type_field (sub, "bf",
2167 init_vector_type (bt->builtin_bfloat16, 8));
a6d0f249
AH
2168 append_composite_type_field (sub, "f",
2169 init_vector_type (bt->builtin_half, 8));
bffa1015
AH
2170 append_composite_type_field (sub, "u",
2171 init_vector_type (bt->builtin_uint16, 8));
2172 append_composite_type_field (sub, "s",
2173 init_vector_type (bt->builtin_int16, 8));
2174 append_composite_type_field (t, "h", sub);
2175
2176 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2177 TYPE_CODE_UNION);
2178 append_composite_type_field (sub, "u",
2179 init_vector_type (bt->builtin_uint8, 16));
2180 append_composite_type_field (sub, "s",
2181 init_vector_type (bt->builtin_int8, 16));
2182 append_composite_type_field (t, "b", sub);
2183
2184 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2185 TYPE_CODE_UNION);
2186 append_composite_type_field (sub, "u",
2187 init_vector_type (bt->builtin_uint128, 1));
2188 append_composite_type_field (sub, "s",
2189 init_vector_type (bt->builtin_int128, 1));
2190 append_composite_type_field (t, "q", sub);
63bad7b6
AH
2191
2192 tdep->vnv_type = t;
2193 }
2194
2195 return tdep->vnv_type;
2196}
2197
07b287a0
MS
2198/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2199
2200static int
2201aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2202{
08106042 2203 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
34dcc7cf 2204
07b287a0
MS
2205 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2206 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2207
2208 if (reg == AARCH64_DWARF_SP)
2209 return AARCH64_SP_REGNUM;
2210
1fe84861
YY
2211 if (reg == AARCH64_DWARF_PC)
2212 return AARCH64_PC_REGNUM;
2213
07b287a0
MS
2214 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2215 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2216
65d4cada
AH
2217 if (reg == AARCH64_DWARF_SVE_VG)
2218 return AARCH64_SVE_VG_REGNUM;
2219
2220 if (reg == AARCH64_DWARF_SVE_FFR)
2221 return AARCH64_SVE_FFR_REGNUM;
2222
2223 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2224 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2225
2226 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2227 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2228
34dcc7cf
AH
2229 if (tdep->has_pauth ())
2230 {
c9cd8ca4
LM
2231 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2232 return tdep->ra_sign_state_regnum;
34dcc7cf
AH
2233 }
2234
07b287a0
MS
2235 return -1;
2236}
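/* Hypothetical caller-side sketch (not part of the GDB sources) of the
   mapping above: the DWARF number for x1 resolves to GDB's x1 register
   number, while an unknown DWARF number yields -1.  */
static int
example_dwarf_x1_to_gdb (struct gdbarch *gdbarch)
{
  /* Yields AARCH64_X0_REGNUM + 1.  */
  return aarch64_dwarf_reg_to_regnum (gdbarch, AARCH64_DWARF_X0 + 1);
}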
07b287a0
MS
2237
2238/* Implement the "print_insn" gdbarch method. */
2239
2240static int
2241aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2242{
2243 info->symbols = NULL;
6394c606 2244 return default_print_insn (memaddr, info);
07b287a0
MS
2245}
2246
2247/* AArch64 BRK software debug mode instruction.
2248 Note that AArch64 code is always little-endian.
2249 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 2250constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 2251
04180708 2252typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
07b287a0
MS
2253
2254/* Extract from an array REGS containing the (raw) register state a
2255 function return value of type TYPE, and copy that, in virtual
2256 format, into VALBUF. */
2257
2258static void
2259aarch64_extract_return_value (struct type *type, struct regcache *regs,
2260 gdb_byte *valbuf)
2261{
ac7936df 2262 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2263 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
2264 int elements;
2265 struct type *fundamental_type;
07b287a0 2266
4f4aedeb
AH
2267 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2268 &fundamental_type))
07b287a0 2269 {
df86565b 2270 int len = fundamental_type->length ();
4f4aedeb
AH
2271
2272 for (int i = 0; i < elements; i++)
2273 {
2274 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
2275 /* Enough space for a full vector register. */
2276 gdb_byte buf[register_size (gdbarch, regno)];
2277 gdb_assert (len <= sizeof (buf));
4f4aedeb 2278
c6185dce
SM
2279 aarch64_debug_printf
2280 ("read HFA or HVA return value element %d from %s",
2281 i + 1, gdbarch_register_name (gdbarch, regno));
2282
4f4aedeb 2283 regs->cooked_read (regno, buf);
07b287a0 2284
4f4aedeb
AH
2285 memcpy (valbuf, buf, len);
2286 valbuf += len;
2287 }
07b287a0 2288 }
78134374
SM
2289 else if (type->code () == TYPE_CODE_INT
2290 || type->code () == TYPE_CODE_CHAR
2291 || type->code () == TYPE_CODE_BOOL
2292 || type->code () == TYPE_CODE_PTR
aa006118 2293 || TYPE_IS_REFERENCE (type)
78134374 2294 || type->code () == TYPE_CODE_ENUM)
07b287a0 2295 {
6471e7d2 2296 /* If the type is a plain integer, then the access is
07b287a0
MS
2297 straight-forward. Otherwise we have to play around a bit
2298 more. */
df86565b 2299 int len = type->length ();
07b287a0
MS
2300 int regno = AARCH64_X0_REGNUM;
2301 ULONGEST tmp;
2302
2303 while (len > 0)
2304 {
2305 /* By using store_unsigned_integer we avoid having to do
2306 anything special for small big-endian values. */
2307 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2308 store_unsigned_integer (valbuf,
2309 (len > X_REGISTER_SIZE
2310 ? X_REGISTER_SIZE : len), byte_order, tmp);
2311 len -= X_REGISTER_SIZE;
2312 valbuf += X_REGISTER_SIZE;
2313 }
2314 }
07b287a0
MS
2315 else
2316 {
2317 /* For a structure or union the behaviour is as if the value had
dda83cd7
SM
2318 been stored to word-aligned memory and then loaded into
2319 registers with 64-bit load instruction(s). */
df86565b 2320 int len = type->length ();
07b287a0
MS
2321 int regno = AARCH64_X0_REGNUM;
2322 bfd_byte buf[X_REGISTER_SIZE];
2323
2324 while (len > 0)
2325 {
dca08e1f 2326 regs->cooked_read (regno++, buf);
07b287a0
MS
2327 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2328 len -= X_REGISTER_SIZE;
2329 valbuf += X_REGISTER_SIZE;
2330 }
2331 }
2332}
2333
2334
2335/* Will a function return an aggregate type in memory or in a
2336 register? Return 0 if an aggregate type can be returned in a
2337 register, 1 if it must be returned in memory. */
2338
2339static int
2340aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2341{
f168693b 2342 type = check_typedef (type);
4f4aedeb
AH
2343 int elements;
2344 struct type *fundamental_type;
07b287a0 2345
911627e7
TT
2346 if (TYPE_HAS_DYNAMIC_LENGTH (type))
2347 return 1;
2348
4f4aedeb
AH
2349 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2350 &fundamental_type))
07b287a0 2351 {
cd635f74
YQ
2352 /* v0-v7 are used to return values and one register is allocated
2353 for one member. However, an HFA or HVA has at most four members. */
07b287a0
MS
2354 return 0;
2355 }
2356
df86565b 2357 if (type->length () > 16
bab22d06 2358 || !language_pass_by_reference (type).trivially_copyable)
07b287a0
MS
2359 {
2360 /* PCS B.6 Aggregates larger than 16 bytes are passed by
dda83cd7 2361 invisible reference. */
07b287a0
MS
2362
2363 return 1;
2364 }
2365
2366 return 0;
2367}
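/* Hypothetical C types (not from the GDB sources) illustrating the
   classification above: an HFA of two doubles is returned in V registers,
   while a non-HFA aggregate larger than 16 bytes is returned via the
   memory block addressed by X8.  */
struct example_hfa_pair { double x, y; };	/* aarch64_return_in_memory: 0 */
struct example_big_blob { char buf[24]; };	/* aarch64_return_in_memory: 1 */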
2368
2369/* Write into appropriate registers a function return value of type
2370 TYPE, given in virtual format. */
2371
2372static void
2373aarch64_store_return_value (struct type *type, struct regcache *regs,
2374 const gdb_byte *valbuf)
2375{
ac7936df 2376 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2377 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
2378 int elements;
2379 struct type *fundamental_type;
07b287a0 2380
4f4aedeb
AH
2381 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2382 &fundamental_type))
07b287a0 2383 {
df86565b 2384 int len = fundamental_type->length ();
4f4aedeb
AH
2385
2386 for (int i = 0; i < elements; i++)
2387 {
2388 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
2389 /* Enough space for a full vector register. */
2390 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2391 gdb_assert (len <= sizeof (tmpbuf));
4f4aedeb 2392
c6185dce
SM
2393 aarch64_debug_printf
2394 ("write HFA or HVA return value element %d to %s",
2395 i + 1, gdbarch_register_name (gdbarch, regno));
07b287a0 2396
4f4aedeb
AH
2397 memcpy (tmpbuf, valbuf,
2398 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2399 regs->cooked_write (regno, tmpbuf);
2400 valbuf += len;
2401 }
07b287a0 2402 }
78134374
SM
2403 else if (type->code () == TYPE_CODE_INT
2404 || type->code () == TYPE_CODE_CHAR
2405 || type->code () == TYPE_CODE_BOOL
2406 || type->code () == TYPE_CODE_PTR
aa006118 2407 || TYPE_IS_REFERENCE (type)
78134374 2408 || type->code () == TYPE_CODE_ENUM)
07b287a0 2409 {
df86565b 2410 if (type->length () <= X_REGISTER_SIZE)
07b287a0
MS
2411 {
2412 /* Values of one word or less are zero/sign-extended and
2413 returned in x0. */
2414 bfd_byte tmpbuf[X_REGISTER_SIZE];
2415 LONGEST val = unpack_long (type, valbuf);
2416
2417 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
b66f5587 2418 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
07b287a0
MS
2419 }
2420 else
2421 {
2422 /* Integral values greater than one word are stored in
2423 consecutive registers starting with x0. This will always
2424 be a multiple of the register size. */
df86565b 2425 int len = type->length ();
07b287a0
MS
2426 int regno = AARCH64_X0_REGNUM;
2427
2428 while (len > 0)
2429 {
b66f5587 2430 regs->cooked_write (regno++, valbuf);
07b287a0
MS
2431 len -= X_REGISTER_SIZE;
2432 valbuf += X_REGISTER_SIZE;
2433 }
2434 }
2435 }
07b287a0
MS
2436 else
2437 {
2438 /* For a structure or union the behaviour is as if the value had
2439 been stored to word-aligned memory and then loaded into
2440 registers with 64-bit load instruction(s). */
df86565b 2441 int len = type->length ();
07b287a0
MS
2442 int regno = AARCH64_X0_REGNUM;
2443 bfd_byte tmpbuf[X_REGISTER_SIZE];
2444
2445 while (len > 0)
2446 {
2447 memcpy (tmpbuf, valbuf,
2448 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
b66f5587 2449 regs->cooked_write (regno++, tmpbuf);
07b287a0
MS
2450 len -= X_REGISTER_SIZE;
2451 valbuf += X_REGISTER_SIZE;
2452 }
2453 }
2454}
2455
2456/* Implement the "return_value" gdbarch method. */
2457
2458static enum return_value_convention
2459aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2460 struct type *valtype, struct regcache *regcache,
5cb0f2d5 2461 struct value **read_value, const gdb_byte *writebuf)
07b287a0 2462{
78134374
SM
2463 if (valtype->code () == TYPE_CODE_STRUCT
2464 || valtype->code () == TYPE_CODE_UNION
2465 || valtype->code () == TYPE_CODE_ARRAY)
07b287a0
MS
2466 {
2467 if (aarch64_return_in_memory (gdbarch, valtype))
2468 {
bab22d06
LM
2469 /* From the AAPCS64's Result Return section:
2470
2471 "Otherwise, the caller shall reserve a block of memory of
2472 sufficient size and alignment to hold the result. The address
2473 of the memory block shall be passed as an additional argument to
2474 the function in x8." */
2475
c6185dce 2476 aarch64_debug_printf ("return value in memory");
bab22d06 2477
911627e7 2478 if (read_value != nullptr)
bab22d06
LM
2479 {
2480 CORE_ADDR addr;
2481
2482 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
911627e7 2483 *read_value = value_at_non_lval (valtype, addr);
bab22d06
LM
2484 }
2485
2486 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
07b287a0
MS
2487 }
2488 }
2489
2490 if (writebuf)
2491 aarch64_store_return_value (valtype, regcache, writebuf);
2492
911627e7
TT
2493 if (read_value)
2494 {
317c3ed9 2495 *read_value = value::allocate (valtype);
911627e7 2496 aarch64_extract_return_value (valtype, regcache,
bbe912ba 2497 (*read_value)->contents_raw ().data ());
911627e7 2498 }
07b287a0 2499
c6185dce 2500 aarch64_debug_printf ("return value in registers");
07b287a0
MS
2501
2502 return RETURN_VALUE_REGISTER_CONVENTION;
2503}
2504
2505/* Implement the "get_longjmp_target" gdbarch method. */
2506
2507static int
bd2b40ac 2508aarch64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
07b287a0
MS
2509{
2510 CORE_ADDR jb_addr;
2511 gdb_byte buf[X_REGISTER_SIZE];
2512 struct gdbarch *gdbarch = get_frame_arch (frame);
08106042 2513 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2514 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2515
2516 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2517
2518 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2519 X_REGISTER_SIZE))
2520 return 0;
2521
2522 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2523 return 1;
2524}
ea873d8e
PL
2525
2526/* Implement the "gen_return_address" gdbarch method. */
2527
2528static void
2529aarch64_gen_return_address (struct gdbarch *gdbarch,
2530 struct agent_expr *ax, struct axs_value *value,
2531 CORE_ADDR scope)
2532{
2533 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2534 value->kind = axs_lvalue_register;
2535 value->u.reg = AARCH64_LR_REGNUM;
2536}
07b287a0
MS
2537\f
2538
e63ae49b
LM
2539/* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2540 otherwise. */
2541
2542static bool
2543is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2544{
2545 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2546
2547 if (tdep->w_pseudo_base <= regnum
2548 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2549 return true;
2550
2551 return false;
2552}
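/* Hypothetical sketch (not part of the GDB sources) of the W pseudo-register
   mapping established here: w5 aliases the low 32 bits of x5, which sit at
   byte offset 0 on little-endian targets and offset 4 on big-endian ones.  */
static int
example_w_to_x_regnum (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  return AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
}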
2553
07b287a0
MS
2554/* Return the pseudo register name corresponding to register regnum. */
2555
2556static const char *
2557aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2558{
08106042 2559 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2560
e63ae49b
LM
2561 /* W pseudo-registers. Bottom halves of the X registers. */
2562 static const char *const w_name[] =
2563 {
2564 "w0", "w1", "w2", "w3",
2565 "w4", "w5", "w6", "w7",
2566 "w8", "w9", "w10", "w11",
2567 "w12", "w13", "w14", "w15",
2568 "w16", "w17", "w18", "w19",
2569 "w20", "w21", "w22", "w23",
2570 "w24", "w25", "w26", "w27",
2571 "w28", "w29", "w30",
2572 };
2573
07b287a0
MS
2574 static const char *const q_name[] =
2575 {
2576 "q0", "q1", "q2", "q3",
2577 "q4", "q5", "q6", "q7",
2578 "q8", "q9", "q10", "q11",
2579 "q12", "q13", "q14", "q15",
2580 "q16", "q17", "q18", "q19",
2581 "q20", "q21", "q22", "q23",
2582 "q24", "q25", "q26", "q27",
2583 "q28", "q29", "q30", "q31",
2584 };
2585
2586 static const char *const d_name[] =
2587 {
2588 "d0", "d1", "d2", "d3",
2589 "d4", "d5", "d6", "d7",
2590 "d8", "d9", "d10", "d11",
2591 "d12", "d13", "d14", "d15",
2592 "d16", "d17", "d18", "d19",
2593 "d20", "d21", "d22", "d23",
2594 "d24", "d25", "d26", "d27",
2595 "d28", "d29", "d30", "d31",
2596 };
2597
2598 static const char *const s_name[] =
2599 {
2600 "s0", "s1", "s2", "s3",
2601 "s4", "s5", "s6", "s7",
2602 "s8", "s9", "s10", "s11",
2603 "s12", "s13", "s14", "s15",
2604 "s16", "s17", "s18", "s19",
2605 "s20", "s21", "s22", "s23",
2606 "s24", "s25", "s26", "s27",
2607 "s28", "s29", "s30", "s31",
2608 };
2609
2610 static const char *const h_name[] =
2611 {
2612 "h0", "h1", "h2", "h3",
2613 "h4", "h5", "h6", "h7",
2614 "h8", "h9", "h10", "h11",
2615 "h12", "h13", "h14", "h15",
2616 "h16", "h17", "h18", "h19",
2617 "h20", "h21", "h22", "h23",
2618 "h24", "h25", "h26", "h27",
2619 "h28", "h29", "h30", "h31",
2620 };
2621
2622 static const char *const b_name[] =
2623 {
2624 "b0", "b1", "b2", "b3",
2625 "b4", "b5", "b6", "b7",
2626 "b8", "b9", "b10", "b11",
2627 "b12", "b13", "b14", "b15",
2628 "b16", "b17", "b18", "b19",
2629 "b20", "b21", "b22", "b23",
2630 "b24", "b25", "b26", "b27",
2631 "b28", "b29", "b30", "b31",
2632 };
2633
34dcc7cf 2634 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2635
34dcc7cf
AH
2636 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2637 return q_name[p_regnum - AARCH64_Q0_REGNUM];
07b287a0 2638
34dcc7cf
AH
2639 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2640 return d_name[p_regnum - AARCH64_D0_REGNUM];
07b287a0 2641
34dcc7cf
AH
2642 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2643 return s_name[p_regnum - AARCH64_S0_REGNUM];
07b287a0 2644
34dcc7cf
AH
2645 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2646 return h_name[p_regnum - AARCH64_H0_REGNUM];
07b287a0 2647
34dcc7cf
AH
2648 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2649 return b_name[p_regnum - AARCH64_B0_REGNUM];
07b287a0 2650
e63ae49b
LM
2651 /* W pseudo-registers? */
2652 if (is_w_pseudo_register (gdbarch, regnum))
2653 return w_name[regnum - tdep->w_pseudo_base];
2654
63bad7b6
AH
2655 if (tdep->has_sve ())
2656 {
2657 static const char *const sve_v_name[] =
2658 {
2659 "v0", "v1", "v2", "v3",
2660 "v4", "v5", "v6", "v7",
2661 "v8", "v9", "v10", "v11",
2662 "v12", "v13", "v14", "v15",
2663 "v16", "v17", "v18", "v19",
2664 "v20", "v21", "v22", "v23",
2665 "v24", "v25", "v26", "v27",
2666 "v28", "v29", "v30", "v31",
2667 };
2668
34dcc7cf
AH
2669 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2670 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2671 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
63bad7b6
AH
2672 }
2673
34dcc7cf
AH
2674 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2675 prevents it from being read by methods such as
2676 mi_cmd_trace_frame_collected. */
c9cd8ca4 2677 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf
AH
2678 return "";
2679
f34652de 2680 internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
34dcc7cf 2681 p_regnum);
07b287a0
MS
2682}
2683
2684/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2685
2686static struct type *
2687aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2688{
08106042 2689 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2690
34dcc7cf 2691 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2692
34dcc7cf 2693 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0
MS
2694 return aarch64_vnq_type (gdbarch);
2695
34dcc7cf 2696 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2697 return aarch64_vnd_type (gdbarch);
2698
34dcc7cf 2699 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2700 return aarch64_vns_type (gdbarch);
2701
34dcc7cf 2702 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0
MS
2703 return aarch64_vnh_type (gdbarch);
2704
34dcc7cf 2705 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0
MS
2706 return aarch64_vnb_type (gdbarch);
2707
34dcc7cf
AH
2708 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2709 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6
AH
2710 return aarch64_vnv_type (gdbarch);
2711
e63ae49b
LM
2712 /* W pseudo-registers are 32-bit. */
2713 if (is_w_pseudo_register (gdbarch, regnum))
2714 return builtin_type (gdbarch)->builtin_uint32;
2715
c9cd8ca4 2716 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf
AH
2717 return builtin_type (gdbarch)->builtin_uint64;
2718
f34652de 2719 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
34dcc7cf 2720 p_regnum);
07b287a0
MS
2721}
2722
2723/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2724
2725static int
2726aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
dbf5d61b 2727 const struct reggroup *group)
07b287a0 2728{
08106042 2729 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 2730
34dcc7cf 2731 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2732
34dcc7cf 2733 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0 2734 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2735 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2736 return (group == all_reggroup || group == vector_reggroup
2737 || group == float_reggroup);
34dcc7cf 2738 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2739 return (group == all_reggroup || group == vector_reggroup
2740 || group == float_reggroup);
34dcc7cf 2741 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0 2742 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2743 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0 2744 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
2745 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2746 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6 2747 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2748 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
c9cd8ca4 2749 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf 2750 return 0;
07b287a0
MS
2751
2752 return group == all_reggroup;
2753}
2754
3c5cd5c3
AH
2755/* Helper for aarch64_pseudo_read_value. */
2756
2757static struct value *
63bad7b6
AH
2758aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2759 readable_regcache *regcache, int regnum_offset,
3c5cd5c3
AH
2760 int regsize, struct value *result_value)
2761{
3c5cd5c3
AH
2762 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2763
63bad7b6
AH
2764 /* Enough space for a full vector register. */
2765 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2766 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2767
3c5cd5c3
AH
2768 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2769 mark_value_bytes_unavailable (result_value, 0,
d0c97917 2770 result_value->type ()->length ());
3c5cd5c3 2771 else
bbe912ba 2772 memcpy (result_value->contents_raw ().data (), reg_buf, regsize);
63bad7b6 2773
3c5cd5c3
AH
2774 return result_value;
2775 }
2776
07b287a0
MS
2777/* Implement the "pseudo_register_read_value" gdbarch method. */
2778
2779static struct value *
3c5cd5c3 2780aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
07b287a0
MS
2781 int regnum)
2782{
08106042 2783 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
317c3ed9 2784 struct value *result_value = value::allocate (register_type (gdbarch, regnum));
07b287a0 2785
07b287a0
MS
2786 VALUE_LVAL (result_value) = lval_register;
2787 VALUE_REGNUM (result_value) = regnum;
07b287a0 2788
e63ae49b
LM
2789 if (is_w_pseudo_register (gdbarch, regnum))
2790 {
2791 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2792 /* Default offset for little endian. */
2793 int offset = 0;
2794
2795 if (byte_order == BFD_ENDIAN_BIG)
2796 offset = 4;
2797
2798 /* Find the correct X register to extract the data from. */
2799 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2800 gdb_byte data[4];
2801
2802 /* Read the bottom 4 bytes of X. */
2803 if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
2804 mark_value_bytes_unavailable (result_value, 0, 4);
2805 else
bbe912ba 2806 memcpy (result_value->contents_raw ().data (), data, 4);
e63ae49b
LM
2807
2808 return result_value;
2809 }
2810
07b287a0
MS
2811 regnum -= gdbarch_num_regs (gdbarch);
2812
2813 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2814 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2815 regnum - AARCH64_Q0_REGNUM,
3c5cd5c3 2816 Q_REGISTER_SIZE, result_value);
07b287a0
MS
2817
2818 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2819 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2820 regnum - AARCH64_D0_REGNUM,
3c5cd5c3 2821 D_REGISTER_SIZE, result_value);
07b287a0
MS
2822
2823 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2824 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2825 regnum - AARCH64_S0_REGNUM,
3c5cd5c3 2826 S_REGISTER_SIZE, result_value);
07b287a0
MS
2827
2828 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2829 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2830 regnum - AARCH64_H0_REGNUM,
3c5cd5c3 2831 H_REGISTER_SIZE, result_value);
07b287a0
MS
2832
2833 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2834 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2835 regnum - AARCH64_B0_REGNUM,
3c5cd5c3 2836 B_REGISTER_SIZE, result_value);
07b287a0 2837
63bad7b6
AH
2838 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2839 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2840 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2841 regnum - AARCH64_SVE_V0_REGNUM,
2842 V_REGISTER_SIZE, result_value);
2843
07b287a0
MS
2844 gdb_assert_not_reached ("regnum out of bound");
2845}
2846
3c5cd5c3 2847/* Helper for aarch64_pseudo_write. */
07b287a0
MS
2848
2849static void
63bad7b6
AH
2850aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2851 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2852{
3c5cd5c3 2853 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2854
63bad7b6
AH
2855 /* Enough space for a full vector register. */
2856 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2857 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2858
07b287a0
MS
2859 /* Ensure the register buffer is zero. We want GDB writes of the
2860 various 'scalar' pseudo registers to behave like architectural
2861 writes: register width bytes are written, the remainder are set to
2862 zero. */
63bad7b6 2863 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2864
3c5cd5c3
AH
2865 memcpy (reg_buf, buf, regsize);
2866 regcache->raw_write (v_regnum, reg_buf);
2867}
2868
2869/* Implement the "pseudo_register_write" gdbarch method. */
2870
2871static void
2872aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2873 int regnum, const gdb_byte *buf)
2874{
08106042 2875 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
e63ae49b
LM
2876
2877 if (is_w_pseudo_register (gdbarch, regnum))
2878 {
2879 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2880 /* Default offset for little endian. */
2881 int offset = 0;
2882
2883 if (byte_order == BFD_ENDIAN_BIG)
2884 offset = 4;
2885
2886 /* Find the correct X register to extract the data from. */
2887 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2888
2889 /* First zero-out the contents of X. */
2890 ULONGEST zero = 0;
2891 regcache->raw_write (x_regnum, zero);
2892 /* Write to the bottom 4 bytes of X. */
2893 regcache->raw_write_part (x_regnum, offset, 4, buf);
2894 return;
2895 }
2896
07b287a0
MS
2897 regnum -= gdbarch_num_regs (gdbarch);
2898
2899 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2900 return aarch64_pseudo_write_1 (gdbarch, regcache,
2901 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2902 buf);
07b287a0
MS
2903
2904 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2905 return aarch64_pseudo_write_1 (gdbarch, regcache,
2906 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2907 buf);
07b287a0
MS
2908
2909 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2910 return aarch64_pseudo_write_1 (gdbarch, regcache,
2911 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2912 buf);
07b287a0
MS
2913
2914 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2915 return aarch64_pseudo_write_1 (gdbarch, regcache,
2916 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2917 buf);
07b287a0
MS
2918
2919 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2920 return aarch64_pseudo_write_1 (gdbarch, regcache,
2921 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2922 buf);
2923
2924 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2925 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2926 return aarch64_pseudo_write_1 (gdbarch, regcache,
2927 regnum - AARCH64_SVE_V0_REGNUM,
2928 V_REGISTER_SIZE, buf);
07b287a0
MS
2929
2930 gdb_assert_not_reached ("regnum out of bound");
2931}
2932
07b287a0
MS
2933/* Callback function for user_reg_add. */
2934
2935static struct value *
bd2b40ac 2936value_of_aarch64_user_reg (frame_info_ptr frame, const void *baton)
07b287a0 2937{
9a3c8263 2938 const int *reg_p = (const int *) baton;
07b287a0
MS
2939
2940 return value_of_register (*reg_p, frame);
2941}
2942\f
2943
9404b58f
KM
2944/* Implement the "software_single_step" gdbarch method, needed to
2945 single step through atomic sequences on AArch64. */
2946
a0ff9e1a 2947static std::vector<CORE_ADDR>
f5ea389a 2948aarch64_software_single_step (struct regcache *regcache)
9404b58f 2949{
ac7936df 2950 struct gdbarch *gdbarch = regcache->arch ();
9404b58f
KM
2951 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2952 const int insn_size = 4;
2953 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2954 CORE_ADDR pc = regcache_read_pc (regcache);
70ab8ccd 2955 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
9404b58f
KM
2956 CORE_ADDR loc = pc;
2957 CORE_ADDR closing_insn = 0;
94355de7
LM
2958
2959 ULONGEST insn_from_memory;
2960 if (!safe_read_memory_unsigned_integer (loc, insn_size,
2961 byte_order_for_code,
2962 &insn_from_memory))
2963 {
2964 /* Assume we don't have an atomic sequence, as we couldn't read the
2965 instruction at this location. */
2966 return {};
2967 }
2968
2969 uint32_t insn = insn_from_memory;
9404b58f
KM
2970 int index;
2971 int insn_count;
2972 int bc_insn_count = 0; /* Conditional branch instruction count. */
2973 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
f77ee802
YQ
2974 aarch64_inst inst;
2975
561a72d4 2976 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2977 return {};
9404b58f
KM
2978
2979 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2980 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
a0ff9e1a 2981 return {};
9404b58f
KM
2982
2983 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2984 {
9404b58f 2985 loc += insn_size;
9404b58f 2986
94355de7
LM
2987 if (!safe_read_memory_unsigned_integer (loc, insn_size,
2988 byte_order_for_code,
2989 &insn_from_memory))
2990 {
2991 /* Assume we don't have an atomic sequence, as we couldn't read the
2992 instruction at this location. */
2993 return {};
2994 }
2995
2996 insn = insn_from_memory;
561a72d4 2997 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2998 return {};
9404b58f 2999 /* Check if the instruction is a conditional branch. */
f77ee802 3000 if (inst.opcode->iclass == condbranch)
9404b58f 3001 {
f77ee802
YQ
3002 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
3003
9404b58f 3004 if (bc_insn_count >= 1)
a0ff9e1a 3005 return {};
9404b58f
KM
3006
3007 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 3008 breaks[1] = loc + inst.operands[0].imm.value;
9404b58f
KM
3009
3010 bc_insn_count++;
3011 last_breakpoint++;
3012 }
3013
3014 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 3015 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
9404b58f
KM
3016 {
3017 closing_insn = loc;
3018 break;
3019 }
3020 }
3021
3022 /* We didn't find a closing Store Exclusive instruction, fall back. */
3023 if (!closing_insn)
a0ff9e1a 3024 return {};
9404b58f
KM
3025
3026 /* Insert breakpoint after the end of the atomic sequence. */
3027 breaks[0] = loc + insn_size;
3028
3029 /* Check for duplicated breakpoints, and also check that the second
3030 breakpoint is not within the atomic sequence. */
3031 if (last_breakpoint
3032 && (breaks[1] == breaks[0]
3033 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
3034 last_breakpoint = 0;
3035
a0ff9e1a
SM
3036 std::vector<CORE_ADDR> next_pcs;
3037
9404b58f
KM
3038 /* Insert the breakpoint at the end of the sequence, and one at the
3039 destination of the conditional branch, if it exists. */
3040 for (index = 0; index <= last_breakpoint; index++)
a0ff9e1a 3041 next_pcs.push_back (breaks[index]);
9404b58f 3042
93f9a11f 3043 return next_pcs;
9404b58f
KM
3044}
3045
1152d984
SM
3046struct aarch64_displaced_step_copy_insn_closure
3047 : public displaced_step_copy_insn_closure
b6542f81
YQ
3048{
3049 /* It is true when a conditional instruction, such as B.COND, TBZ, etc.,
3050 is being displaced stepped. */
f0c702d4 3051 bool cond = false;
b6542f81 3052
0c271889
LM
3053 /* PC adjustment offset after displaced stepping. If 0, then we don't
3054 write the PC back, assuming the PC is already the right address. */
cfba9872 3055 int32_t pc_adjust = 0;
b6542f81
YQ
3056};
3057
3058/* Data when visiting instructions for displaced stepping. */
3059
3060struct aarch64_displaced_step_data
3061{
3062 struct aarch64_insn_data base;
3063
3064 /* The address where the instruction will be executed. */
3065 CORE_ADDR new_addr;
3066 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
e935475c 3067 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
b6542f81
YQ
3068 /* Number of instructions in INSN_BUF. */
3069 unsigned insn_count;
3070 /* Registers when doing displaced stepping. */
3071 struct regcache *regs;
3072
1152d984 3073 aarch64_displaced_step_copy_insn_closure *dsc;
b6542f81
YQ
3074};
3075
3076/* Implementation of aarch64_insn_visitor method "b". */
3077
3078static void
3079aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3080 struct aarch64_insn_data *data)
3081{
3082 struct aarch64_displaced_step_data *dsd
3083 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 3084 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
3085
3086 if (can_encode_int32 (new_offset, 28))
3087 {
3088 /* Emit B rather than BL, because executing BL on a new address
3089 will get the wrong address into LR. In order to avoid this,
3090 we emit B, and update LR if the instruction is BL. */
3091 emit_b (dsd->insn_buf, 0, new_offset);
3092 dsd->insn_count++;
3093 }
3094 else
3095 {
3096 /* Write NOP. */
3097 emit_nop (dsd->insn_buf);
3098 dsd->insn_count++;
3099 dsd->dsc->pc_adjust = offset;
3100 }
3101
3102 if (is_bl)
3103 {
3104 /* Update LR. */
3105 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3106 data->insn_addr + 4);
3107 }
3108}
3109
3110/* Implementation of aarch64_insn_visitor method "b_cond". */
3111
3112static void
3113aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3114 struct aarch64_insn_data *data)
3115{
3116 struct aarch64_displaced_step_data *dsd
3117 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3118
3119 /* GDB has to fix up the PC after displaced stepping this instruction
3120 differently according to whether the condition is true or false. Instead
3121 of checking COND against the condition flags, we can use
3122 the following instructions, and GDB can tell how to fix up the PC
3123 according to the PC value.
3124
3125 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3126 INSN1 ;
3127 TAKEN:
3128 INSN2
3129 */
3130
3131 emit_bcond (dsd->insn_buf, cond, 8);
f0c702d4 3132 dsd->dsc->cond = true;
b6542f81
YQ
3133 dsd->dsc->pc_adjust = offset;
3134 dsd->insn_count = 1;
3135}
3136
3137/* Dynamically allocate a new register. If we know the register
3138 statically, we should make it a global as above instead of using this
3139 helper function. */
3140
3141static struct aarch64_register
3142aarch64_register (unsigned num, int is64)
3143{
3144 return (struct aarch64_register) { num, is64 };
3145}
3146
3147/* Implementation of aarch64_insn_visitor method "cb". */
3148
3149static void
3150aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3151 const unsigned rn, int is64,
3152 struct aarch64_insn_data *data)
3153{
3154 struct aarch64_displaced_step_data *dsd
3155 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3156
3157 /* The offset is out of range for a compare and branch
3158 instruction. We can use the following instructions instead:
3159
3160 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3161 INSN1 ;
3162 TAKEN:
3163 INSN2
3164 */
3165 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3166 dsd->insn_count = 1;
f0c702d4 3167 dsd->dsc->cond = true;
b6542f81
YQ
3168 dsd->dsc->pc_adjust = offset;
3169}
3170
3171/* Implementation of aarch64_insn_visitor method "tb". */
3172
3173static void
3174aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3175 const unsigned rt, unsigned bit,
3176 struct aarch64_insn_data *data)
3177{
3178 struct aarch64_displaced_step_data *dsd
3179 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
3180
3181 /* The offset is out of range for a test bit and branch
3182 instruction. We can use the following instructions instead:
3183
3184 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3185 INSN1 ;
3186 TAKEN:
3187 INSN2
3188
3189 */
3190 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3191 dsd->insn_count = 1;
f0c702d4 3192 dsd->dsc->cond = true;
b6542f81
YQ
3193 dsd->dsc->pc_adjust = offset;
3194}
3195
3196/* Implementation of aarch64_insn_visitor method "adr". */
3197
3198static void
3199aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3200 const int is_adrp, struct aarch64_insn_data *data)
3201{
3202 struct aarch64_displaced_step_data *dsd
3203 = (struct aarch64_displaced_step_data *) data;
3204 /* We know exactly the address the ADR{P,} instruction will compute.
3205 We can just write it to the destination register. */
3206 CORE_ADDR address = data->insn_addr + offset;
3207
3208 if (is_adrp)
3209 {
3210 /* Clear the lower 12 bits of the offset to get the 4K page. */
3211 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3212 address & ~0xfff);
3213 }
3214 else
3215 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3216 address);
3217
3218 dsd->dsc->pc_adjust = 4;
3219 emit_nop (dsd->insn_buf);
3220 dsd->insn_count = 1;
3221}
3222
3223/* Implementation of aarch64_insn_visitor method "ldr_literal". */
3224
3225static void
3226aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3227 const unsigned rt, const int is64,
3228 struct aarch64_insn_data *data)
3229{
3230 struct aarch64_displaced_step_data *dsd
3231 = (struct aarch64_displaced_step_data *) data;
3232 CORE_ADDR address = data->insn_addr + offset;
3233 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3234
3235 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3236 address);
3237
3238 if (is_sw)
3239 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3240 aarch64_register (rt, 1), zero);
3241 else
3242 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3243 aarch64_register (rt, 1), zero);
3244
3245 dsd->dsc->pc_adjust = 4;
3246}
3247
3248/* Implementation of aarch64_insn_visitor method "others". */
3249
3250static void
3251aarch64_displaced_step_others (const uint32_t insn,
3252 struct aarch64_insn_data *data)
3253{
3254 struct aarch64_displaced_step_data *dsd
3255 = (struct aarch64_displaced_step_data *) data;
3256
807f647c
MM
3257 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3258 if (masked_insn == BLR)
b6542f81 3259 {
807f647c
MM
3260 /* Emit a BR to the same register and then update LR to the original
3261 address (similar to aarch64_displaced_step_b). */
3262 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3263 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3264 data->insn_addr + 4);
b6542f81 3265 }
807f647c
MM
3266 else
3267 aarch64_emit_insn (dsd->insn_buf, insn);
3268 dsd->insn_count = 1;
3269
3270 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3271 dsd->dsc->pc_adjust = 0;
b6542f81
YQ
3272 else
3273 dsd->dsc->pc_adjust = 4;
3274}
3275
3276static const struct aarch64_insn_visitor visitor =
3277{
3278 aarch64_displaced_step_b,
3279 aarch64_displaced_step_b_cond,
3280 aarch64_displaced_step_cb,
3281 aarch64_displaced_step_tb,
3282 aarch64_displaced_step_adr,
3283 aarch64_displaced_step_ldr_literal,
3284 aarch64_displaced_step_others,
3285};
3286
3287/* Implement the "displaced_step_copy_insn" gdbarch method. */
3288
1152d984 3289displaced_step_copy_insn_closure_up
b6542f81
YQ
3290aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3291 CORE_ADDR from, CORE_ADDR to,
3292 struct regcache *regs)
3293{
b6542f81 3294 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
b6542f81 3295 struct aarch64_displaced_step_data dsd;
c86a40c6 3296 aarch64_inst inst;
94355de7
LM
3297 ULONGEST insn_from_memory;
3298
3299 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
3300 &insn_from_memory))
3301 return nullptr;
3302
3303 uint32_t insn = insn_from_memory;
c86a40c6 3304
561a72d4 3305 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
c86a40c6 3306 return NULL;
b6542f81
YQ
3307
3308 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 3309 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
b6542f81
YQ
3310 {
3311 /* We can't displaced step atomic sequences. */
3312 return NULL;
3313 }
3314
1152d984
SM
3315 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3316 (new aarch64_displaced_step_copy_insn_closure);
b6542f81
YQ
3317 dsd.base.insn_addr = from;
3318 dsd.new_addr = to;
3319 dsd.regs = regs;
cfba9872 3320 dsd.dsc = dsc.get ();
034f1a81 3321 dsd.insn_count = 0;
b6542f81
YQ
3322 aarch64_relocate_instruction (insn, &visitor,
3323 (struct aarch64_insn_data *) &dsd);
e935475c 3324 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
b6542f81
YQ
3325
3326 if (dsd.insn_count != 0)
3327 {
3328 int i;
3329
3330 /* Instruction can be relocated to scratch pad. Copy
3331 relocated instruction(s) there. */
3332 for (i = 0; i < dsd.insn_count; i++)
3333 {
136821d9
SM
3334 displaced_debug_printf ("writing insn %.8x at %s",
3335 dsd.insn_buf[i],
3336 paddress (gdbarch, to + i * 4));
3337
b6542f81
YQ
3338 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3339 (ULONGEST) dsd.insn_buf[i]);
3340 }
3341 }
3342 else
3343 {
b6542f81
YQ
3344 dsc = NULL;
3345 }
3346
6d0cf446 3347 /* This is a workaround for a problem with g++ 4.8. */
1152d984 3348 return displaced_step_copy_insn_closure_up (dsc.release ());
b6542f81
YQ
3349}
3350
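/* Illustrative summary (not part of the original source) of the copy/fixup
   protocol implemented above:

     1. Read the 4-byte instruction at FROM; return nullptr for
	load-exclusive instructions, since atomic sequences cannot be
	displaced-stepped.
     2. Relocate the instruction through the visitor table into
	dsd.insn_buf, producing at most AARCH64_DISPLACED_MODIFIED_INSNS
	instructions.
     3. Write those instructions to the scratch pad at TO and return the
	closure; aarch64_displaced_step_fixup later uses the closure state
	(notably pc_adjust) to move the PC back relative to FROM.  */
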
3351/* Implement the "displaced_step_fixup" gdbarch method. */
3352
3353void
3354aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
1152d984 3355 struct displaced_step_copy_insn_closure *dsc_,
b6542f81
YQ
3356 CORE_ADDR from, CORE_ADDR to,
3357 struct regcache *regs)
3358{
1152d984
SM
3359 aarch64_displaced_step_copy_insn_closure *dsc
3360 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
cfba9872 3361
0c271889
LM
3362 ULONGEST pc;
3363
3364 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3365
136821d9
SM
3366 displaced_debug_printf ("PC after stepping: %s (was %s).",
3367 paddress (gdbarch, pc), paddress (gdbarch, to));
1ab139e5 3368
b6542f81
YQ
3369 if (dsc->cond)
3370 {
136821d9
SM
3371 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3372 dsc->pc_adjust);
1ab139e5 3373
b6542f81
YQ
3374 if (pc - to == 8)
3375 {
3376 /* Condition is true. */
3377 }
3378 else if (pc - to == 4)
3379 {
3380 /* Condition is false. */
3381 dsc->pc_adjust = 4;
3382 }
3383 else
3384 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
1ab139e5 3385
136821d9
SM
3386 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3387 dsc->pc_adjust);
b6542f81
YQ
3388 }
3389
136821d9
SM
3390 displaced_debug_printf ("%s PC by %d",
3391 dsc->pc_adjust ? "adjusting" : "not adjusting",
3392 dsc->pc_adjust);
1ab139e5 3393
b6542f81
YQ
3394 if (dsc->pc_adjust != 0)
3395 {
0c271889
LM
3396 /* Make sure the previous instruction was executed (that is, the PC
3397 has changed). If the PC didn't change, then discard the adjustment
3398 offset. Otherwise we would skip an instruction that has not yet
3399 been executed. */
3400 if ((pc - to) == 0)
1ab139e5 3401 {
136821d9 3402 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
1ab139e5
LM
3403 dsc->pc_adjust = 0;
3404 }
0c271889 3405
136821d9
SM
3406 displaced_debug_printf ("fixup: set PC to %s:%d",
3407 paddress (gdbarch, from), dsc->pc_adjust);
3408
b6542f81
YQ
3409 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3410 from + dsc->pc_adjust);
3411 }
3412}
3413
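/* Illustrative note (not part of the original source): a worked example of
   the conditional-branch fixup above.  Suppose the scratch pad is at
   TO = 0x1000.  After the single step:

     PC == 0x1008 (PC - TO == 8): the condition was true, so the pc_adjust
	recorded when the instruction was copied is kept and the final PC
	becomes FROM + pc_adjust;
     PC == 0x1004 (PC - TO == 4): the condition was false, pc_adjust is
	forced to 4 and the final PC becomes FROM + 4, i.e. the next
	instruction.

   If the PC did not move at all (PC == TO), the adjustment is discarded so
   the original instruction is not skipped.  */
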
3414/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3415
07fbbd01 3416bool
40a53766 3417aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
b6542f81 3418{
07fbbd01 3419 return true;
b6542f81
YQ
3420}
3421
95228a0d
AH
3422/* Get the correct target description for the given set of FEATURES.
3423 If FEATURES.vq is zero then it is assumed SVE is not supported.
3424 (It is not possible to set VQ to zero on an SVE system).
3425
3426 FEATURES.mte indicates the presence of the Memory Tagging Extension.
3427
3428 FEATURES.tls gives the number of Thread Local Storage registers. */
da434ccb
AH
3429
3430const target_desc *
0ee6b1c5 3431aarch64_read_description (const aarch64_features &features)
da434ccb 3432{
0ee6b1c5
JB
3433 if (features.vq > AARCH64_MAX_SVE_VQ)
3434 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
95228a0d
AH
3435 AARCH64_MAX_SVE_VQ);
3436
0ee6b1c5 3437 struct target_desc *tdesc = tdesc_aarch64_map[features];
da434ccb 3438
95228a0d
AH
3439 if (tdesc == NULL)
3440 {
0ee6b1c5
JB
3441 tdesc = aarch64_create_target_description (features);
3442 tdesc_aarch64_map[features] = tdesc;
95228a0d 3443 }
da434ccb 3444
95228a0d 3445 return tdesc;
da434ccb
AH
3446}
3447
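/* Illustrative sketch (not part of the original source): target
   descriptions are created lazily and cached per feature set, so two
   lookups with the same aarch64_features return the same pointer.  The
   field values below are only an example.

     aarch64_features f;
     f.vq = 2;		(256-bit SVE vectors, VL = 32 bytes)
     f.pauth = true;
     const target_desc *t1 = aarch64_read_description (f);
     const target_desc *t2 = aarch64_read_description (f);
     (t1 == t2 holds: the second call hits tdesc_aarch64_map.)  */
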
ba2d2bb2
AH
3448/* Return the VQ used when creating the target description TDESC. */
3449
1332a140 3450static uint64_t
ba2d2bb2
AH
3451aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3452{
3453 const struct tdesc_feature *feature_sve;
3454
3455 if (!tdesc_has_registers (tdesc))
3456 return 0;
3457
3458 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3459
3460 if (feature_sve == nullptr)
3461 return 0;
3462
12863263
AH
3463 uint64_t vl = tdesc_register_bitsize (feature_sve,
3464 aarch64_sve_register_names[0]) / 8;
ba2d2bb2
AH
3465 return sve_vq_from_vl (vl);
3466}
3467
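/* Illustrative note (not part of the original source): a worked example of
   the VQ computation above.  If the SVE feature reports z0 as a 512-bit
   register, then vl = 512 / 8 = 64 bytes and sve_vq_from_vl (64) = 4,
   since VQ counts 128-bit (16-byte) granules.  A description without the
   org.gnu.gdb.aarch64.sve feature yields VQ = 0.  */
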
4f3681cc
TJB
3468/* Get the AArch64 features present in the given target description. */
3469
3470aarch64_features
3471aarch64_features_from_target_desc (const struct target_desc *tdesc)
3472{
3473 aarch64_features features;
3474
3475 if (tdesc == nullptr)
3476 return features;
3477
3478 features.vq = aarch64_get_tdesc_vq (tdesc);
3479 features.pauth
3480 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
3481 features.mte
3482 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
ba60b963
LM
3483
3484 const struct tdesc_feature *tls_feature
3485 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
3486
3487 if (tls_feature != nullptr)
3488 {
3489 /* We have TLS registers. Find out how many. */
3490 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
3491 features.tls = 2;
3492 else
3493 features.tls = 1;
3494 }
4f3681cc
TJB
3495
3496 return features;
3497}
3498
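/* Illustrative note (not part of the original source): for a description
   that contains the core, fpu, pauth and tls features (with both tpidr and
   tpidr2) but no sve or mte feature, the function above returns
   features.vq = 0, features.pauth = true, features.mte = false and
   features.tls = 2.  */
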
76bed0fd
AH
3499/* Implement the "cannot_store_register" gdbarch method. */
3500
3501static int
3502aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3503{
08106042 3504 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
76bed0fd
AH
3505
3506 if (!tdep->has_pauth ())
3507 return 0;
3508
3509 /* Pointer authentication registers are read-only. */
3510 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3511 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3512}
3513
da729c5c
TT
3514/* Implement the stack_frame_destroyed_p gdbarch method. */
3515
3516static int
3517aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3518{
3519 CORE_ADDR func_start, func_end;
3520 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3521 return 0;
3522
3523 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
94355de7
LM
3524
3525 ULONGEST insn_from_memory;
3526 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
3527 &insn_from_memory))
3528 return 0;
3529
3530 uint32_t insn = insn_from_memory;
da729c5c
TT
3531
3532 aarch64_inst inst;
3533 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
3534 return 0;
3535
3536 return streq (inst.opcode->name, "ret");
3537}
3538
07b287a0
MS
3539/* Initialize the current architecture based on INFO. If possible,
3540 re-use an architecture from ARCHES, which is a list of
3541 architectures already created during this debugging session.
3542
3543 Called e.g. at program startup, when reading a core file, and when
3544 reading a binary file. */
3545
3546static struct gdbarch *
3547aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3548{
ccb8d7e8 3549 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
76bed0fd 3550 const struct tdesc_feature *feature_pauth;
ccb8d7e8
AH
3551 bool valid_p = true;
3552 int i, num_regs = 0, num_pseudo_regs = 0;
c9cd8ca4 3553 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
ba60b963 3554 int first_mte_regnum = -1, first_tls_regnum = -1;
4f3681cc 3555 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4da037ef
AH
3556
3557 if (vq > AARCH64_MAX_SVE_VQ)
f34652de 3558 internal_error (_("VQ out of bounds: %s (max %d)"),
596179f7 3559 pulongest (vq), AARCH64_MAX_SVE_VQ);
4da037ef 3560
ccb8d7e8
AH
3561 /* If there is already a candidate, use it. */
3562 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3563 best_arch != nullptr;
3564 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3565 {
345bd07c 3566 aarch64_gdbarch_tdep *tdep
08106042 3567 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4da037ef 3568 if (tdep && tdep->vq == vq)
ccb8d7e8
AH
3569 return best_arch->gdbarch;
3570 }
07b287a0 3571
4da037ef
AH
3572 /* Ensure we always have a target descriptor, and that it is for the given VQ
3573 value. */
ccb8d7e8 3574 const struct target_desc *tdesc = info.target_desc;
4f3681cc
TJB
3575 if (!tdesc_has_registers (tdesc))
3576 tdesc = aarch64_read_description ({});
07b287a0
MS
3577 gdb_assert (tdesc);
3578
ccb8d7e8 3579 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
ba2d2bb2
AH
3580 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3581 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
76bed0fd 3582 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
5e984dbf
LM
3583 const struct tdesc_feature *feature_mte
3584 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
414d5848
JB
3585 const struct tdesc_feature *feature_tls
3586 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
07b287a0 3587
ccb8d7e8
AH
3588 if (feature_core == nullptr)
3589 return nullptr;
07b287a0 3590
c1e1314d 3591 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
07b287a0 3592
ba2d2bb2 3593 /* Validate the description provides the mandatory core R registers
07b287a0
MS
3594 and allocate their numbers. */
3595 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
c1e1314d 3596 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
ba2d2bb2
AH
3597 AARCH64_X0_REGNUM + i,
3598 aarch64_r_register_names[i]);
07b287a0
MS
3599
3600 num_regs = AARCH64_X0_REGNUM + i;
3601
ba2d2bb2 3602 /* Add the V registers. */
ccb8d7e8 3603 if (feature_fpu != nullptr)
07b287a0 3604 {
ccb8d7e8 3605 if (feature_sve != nullptr)
ba2d2bb2
AH
3606 error (_("Program contains both fpu and SVE features."));
3607
3608 /* Validate the description provides the mandatory V registers
3609 and allocate their numbers. */
07b287a0 3610 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
c1e1314d 3611 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
ba2d2bb2
AH
3612 AARCH64_V0_REGNUM + i,
3613 aarch64_v_register_names[i]);
07b287a0
MS
3614
3615 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3616 }
07b287a0 3617
ba2d2bb2 3618 /* Add the SVE registers. */
ccb8d7e8 3619 if (feature_sve != nullptr)
ba2d2bb2
AH
3620 {
3621 /* Validate the description provides the mandatory SVE registers
3622 and allocate their numbers. */
3623 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
c1e1314d 3624 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
ba2d2bb2
AH
3625 AARCH64_SVE_Z0_REGNUM + i,
3626 aarch64_sve_register_names[i]);
3627
3628 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3629 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3630 }
3631
ccb8d7e8 3632 if (feature_fpu != nullptr || feature_sve != nullptr)
ba2d2bb2 3633 {
07b287a0
MS
3634 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3635 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3636 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3637 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3638 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3639 }
3640
414d5848 3641 /* Add the TLS register. */
ba60b963 3642 int tls_register_count = 0;
414d5848
JB
3643 if (feature_tls != nullptr)
3644 {
ba60b963 3645 first_tls_regnum = num_regs;
414d5848 3646
ba60b963
LM
3647 /* Look for the TLS registers. tpidr is required, but tpidr2 is
3648 optional. */
3649 valid_p
3650 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3651 first_tls_regnum, "tpidr");
3652
3653 if (valid_p)
3654 {
3655 tls_register_count++;
3656
3657 bool has_tpidr2
3658 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3659 first_tls_regnum + tls_register_count,
3660 "tpidr2");
3661
3662 /* Figure out how many TLS registers we have. */
3663 if (has_tpidr2)
3664 tls_register_count++;
3665
3666 num_regs += tls_register_count;
3667 }
3668 else
3669 {
3670 warning (_("Provided TLS register feature doesn't contain "
3671 "required tpidr register."));
3672 return nullptr;
3673 }
414d5848
JB
3674 }
3675
76bed0fd
AH
3676 /* Add the pauth registers. */
3677 if (feature_pauth != NULL)
3678 {
3679 first_pauth_regnum = num_regs;
c9cd8ca4 3680 ra_sign_state_offset = num_pseudo_regs;
76bed0fd
AH
3681 /* Validate the descriptor provides the mandatory PAUTH registers and
3682 allocate their numbers. */
3683 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
c1e1314d 3684 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
76bed0fd
AH
3685 first_pauth_regnum + i,
3686 aarch64_pauth_register_names[i]);
3687
3688 num_regs += i;
34dcc7cf 3689 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
76bed0fd
AH
3690 }
3691
5e984dbf
LM
3692 /* Add the MTE registers. */
3693 if (feature_mte != NULL)
3694 {
3695 first_mte_regnum = num_regs;
3696 /* Validate the descriptor provides the mandatory MTE registers and
3697 allocate their numbers. */
3698 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3699 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3700 first_mte_regnum + i,
3701 aarch64_mte_register_names[i]);
3702
3703 num_regs += i;
3704 }
e63ae49b
LM
3705 /* W pseudo-registers */
3706 int first_w_regnum = num_pseudo_regs;
3707 num_pseudo_regs += 31;
5e984dbf 3708
07b287a0 3709 if (!valid_p)
c1e1314d 3710 return nullptr;
07b287a0
MS
3711
3712 /* AArch64 code is always little-endian. */
3713 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3714
2b16913c
SM
3715 gdbarch *gdbarch
3716 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
3717 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3718
3719 /* This should be low enough for everything. */
3720 tdep->lowest_pc = 0x20;
3721 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3722 tdep->jb_elt_size = 8;
4da037ef 3723 tdep->vq = vq;
76bed0fd 3724 tdep->pauth_reg_base = first_pauth_regnum;
1ba3a322 3725 tdep->ra_sign_state_regnum = -1;
5e984dbf 3726 tdep->mte_reg_base = first_mte_regnum;
ba60b963
LM
3727 tdep->tls_regnum_base = first_tls_regnum;
3728 tdep->tls_register_count = tls_register_count;
34dcc7cf 3729
07b287a0
MS
3730 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3731 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3732
07b287a0
MS
3733 /* Advance PC across function entry code. */
3734 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3735
3736 /* The stack grows downward. */
3737 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3738
3739 /* Breakpoint manipulation. */
04180708
YQ
3740 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3741 aarch64_breakpoint::kind_from_pc);
3742 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3743 aarch64_breakpoint::bp_from_kind);
07b287a0 3744 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3745 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
3746
3747 /* Information about registers, etc. */
3748 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3749 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3750 set_gdbarch_num_regs (gdbarch, num_regs);
3751
3752 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3753 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3754 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3755 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3756 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3757 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3758 aarch64_pseudo_register_reggroup_p);
76bed0fd 3759 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
07b287a0
MS
3760
3761 /* ABI */
3762 set_gdbarch_short_bit (gdbarch, 16);
3763 set_gdbarch_int_bit (gdbarch, 32);
3764 set_gdbarch_float_bit (gdbarch, 32);
3765 set_gdbarch_double_bit (gdbarch, 64);
3766 set_gdbarch_long_double_bit (gdbarch, 128);
3767 set_gdbarch_long_bit (gdbarch, 64);
3768 set_gdbarch_long_long_bit (gdbarch, 64);
3769 set_gdbarch_ptr_bit (gdbarch, 64);
3770 set_gdbarch_char_signed (gdbarch, 0);
53375380 3771 set_gdbarch_wchar_signed (gdbarch, 0);
07b287a0
MS
3772 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3773 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
552f1157 3774 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
b907456c 3775 set_gdbarch_type_align (gdbarch, aarch64_type_align);
07b287a0 3776
da729c5c
TT
3777 /* Detect whether PC is at a point where the stack has been destroyed. */
3778 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
3779
07b287a0
MS
3780 /* Internal <-> external register number maps. */
3781 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3782
3783 /* Returning results. */
5cb0f2d5 3784 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);
07b287a0
MS
3785
3786 /* Disassembly. */
3787 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3788
3789 /* Virtual tables. */
3790 set_gdbarch_vbit_in_delta (gdbarch, 1);
3791
3792 /* Hook in the ABI-specific overrides, if they have been registered. */
3793 info.target_desc = tdesc;
c1e1314d 3794 info.tdesc_data = tdesc_data.get ();
07b287a0
MS
3795 gdbarch_init_osabi (info, gdbarch);
3796
3797 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
11e1b75f
AH
3798 /* Register DWARF CFA vendor handler. */
3799 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3800 aarch64_execute_dwarf_cfa_vendor_op);
07b287a0 3801
5133a315
LM
3802 /* Permanent/Program breakpoint handling. */
3803 set_gdbarch_program_breakpoint_here_p (gdbarch,
3804 aarch64_program_breakpoint_here_p);
3805
07b287a0
MS
3806 /* Add some default predicates. */
3807 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3808 dwarf2_append_unwinders (gdbarch);
3809 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3810
3811 frame_base_set_default (gdbarch, &aarch64_normal_base);
3812
3813 /* Now we have tuned the configuration, set a few final things,
3814 based on what the OS ABI has told us. */
3815
3816 if (tdep->jb_pc >= 0)
3817 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3818
ea873d8e
PL
3819 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3820
aa7ca1bb
AH
3821 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3822
c1e1314d 3823 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
07b287a0 3824
1ba3a322
LM
3825 /* Fetch the updated number of registers after we're done adding all
3826 entries from features we don't explicitly care about. This is the case
3827 for bare metal debugging stubs that include a lot of system registers. */
3828 num_regs = gdbarch_num_regs (gdbarch);
3829
3830 /* With the number of real registers updated, setup the pseudo-registers and
3831 record their numbers. */
3832
e63ae49b
LM
3833 /* Setup W pseudo-register numbers. */
3834 tdep->w_pseudo_base = first_w_regnum + num_regs;
3835 tdep->w_pseudo_count = 31;
3836
1ba3a322
LM
3837 /* Pointer authentication pseudo-registers. */
3838 if (tdep->has_pauth ())
3839 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
3840
07b287a0
MS
3841 /* Add standard register aliases. */
3842 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3843 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3844 value_of_aarch64_user_reg,
3845 &aarch64_register_aliases[i].regnum);
3846
e8bf1ce4
JB
3847 register_aarch64_ravenscar_ops (gdbarch);
3848
07b287a0
MS
3849 return gdbarch;
3850}
3851
3852static void
3853aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3854{
08106042 3855 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
3856
3857 if (tdep == NULL)
3858 return;
3859
09a5d200 3860 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
6cb06a8c 3861 paddress (gdbarch, tdep->lowest_pc));
07b287a0
MS
3862}
3863
0d4c07af 3864#if GDB_SELF_TEST
1e2b521d
YQ
3865namespace selftests
3866{
3867static void aarch64_process_record_test (void);
3868}
0d4c07af 3869#endif
1e2b521d 3870
6c265988 3871void _initialize_aarch64_tdep ();
07b287a0 3872void
6c265988 3873_initialize_aarch64_tdep ()
07b287a0
MS
3874{
3875 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3876 aarch64_dump_tdep);
3877
07b287a0
MS
3878 /* Debug this file's internals. */
3879 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3880Set AArch64 debugging."), _("\
3881Show AArch64 debugging."), _("\
3882When on, AArch64 specific debugging is enabled."),
3883 NULL,
3884 show_aarch64_debug,
3885 &setdebuglist, &showdebuglist);
4d9a9006
YQ
3886
3887#if GDB_SELF_TEST
1526853e
SM
3888 selftests::register_test ("aarch64-analyze-prologue",
3889 selftests::aarch64_analyze_prologue_test);
3890 selftests::register_test ("aarch64-process-record",
3891 selftests::aarch64_process_record_test);
4d9a9006 3892#endif
07b287a0 3893}
99afc88b
OJ
3894
3895/* AArch64 process record-replay related structures, defines etc. */
3896
99afc88b 3897#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
dda83cd7
SM
3898 do \
3899 { \
3900 unsigned int reg_len = LENGTH; \
3901 if (reg_len) \
3902 { \
3903 REGS = XNEWVEC (uint32_t, reg_len); \
3904 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3905 } \
3906 } \
3907 while (0)
99afc88b
OJ
3908
3909#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
dda83cd7
SM
3910 do \
3911 { \
3912 unsigned int mem_len = LENGTH; \
3913 if (mem_len) \
01add95b
SM
3914 { \
3915 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
91ddba83 3916 memcpy(MEMS, &RECORD_BUF[0], \
01add95b
SM
3917 sizeof(struct aarch64_mem_r) * LENGTH); \
3918 } \
dda83cd7
SM
3919 } \
3920 while (0)
99afc88b
OJ
3921
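/* Illustrative sketch (not part of the original source): typical use of the
   REG_ALLOC macro by the record handlers below; MEM_ALLOC is used the same
   way for (length, address) pairs.  The register choice is only an example.

     uint32_t record_buf[2];
     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
		record_buf);

   The macros heap-allocate the destination arrays; they are later freed by
   deallocate_reg_mem once the record has been consumed.  */
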
3922/* AArch64 record/replay structures and enumerations. */
3923
3924struct aarch64_mem_r
3925{
3926 uint64_t len; /* Record length. */
3927 uint64_t addr; /* Memory address. */
3928};
3929
3930enum aarch64_record_result
3931{
3932 AARCH64_RECORD_SUCCESS,
99afc88b
OJ
3933 AARCH64_RECORD_UNSUPPORTED,
3934 AARCH64_RECORD_UNKNOWN
3935};
3936
4748a9be 3937struct aarch64_insn_decode_record
99afc88b
OJ
3938{
3939 struct gdbarch *gdbarch;
3940 struct regcache *regcache;
3941 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3942 uint32_t aarch64_insn; /* Insn to be recorded. */
3943 uint32_t mem_rec_count; /* Count of memory records. */
3944 uint32_t reg_rec_count; /* Count of register records. */
3945 uint32_t *aarch64_regs; /* Registers to be recorded. */
3946 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
4748a9be 3947};
99afc88b
OJ
3948
3949/* Record handler for data processing - register instructions. */
3950
3951static unsigned int
4748a9be 3952aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
3953{
3954 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3955 uint32_t record_buf[4];
3956
3957 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3958 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3959 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3960
3961 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3962 {
3963 uint8_t setflags;
3964
3965 /* Logical (shifted register). */
3966 if (insn_bits24_27 == 0x0a)
3967 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3968 /* Add/subtract. */
3969 else if (insn_bits24_27 == 0x0b)
3970 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3971 else
3972 return AARCH64_RECORD_UNKNOWN;
3973
3974 record_buf[0] = reg_rd;
3975 aarch64_insn_r->reg_rec_count = 1;
3976 if (setflags)
3977 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3978 }
3979 else
3980 {
3981 if (insn_bits24_27 == 0x0b)
3982 {
3983 /* Data-processing (3 source). */
3984 record_buf[0] = reg_rd;
3985 aarch64_insn_r->reg_rec_count = 1;
3986 }
3987 else if (insn_bits24_27 == 0x0a)
3988 {
3989 if (insn_bits21_23 == 0x00)
3990 {
3991 /* Add/subtract (with carry). */
3992 record_buf[0] = reg_rd;
3993 aarch64_insn_r->reg_rec_count = 1;
3994 if (bit (aarch64_insn_r->aarch64_insn, 29))
3995 {
3996 record_buf[1] = AARCH64_CPSR_REGNUM;
3997 aarch64_insn_r->reg_rec_count = 2;
3998 }
3999 }
4000 else if (insn_bits21_23 == 0x02)
4001 {
4002 /* Conditional compare (register) and conditional compare
4003 (immediate) instructions. */
4004 record_buf[0] = AARCH64_CPSR_REGNUM;
4005 aarch64_insn_r->reg_rec_count = 1;
4006 }
4007 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
4008 {
85102364 4009 /* Conditional select. */
99afc88b
OJ
4010 /* Data-processing (2 source). */
4011 /* Data-processing (1 source). */
4012 record_buf[0] = reg_rd;
4013 aarch64_insn_r->reg_rec_count = 1;
4014 }
4015 else
4016 return AARCH64_RECORD_UNKNOWN;
4017 }
4018 }
4019
4020 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4021 record_buf);
4022 return AARCH64_RECORD_SUCCESS;
4023}
4024
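/* Illustrative note (not part of the original source): a worked example of
   the handler above.  "adds x0, x1, x2" encodes as 0xab020020: bit 28 is
   clear and bits 24-27 are 0x0b, so it is an add/subtract (shifted
   register); bit 29 (the S flag) is set, so both the destination x0 and
   CPSR are recorded.  The same instruction without flag setting
   ("add x0, x1, x2") would record only x0.  */
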
4025/* Record handler for data processing - immediate instructions. */
4026
4027static unsigned int
4748a9be 4028aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b 4029{
78cc6c2d 4030 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
99afc88b
OJ
4031 uint32_t record_buf[4];
4032
4033 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
99afc88b
OJ
4034 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4035 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4036
4037 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
4038 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
4039 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
4040 {
4041 record_buf[0] = reg_rd;
4042 aarch64_insn_r->reg_rec_count = 1;
4043 }
4044 else if (insn_bits24_27 == 0x01)
4045 {
4046 /* Add/Subtract (immediate). */
4047 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4048 record_buf[0] = reg_rd;
4049 aarch64_insn_r->reg_rec_count = 1;
4050 if (setflags)
4051 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4052 }
4053 else if (insn_bits24_27 == 0x02 && !insn_bit23)
4054 {
4055 /* Logical (immediate). */
4056 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
4057 record_buf[0] = reg_rd;
4058 aarch64_insn_r->reg_rec_count = 1;
4059 if (setflags)
4060 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4061 }
4062 else
4063 return AARCH64_RECORD_UNKNOWN;
4064
4065 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4066 record_buf);
4067 return AARCH64_RECORD_SUCCESS;
4068}
4069
4070/* Record handler for branch, exception generation and system instructions. */
4071
4072static unsigned int
4748a9be 4073aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b 4074{
345bd07c
SM
4075
4076 aarch64_gdbarch_tdep *tdep
08106042 4077 = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
99afc88b
OJ
4078 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
4079 uint32_t record_buf[4];
4080
4081 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4082 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4083 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4084
4085 if (insn_bits28_31 == 0x0d)
4086 {
4087 /* Exception generation instructions. */
4088 if (insn_bits24_27 == 0x04)
4089 {
5d98d3cd
YQ
4090 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
4091 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
4092 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
99afc88b
OJ
4093 {
4094 ULONGEST svc_number;
4095
4096 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
4097 &svc_number);
4098 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
4099 svc_number);
4100 }
4101 else
4102 return AARCH64_RECORD_UNSUPPORTED;
4103 }
4104 /* System instructions. */
4105 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
4106 {
4107 uint32_t reg_rt, reg_crn;
4108
4109 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4110 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4111
4112 /* Record rt in case of sysl and mrs instructions. */
4113 if (bit (aarch64_insn_r->aarch64_insn, 21))
4114 {
4115 record_buf[0] = reg_rt;
4116 aarch64_insn_r->reg_rec_count = 1;
4117 }
4118 /* Record cpsr for hint and msr(immediate) instructions. */
4119 else if (reg_crn == 0x02 || reg_crn == 0x04)
4120 {
4121 record_buf[0] = AARCH64_CPSR_REGNUM;
4122 aarch64_insn_r->reg_rec_count = 1;
4123 }
4124 }
4125 /* Unconditional branch (register). */
4126 else if ((insn_bits24_27 & 0x0e) == 0x06)
4127 {
4128 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
4129 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
4130 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
4131 }
4132 else
4133 return AARCH64_RECORD_UNKNOWN;
4134 }
4135 /* Unconditional branch (immediate). */
4136 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
4137 {
4138 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
4139 if (bit (aarch64_insn_r->aarch64_insn, 31))
4140 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
4141 }
4142 else
4143 /* Compare & branch (immediate), Test & branch (immediate) and
4144 Conditional branch (immediate). */
4145 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
4146
4147 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4148 record_buf);
4149 return AARCH64_RECORD_SUCCESS;
4150}
4151
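/* Illustrative note (not part of the original source): examples for the
   handler above.  An unconditional "bl" (immediate) has bit 31 set, so both
   the PC and LR are recorded; a plain "b" records only the PC.  An "svc #0"
   takes the exception-generation path and is delegated to the OS ABI's
   aarch64_syscall_record hook with the syscall number read from x8.  */
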
4152/* Record handler for advanced SIMD load and store instructions. */
4153
4154static unsigned int
4748a9be 4155aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4156{
4157 CORE_ADDR address;
4158 uint64_t addr_offset = 0;
4159 uint32_t record_buf[24];
4160 uint64_t record_buf_mem[24];
4161 uint32_t reg_rn, reg_rt;
4162 uint32_t reg_index = 0, mem_index = 0;
4163 uint8_t opcode_bits, size_bits;
4164
4165 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4166 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4167 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4168 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4169 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
4170
4171 if (record_debug)
b277c936 4172 debug_printf ("Process record: Advanced SIMD load/store\n");
99afc88b
OJ
4173
4174 /* Load/store single structure. */
4175 if (bit (aarch64_insn_r->aarch64_insn, 24))
4176 {
4177 uint8_t sindex, scale, selem, esize, replicate = 0;
4178 scale = opcode_bits >> 2;
4179 selem = ((opcode_bits & 0x02) |
dda83cd7 4180 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
99afc88b 4181 switch (scale)
dda83cd7
SM
4182 {
4183 case 1:
4184 if (size_bits & 0x01)
4185 return AARCH64_RECORD_UNKNOWN;
4186 break;
4187 case 2:
4188 if ((size_bits >> 1) & 0x01)
4189 return AARCH64_RECORD_UNKNOWN;
4190 if (size_bits & 0x01)
4191 {
4192 if (!((opcode_bits >> 1) & 0x01))
4193 scale = 3;
4194 else
4195 return AARCH64_RECORD_UNKNOWN;
4196 }
4197 break;
4198 case 3:
4199 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
4200 {
4201 scale = size_bits;
4202 replicate = 1;
4203 break;
4204 }
4205 else
4206 return AARCH64_RECORD_UNKNOWN;
4207 default:
4208 break;
4209 }
99afc88b
OJ
4210 esize = 8 << scale;
4211 if (replicate)
dda83cd7
SM
4212 for (sindex = 0; sindex < selem; sindex++)
4213 {
4214 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4215 reg_rt = (reg_rt + 1) % 32;
4216 }
99afc88b 4217 else
dda83cd7
SM
4218 {
4219 for (sindex = 0; sindex < selem; sindex++)
a2e3e93f
SM
4220 {
4221 if (bit (aarch64_insn_r->aarch64_insn, 22))
4222 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4223 else
4224 {
4225 record_buf_mem[mem_index++] = esize / 8;
4226 record_buf_mem[mem_index++] = address + addr_offset;
4227 }
4228 addr_offset = addr_offset + (esize / 8);
4229 reg_rt = (reg_rt + 1) % 32;
4230 }
dda83cd7 4231 }
99afc88b
OJ
4232 }
4233 /* Load/store multiple structure. */
4234 else
4235 {
4236 uint8_t selem, esize, rpt, elements;
4237 uint8_t eindex, rindex;
4238
4239 esize = 8 << size_bits;
4240 if (bit (aarch64_insn_r->aarch64_insn, 30))
dda83cd7 4241 elements = 128 / esize;
99afc88b 4242 else
dda83cd7 4243 elements = 64 / esize;
99afc88b
OJ
4244
4245 switch (opcode_bits)
dda83cd7
SM
4246 {
4247 /*LD/ST4 (4 Registers). */
4248 case 0:
4249 rpt = 1;
4250 selem = 4;
4251 break;
4252 /*LD/ST1 (4 Registers). */
4253 case 2:
4254 rpt = 4;
4255 selem = 1;
4256 break;
4257 /*LD/ST3 (3 Registers). */
4258 case 4:
4259 rpt = 1;
4260 selem = 3;
4261 break;
4262 /*LD/ST1 (3 Registers). */
4263 case 6:
4264 rpt = 3;
4265 selem = 1;
4266 break;
4267 /*LD/ST1 (1 Register). */
4268 case 7:
4269 rpt = 1;
4270 selem = 1;
4271 break;
4272 /*LD/ST2 (2 Registers). */
4273 case 8:
4274 rpt = 1;
4275 selem = 2;
4276 break;
4277 /*LD/ST1 (2 Registers). */
4278 case 10:
4279 rpt = 2;
4280 selem = 1;
4281 break;
4282 default:
4283 return AARCH64_RECORD_UNSUPPORTED;
4284 break;
4285 }
99afc88b 4286 for (rindex = 0; rindex < rpt; rindex++)
dda83cd7
SM
4287 for (eindex = 0; eindex < elements; eindex++)
4288 {
4289 uint8_t reg_tt, sindex;
4290 reg_tt = (reg_rt + rindex) % 32;
4291 for (sindex = 0; sindex < selem; sindex++)
4292 {
4293 if (bit (aarch64_insn_r->aarch64_insn, 22))
4294 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4295 else
4296 {
4297 record_buf_mem[mem_index++] = esize / 8;
4298 record_buf_mem[mem_index++] = address + addr_offset;
4299 }
4300 addr_offset = addr_offset + (esize / 8);
4301 reg_tt = (reg_tt + 1) % 32;
4302 }
4303 }
99afc88b
OJ
4304 }
4305
4306 if (bit (aarch64_insn_r->aarch64_insn, 23))
4307 record_buf[reg_index++] = reg_rn;
4308
4309 aarch64_insn_r->reg_rec_count = reg_index;
4310 aarch64_insn_r->mem_rec_count = mem_index / 2;
4311 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
dda83cd7 4312 record_buf_mem);
99afc88b 4313 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
dda83cd7 4314 record_buf);
99afc88b
OJ
4315 return AARCH64_RECORD_SUCCESS;
4316}
4317
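/* Illustrative note (not part of the original source): a worked example of
   the multiple-structure path above.  For "st1 {v0.4s}, [x1]" (opcode 0x7,
   so rpt = 1 and selem = 1; Q = 1 and size = 2, so esize = 32 and
   elements = 4), the store records four 4-byte memory entries at x1, x1+4,
   x1+8 and x1+12.  The load form ("ld1 {v0.4s}, [x1]") records V0 instead,
   and a post-indexed form additionally records the base register x1.  */
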
4318/* Record handler for load and store instructions. */
4319
4320static unsigned int
4748a9be 4321aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4322{
4323 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4324 uint8_t insn_bit23, insn_bit21;
4325 uint8_t opc, size_bits, ld_flag, vector_flag;
4326 uint32_t reg_rn, reg_rt, reg_rt2;
4327 uint64_t datasize, offset;
4328 uint32_t record_buf[8];
4329 uint64_t record_buf_mem[8];
4330 CORE_ADDR address;
4331
4332 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4333 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4334 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4335 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4336 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4337 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4338 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4339 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4340 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4341 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4342 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4343
4344 /* Load/store exclusive. */
4345 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4346 {
4347 if (record_debug)
b277c936 4348 debug_printf ("Process record: load/store exclusive\n");
99afc88b
OJ
4349
4350 if (ld_flag)
4351 {
4352 record_buf[0] = reg_rt;
4353 aarch64_insn_r->reg_rec_count = 1;
4354 if (insn_bit21)
4355 {
4356 record_buf[1] = reg_rt2;
4357 aarch64_insn_r->reg_rec_count = 2;
4358 }
4359 }
4360 else
4361 {
4362 if (insn_bit21)
4363 datasize = (8 << size_bits) * 2;
4364 else
4365 datasize = (8 << size_bits);
4366 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4367 &address);
4368 record_buf_mem[0] = datasize / 8;
4369 record_buf_mem[1] = address;
4370 aarch64_insn_r->mem_rec_count = 1;
4371 if (!insn_bit23)
4372 {
4373 /* Save register rs. */
4374 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4375 aarch64_insn_r->reg_rec_count = 1;
4376 }
4377 }
4378 }
4379 /* Load register (literal) instructions decoding. */
4380 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4381 {
4382 if (record_debug)
b277c936 4383 debug_printf ("Process record: load register (literal)\n");
99afc88b 4384 if (vector_flag)
dda83cd7 4385 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
99afc88b 4386 else
dda83cd7 4387 record_buf[0] = reg_rt;
99afc88b
OJ
4388 aarch64_insn_r->reg_rec_count = 1;
4389 }
4390 /* All types of load/store pair instructions decoding. */
4391 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4392 {
4393 if (record_debug)
b277c936 4394 debug_printf ("Process record: load/store pair\n");
99afc88b
OJ
4395
4396 if (ld_flag)
dda83cd7
SM
4397 {
4398 if (vector_flag)
4399 {
4400 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4401 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4402 }
4403 else
4404 {
4405 record_buf[0] = reg_rt;
4406 record_buf[1] = reg_rt2;
4407 }
4408 aarch64_insn_r->reg_rec_count = 2;
4409 }
99afc88b 4410 else
dda83cd7
SM
4411 {
4412 uint16_t imm7_off;
4413 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4414 if (!vector_flag)
4415 size_bits = size_bits >> 1;
4416 datasize = 8 << (2 + size_bits);
4417 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4418 offset = offset << (2 + size_bits);
4419 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4420 &address);
4421 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4422 {
4423 if (imm7_off & 0x40)
4424 address = address - offset;
4425 else
4426 address = address + offset;
4427 }
4428
4429 record_buf_mem[0] = datasize / 8;
4430 record_buf_mem[1] = address;
4431 record_buf_mem[2] = datasize / 8;
4432 record_buf_mem[3] = address + (datasize / 8);
4433 aarch64_insn_r->mem_rec_count = 2;
4434 }
99afc88b 4435 if (bit (aarch64_insn_r->aarch64_insn, 23))
dda83cd7 4436 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
99afc88b
OJ
4437 }
4438 /* Load/store register (unsigned immediate) instructions. */
4439 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4440 {
4441 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4442 if (!(opc >> 1))
33877125
YQ
4443 {
4444 if (opc & 0x01)
4445 ld_flag = 0x01;
4446 else
4447 ld_flag = 0x0;
4448 }
99afc88b 4449 else
33877125 4450 {
1e2b521d
YQ
4451 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4452 {
4453 /* PRFM (immediate) */
4454 return AARCH64_RECORD_SUCCESS;
4455 }
4456 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4457 {
4458 /* LDRSW (immediate) */
4459 ld_flag = 0x1;
4460 }
33877125 4461 else
1e2b521d
YQ
4462 {
4463 if (opc & 0x01)
4464 ld_flag = 0x01;
4465 else
4466 ld_flag = 0x0;
4467 }
33877125 4468 }
99afc88b
OJ
4469
4470 if (record_debug)
4471 {
b277c936
PL
4472 debug_printf ("Process record: load/store (unsigned immediate):"
4473 " size %x V %d opc %x\n", size_bits, vector_flag,
4474 opc);
99afc88b
OJ
4475 }
4476
4477 if (!ld_flag)
dda83cd7
SM
4478 {
4479 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4480 datasize = 8 << size_bits;
4481 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4482 &address);
4483 offset = offset << size_bits;
4484 address = address + offset;
4485
4486 record_buf_mem[0] = datasize >> 3;
4487 record_buf_mem[1] = address;
4488 aarch64_insn_r->mem_rec_count = 1;
4489 }
99afc88b 4490 else
dda83cd7
SM
4491 {
4492 if (vector_flag)
4493 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4494 else
4495 record_buf[0] = reg_rt;
4496 aarch64_insn_r->reg_rec_count = 1;
4497 }
99afc88b
OJ
4498 }
4499 /* Load/store register (register offset) instructions. */
5d98d3cd
YQ
4500 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4501 && insn_bits10_11 == 0x02 && insn_bit21)
99afc88b
OJ
4502 {
4503 if (record_debug)
b277c936 4504 debug_printf ("Process record: load/store (register offset)\n");
99afc88b
OJ
4505 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4506 if (!(opc >> 1))
dda83cd7
SM
4507 if (opc & 0x01)
4508 ld_flag = 0x01;
4509 else
4510 ld_flag = 0x0;
99afc88b 4511 else
dda83cd7
SM
4512 if (size_bits != 0x03)
4513 ld_flag = 0x01;
4514 else
4515 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
4516
4517 if (!ld_flag)
dda83cd7
SM
4518 {
4519 ULONGEST reg_rm_val;
4520
4521 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4522 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4523 if (bit (aarch64_insn_r->aarch64_insn, 12))
4524 offset = reg_rm_val << size_bits;
4525 else
4526 offset = reg_rm_val;
4527 datasize = 8 << size_bits;
4528 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4529 &address);
4530 address = address + offset;
4531 record_buf_mem[0] = datasize >> 3;
4532 record_buf_mem[1] = address;
4533 aarch64_insn_r->mem_rec_count = 1;
4534 }
99afc88b 4535 else
dda83cd7
SM
4536 {
4537 if (vector_flag)
4538 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4539 else
4540 record_buf[0] = reg_rt;
4541 aarch64_insn_r->reg_rec_count = 1;
4542 }
99afc88b
OJ
4543 }
4544 /* Load/store register (immediate and unprivileged) instructions. */
5d98d3cd
YQ
4545 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4546 && !insn_bit21)
99afc88b
OJ
4547 {
4548 if (record_debug)
4549 {
b277c936
PL
4550 debug_printf ("Process record: load/store "
4551 "(immediate and unprivileged)\n");
99afc88b
OJ
4552 }
4553 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4554 if (!(opc >> 1))
dda83cd7
SM
4555 if (opc & 0x01)
4556 ld_flag = 0x01;
4557 else
4558 ld_flag = 0x0;
99afc88b 4559 else
dda83cd7
SM
4560 if (size_bits != 0x03)
4561 ld_flag = 0x01;
4562 else
4563 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
4564
4565 if (!ld_flag)
dda83cd7
SM
4566 {
4567 uint16_t imm9_off;
4568 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4569 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4570 datasize = 8 << size_bits;
4571 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4572 &address);
4573 if (insn_bits10_11 != 0x01)
4574 {
4575 if (imm9_off & 0x0100)
4576 address = address - offset;
4577 else
4578 address = address + offset;
4579 }
4580 record_buf_mem[0] = datasize >> 3;
4581 record_buf_mem[1] = address;
4582 aarch64_insn_r->mem_rec_count = 1;
4583 }
99afc88b 4584 else
dda83cd7
SM
4585 {
4586 if (vector_flag)
4587 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4588 else
4589 record_buf[0] = reg_rt;
4590 aarch64_insn_r->reg_rec_count = 1;
4591 }
99afc88b 4592 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
dda83cd7 4593 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
99afc88b
OJ
4594 }
4595 /* Advanced SIMD load/store instructions. */
4596 else
4597 return aarch64_record_asimd_load_store (aarch64_insn_r);
4598
4599 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
dda83cd7 4600 record_buf_mem);
99afc88b 4601 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
dda83cd7 4602 record_buf);
99afc88b
OJ
4603 return AARCH64_RECORD_SUCCESS;
4604}
4605
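/* Illustrative note (not part of the original source): a worked example of
   the load/store pair path above.  For "stp x19, x20, [sp, #-32]!" the
   7-bit immediate is -4 (0x7c); the handler rebuilds the byte offset as
   ((~0x7c & 0x7f) + 1) << 3 = 32, subtracts it from SP because the sign
   bit is set, and records two 8-byte memory entries at sp-32 and sp-24.
   The writeback bit (bit 23) is set, so the base register SP is recorded
   as well.  */
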
4606/* Record handler for data processing SIMD and floating point instructions. */
4607
4608static unsigned int
4748a9be 4609aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4610{
4611 uint8_t insn_bit21, opcode, rmode, reg_rd;
4612 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4613 uint8_t insn_bits11_14;
4614 uint32_t record_buf[2];
4615
4616 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4617 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4618 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4619 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4620 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4621 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4622 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4623 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4624 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4625
4626 if (record_debug)
b277c936 4627 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
4628
4629 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4630 {
4631 /* Floating point - fixed point conversion instructions. */
4632 if (!insn_bit21)
4633 {
4634 if (record_debug)
b277c936 4635 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
4636
4637 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4638 record_buf[0] = reg_rd;
4639 else
4640 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4641 }
4642 /* Floating point - conditional compare instructions. */
4643 else if (insn_bits10_11 == 0x01)
4644 {
4645 if (record_debug)
b277c936 4646 debug_printf ("FP - conditional compare");
99afc88b
OJ
4647
4648 record_buf[0] = AARCH64_CPSR_REGNUM;
4649 }
4650 /* Floating point - data processing (2-source) and
dda83cd7 4651 conditional select instructions. */
99afc88b
OJ
4652 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4653 {
4654 if (record_debug)
b277c936 4655 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
4656
4657 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4658 }
4659 else if (insn_bits10_11 == 0x00)
4660 {
4661 /* Floating point - immediate instructions. */
4662 if ((insn_bits12_15 & 0x01) == 0x01
4663 || (insn_bits12_15 & 0x07) == 0x04)
4664 {
4665 if (record_debug)
b277c936 4666 debug_printf ("FP - immediate");
99afc88b
OJ
4667 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4668 }
4669 /* Floating point - compare instructions. */
4670 else if ((insn_bits12_15 & 0x03) == 0x02)
4671 {
4672 if (record_debug)
b277c936 4673 debug_printf ("FP - compare");
99afc88b
OJ
4674 record_buf[0] = AARCH64_CPSR_REGNUM;
4675 }
4676 /* Floating point - integer conversions instructions. */
f62fce35 4677 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
4678 {
4679 /* Convert float to integer instruction. */
4680 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4681 {
4682 if (record_debug)
b277c936 4683 debug_printf ("float to int conversion");
99afc88b
OJ
4684
4685 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4686 }
4687 /* Convert integer to float instruction. */
4688 else if ((opcode >> 1) == 0x01 && !rmode)
4689 {
4690 if (record_debug)
b277c936 4691 debug_printf ("int to float conversion");
99afc88b
OJ
4692
4693 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4694 }
4695 /* Move float to integer instruction. */
4696 else if ((opcode >> 1) == 0x03)
4697 {
4698 if (record_debug)
b277c936 4699 debug_printf ("move float to int");
99afc88b
OJ
4700
4701 if (!(opcode & 0x01))
4702 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4703 else
4704 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4705 }
f62fce35
YQ
4706 else
4707 return AARCH64_RECORD_UNKNOWN;
dda83cd7 4708 }
f62fce35
YQ
4709 else
4710 return AARCH64_RECORD_UNKNOWN;
dda83cd7 4711 }
f62fce35
YQ
4712 else
4713 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
4714 }
4715 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4716 {
4717 if (record_debug)
b277c936 4718 debug_printf ("SIMD copy");
99afc88b
OJ
4719
4720 /* Advanced SIMD copy instructions. */
4721 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4722 && !bit (aarch64_insn_r->aarch64_insn, 15)
4723 && bit (aarch64_insn_r->aarch64_insn, 10))
4724 {
4725 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4726 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4727 else
4728 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4729 }
4730 else
4731 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4732 }
4733 /* All remaining floating point or advanced SIMD instructions. */
4734 else
4735 {
4736 if (record_debug)
b277c936 4737 debug_printf ("all remain");
99afc88b
OJ
4738
4739 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4740 }
4741
4742 if (record_debug)
b277c936 4743 debug_printf ("\n");
99afc88b 4744
bfbe4b84 4745 /* Record the V/X register. */
99afc88b 4746 aarch64_insn_r->reg_rec_count++;
bfbe4b84
LM
4747
4748 /* Some of these instructions may set bits in the FPSR, so record it
4749 too. */
4750 record_buf[1] = AARCH64_FPSR_REGNUM;
4751 aarch64_insn_r->reg_rec_count++;
4752
4753 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
99afc88b
OJ
4754 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4755 record_buf);
4756 return AARCH64_RECORD_SUCCESS;
4757}
4758
4759/* Decode the instruction type and invoke its record handler. */
4760
4761static unsigned int
4748a9be 4762aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4763{
4764 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4765
4766 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4767 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4768 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4769 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4770
4771 /* Data processing - immediate instructions. */
4772 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4773 return aarch64_record_data_proc_imm (aarch64_insn_r);
4774
4775 /* Branch, exception generation and system instructions. */
4776 if (ins_bit26 && !ins_bit27 && ins_bit28)
4777 return aarch64_record_branch_except_sys (aarch64_insn_r);
4778
4779 /* Load and store instructions. */
4780 if (!ins_bit25 && ins_bit27)
4781 return aarch64_record_load_store (aarch64_insn_r);
4782
4783 /* Data processing - register instructions. */
4784 if (ins_bit25 && !ins_bit26 && ins_bit27)
4785 return aarch64_record_data_proc_reg (aarch64_insn_r);
4786
4787 /* Data processing - SIMD and floating point instructions. */
4788 if (ins_bit25 && ins_bit26 && ins_bit27)
4789 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4790
4791 return AARCH64_RECORD_UNSUPPORTED;
4792}
4793
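/* Illustrative note (not part of the original source): the dispatch above
   keys purely on bits 25-28 of the instruction.  For the earlier example
   "adds x0, x1, x2" (0xab020020), bits 25 and 27 are set while bit 26 is
   clear, so the instruction is routed to aarch64_record_data_proc_reg.  A
   load such as "ldr x0, [x1]" has bit 27 set and bit 25 clear and goes to
   aarch64_record_load_store instead.  */
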
4794/* Clean up the register and memory allocations made for RECORD. */
4795
4796static void
4748a9be 4797deallocate_reg_mem (aarch64_insn_decode_record *record)
99afc88b
OJ
4798{
4799 xfree (record->aarch64_regs);
4800 xfree (record->aarch64_mems);
4801}
4802
1e2b521d
YQ
4803#if GDB_SELF_TEST
4804namespace selftests {
4805
4806static void
4807aarch64_process_record_test (void)
4808{
4809 struct gdbarch_info info;
4810 uint32_t ret;
4811
1e2b521d
YQ
4812 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4813
4814 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4815 SELF_CHECK (gdbarch != NULL);
4816
4748a9be 4817 aarch64_insn_decode_record aarch64_record;
1e2b521d 4818
4748a9be 4819 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
1e2b521d
YQ
4820 aarch64_record.regcache = NULL;
4821 aarch64_record.this_addr = 0;
4822 aarch64_record.gdbarch = gdbarch;
4823
4824 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4825 aarch64_record.aarch64_insn = 0xf9800020;
4826 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4827 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4828 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4829 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4830
4831 deallocate_reg_mem (&aarch64_record);
4832}
4833
4834} // namespace selftests
4835#endif /* GDB_SELF_TEST */
4836
99afc88b
OJ
4837/* Parse the current instruction and record in record_arch_list the values
4838 of the registers and memory that the instruction will change.
4839 Return -1 if something goes wrong. */
4840
4841int
4842aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4843 CORE_ADDR insn_addr)
4844{
4845 uint32_t rec_no = 0;
4846 uint8_t insn_size = 4;
4847 uint32_t ret = 0;
99afc88b 4848 gdb_byte buf[insn_size];
4748a9be 4849 aarch64_insn_decode_record aarch64_record;
99afc88b
OJ
4850
4851 memset (&buf[0], 0, insn_size);
4748a9be 4852 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
99afc88b
OJ
4853 target_read_memory (insn_addr, &buf[0], insn_size);
4854 aarch64_record.aarch64_insn
4855 = (uint32_t) extract_unsigned_integer (&buf[0],
4856 insn_size,
4857 gdbarch_byte_order (gdbarch));
4858 aarch64_record.regcache = regcache;
4859 aarch64_record.this_addr = insn_addr;
4860 aarch64_record.gdbarch = gdbarch;
4861
4862 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4863 if (ret == AARCH64_RECORD_UNSUPPORTED)
4864 {
6cb06a8c
TT
4865 gdb_printf (gdb_stderr,
4866 _("Process record does not support instruction "
4867 "0x%0x at address %s.\n"),
4868 aarch64_record.aarch64_insn,
4869 paddress (gdbarch, insn_addr));
99afc88b
OJ
4870 ret = -1;
4871 }
4872
4873 if (0 == ret)
4874 {
4875 /* Record registers. */
4876 record_full_arch_list_add_reg (aarch64_record.regcache,
4877 AARCH64_PC_REGNUM);
4878 /* Always record register CPSR. */
4879 record_full_arch_list_add_reg (aarch64_record.regcache,
4880 AARCH64_CPSR_REGNUM);
4881 if (aarch64_record.aarch64_regs)
4882 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4883 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4884 aarch64_record.aarch64_regs[rec_no]))
4885 ret = -1;
4886
4887 /* Record memories. */
4888 if (aarch64_record.aarch64_mems)
4889 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4890 if (record_full_arch_list_add_mem
4891 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4892 aarch64_record.aarch64_mems[rec_no].len))
4893 ret = -1;
4894
4895 if (record_full_arch_list_add_end ())
4896 ret = -1;
4897 }
4898
4899 deallocate_reg_mem (&aarch64_record);
4900 return ret;
4901}