]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/aarch64-tdep.c
sme: Enable SME registers and pseudo-registers
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
213516ef 3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
83b6e1f1 24#include "language.h"
07b287a0
MS
25#include "gdbcmd.h"
26#include "gdbcore.h"
4de283e4 27#include "dis-asm.h"
d55e5aa6
TT
28#include "regcache.h"
29#include "reggroups.h"
4de283e4
TT
30#include "value.h"
31#include "arch-utils.h"
32#include "osabi.h"
33#include "frame-unwind.h"
34#include "frame-base.h"
d55e5aa6 35#include "trad-frame.h"
4de283e4
TT
36#include "objfiles.h"
37#include "dwarf2.h"
82ca8957 38#include "dwarf2/frame.h"
4de283e4
TT
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
07b287a0 42#include "user-regs.h"
4de283e4 43#include "ax-gdb.h"
268a13a5 44#include "gdbsupport/selftest.h"
4de283e4
TT
45
46#include "aarch64-tdep.h"
47#include "aarch64-ravenscar-thread.h"
48
4de283e4
TT
49#include "record.h"
50#include "record-full.h"
51#include "arch/aarch64-insn.h"
0d12e84c 52#include "gdbarch.h"
4de283e4
TT
53
54#include "opcode/aarch64.h"
55#include <algorithm>
0ee6b1c5 56#include <unordered_map>
f77ee802 57
ef139898
LM
58/* For inferior_ptid and current_inferior (). */
59#include "inferior.h"
ca65640f
LM
60/* For std::sqrt and std::pow. */
61#include <cmath>
ef139898 62
ea92689a
AH
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4

/* All possible aarch64 target descriptors, keyed by the feature set they
   were built for.  Acts as a cache so each distinct feature combination is
   only turned into a target description once.  */
static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
95228a0d 69
ea2f6fad
TV
/* The standard register names, and all the valid aliases for them.
   We're not adding fp here, that name is already taken, see
   _initialize_frame_reg.  */
static const struct
{
  /* Alias to register under the canonical name below.  */
  const char *const name;
  /* The raw register number the alias resolves to.  */
  int regnum;
} aarch64_register_aliases[] =
{
  /* Link register alias for x30.  */
  {"lr", AARCH64_LR_REGNUM},
  /* SP is the canonical name for x31 according to aarch64_r_register_names,
     so we're adding an x31 alias for sp.  */
  {"x31", AARCH64_SP_REGNUM},
  /* specials: aliases for x16 and x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
88
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
104
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
121
739e8682
AH
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
142
76bed0fd
AH
/* Pointer authentication mask registers.  The first two entries are always
   present; the "high" pair is only present on targets that expose masks for
   kernel-space (high half) pointers as well.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer, low half/user pointers.  */
  "pauth_dmask",
  /* Authentication mask for code pointer, low half/user pointers.  */
  "pauth_cmask",
  /* Authentication mask for data pointer, high half / kernel pointers.  */
  "pauth_dmask_high",
  /* Authentication mask for code pointer, high half / kernel pointers.  */
  "pauth_cmask_high"
};
154
5e984dbf
LM
/* Memory Tagging Extension (MTE) registers.  */
static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};
160
29e09a42
TV
161static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);
162
07b287a0
MS
/* AArch64 prologue cache structure.  Filled in by the prologue analyzer
   and consumed by the frame unwinders.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};
194
ca65640f
LM
/* Holds information used to read/write from/to ZA
   pseudo-registers.

   With this information, the read/write code can be simplified so it
   deals only with the required information to map a ZA pseudo-register
   to the exact bytes into the ZA contents buffer.  Otherwise we'd need
   to use a lot of conditionals.  */

struct za_offsets
{
  /* Offset, into ZA, of the starting byte of the pseudo-register.  */
  size_t starting_offset;
  /* The size of the contiguous chunks of the pseudo-register.  */
  size_t chunk_size;
  /* The number of pseudo-register chunks contained in ZA.  */
  size_t chunks;
  /* The offset between each contiguous chunk.  */
  size_t stride_size;
};
214
/* Holds data that is helpful to determine the individual fields that make
   up the names of the ZA pseudo-registers.  It is also very helpful to
   determine offsets, stride and sizes for reading ZA tiles and tile
   slices.  */

struct za_pseudo_encoding
{
  /* The slice index (0 ~ svl).  Only used for tile slices.  */
  uint8_t slice_index;
  /* The tile number (0 ~ 15).  */
  uint8_t tile_index;
  /* Direction (horizontal/vertical).  Only used for tile slices.  */
  bool horizontal;
  /* Qualifier index (0 ~ 4).  These map to B, H, S, D and Q.  */
  uint8_t qualifier_index;
};
231
07b287a0
MS
/* Show hook for the "show debug aarch64" setting: print the current
   state (VALUE is the setting's textual value) to FILE.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}
238
ffdbe864
YQ
namespace {

/* Abstract instruction reader.  Lets the prologue analyzer read
   instructions either from target memory or, in self tests, from a
   canned array.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction of LEN bytes at MEMADDR, using BYTE_ORDER.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
264
3d31bc39
AH
/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
			 frame_info_ptr this_frame, CORE_ADDR addr)
{
  /* Only unmask when the target has pauth and the unwound RA_SIGN_STATE
     pseudo register says the return address is currently signed.  */
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->ra_sign_state_regnum))
    {
      /* VA range select (bit 55) tells us whether to use the low half masks
	 or the high half masks.  The high masks are only available when the
	 target exposes more than 2 pauth registers.  */
      int cmask_num;
      if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK)
	cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
      else
	cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

      /* By default, we assume TBI and discard the top 8 bits plus the VA range
	 select bit (55).  */
      CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
      mask |= frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = aarch64_remove_top_bits (addr, mask);

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
297
aa7ca1bb
AH
298/* Implement the "get_pc_address_flags" gdbarch method. */
299
300static std::string
bd2b40ac 301aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
aa7ca1bb
AH
302{
303 if (pc != 0 && get_frame_pc_masked (frame))
304 return "PAC";
305
306 return "";
307}
308
07b287a0
MS
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Instructions are fetched through READER, [START, LIMIT) bounds the
   scan, and CACHE (may be NULL) receives the frame register, frame size
   and saved-register locations.  Returns the address at which the scan
   stopped, i.e. the end of the recognized prologue.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  The D registers are
     appended after the X registers in the same pv_t array.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      /* Stop on anything the opcode library cannot decode.  */
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  /* ADD/SUB (immediate): track constant adjustments such as stack
	     allocation and FP setup.  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == addsub_ext
	       && strcmp ("sub", inst.opcode->name) == 0)
	{
	  /* SUB (extended register).  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);

	  regs[rd] = pv_subtract (regs[rn], regs[rm]);
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  unsigned rd = inst.operands[0].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
	  gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[rd] = pv_constant (inst.operands[1].imm.value
				  << inst.operands[1].shifter.amount);
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  /* ORR (shifted register): only an unshifted "mov rd, rm" with SP
	     as source is understood; anything else ends the scan.  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  /* STUR: store of a single register with unscaled offset.  */
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* FP/SIMD registers are tracked after the X registers.  */
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  /* System instructions: recognize the pointer-authentication
	     hint instructions (which toggle the RA_SIGN_STATE pseudo
	     register) and BTI; anything else ends the scan.  */
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record where the core X registers were saved on the stack.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  /* Likewise for the D registers; their saved_regs slots live past the
     raw registers (gdbarch_num_regs offset).  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}
629
4d9a9006
YQ
630static CORE_ADDR
631aarch64_analyze_prologue (struct gdbarch *gdbarch,
632 CORE_ADDR start, CORE_ADDR limit,
633 struct aarch64_prologue_cache *cache)
634{
635 instruction_reader reader;
636
637 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
638 reader);
639}
640
641#if GDB_SELF_TEST
642
643namespace selftests {
644
/* Instruction reader from manually cooked instruction sequences.  Serves
   the prologue analyzer a fixed array of encodings instead of reading
   target memory.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  /* The canned instruction array and its element count.  */
  const uint32_t *m_insns;
  size_t m_insns_size;
};
669
/* Self test for aarch64_analyze_prologue: feed canned prologues through
   the analyzer and check the computed frame register, frame size and
   saved-register locations.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
951} // namespace selftests
952#endif /* GDB_SELF_TEST */
953
07b287a0
MS
954/* Implement the "skip_prologue" gdbarch method. */
955
956static CORE_ADDR
957aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
958{
22f2cf64 959 CORE_ADDR func_addr, func_end_addr, limit_pc;
07b287a0
MS
960
961 /* See if we can determine the end of the prologue via the symbol
962 table. If so, then return either PC, or the PC after the
963 prologue, whichever is greater. */
22f2cf64
TV
964 bool func_addr_found
965 = find_pc_partial_function (pc, NULL, &func_addr, &func_end_addr);
966
967 if (func_addr_found)
07b287a0
MS
968 {
969 CORE_ADDR post_prologue_pc
970 = skip_prologue_using_sal (gdbarch, func_addr);
971
972 if (post_prologue_pc != 0)
325fac50 973 return std::max (pc, post_prologue_pc);
07b287a0
MS
974 }
975
976 /* Can't determine prologue from the symbol table, need to examine
977 instructions. */
978
979 /* Find an upper limit on the function prologue using the debug
980 information. If the debug information could not be used to
981 provide that bound, then use an arbitrary large number as the
982 upper bound. */
983 limit_pc = skip_prologue_using_sal (gdbarch, pc);
984 if (limit_pc == 0)
985 limit_pc = pc + 128; /* Magic. */
986
22f2cf64 987 limit_pc
05d63baf 988 = func_end_addr == 0 ? limit_pc : std::min (limit_pc, func_end_addr - 4);
22f2cf64 989
07b287a0
MS
990 /* Try disassembling prologue. */
991 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
992}
993
994/* Scan the function prologue for THIS_FRAME and populate the prologue
995 cache CACHE. */
996
997static void
bd2b40ac 998aarch64_scan_prologue (frame_info_ptr this_frame,
07b287a0
MS
999 struct aarch64_prologue_cache *cache)
1000{
1001 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1002 CORE_ADDR prologue_start;
1003 CORE_ADDR prologue_end;
1004 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1005 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1006
db634143
PL
1007 cache->prev_pc = prev_pc;
1008
07b287a0
MS
1009 /* Assume we do not find a frame. */
1010 cache->framereg = -1;
1011 cache->framesize = 0;
1012
1013 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1014 &prologue_end))
1015 {
1016 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1017
1018 if (sal.line == 0)
1019 {
1020 /* No line info so use the current PC. */
1021 prologue_end = prev_pc;
1022 }
1023 else if (sal.end < prologue_end)
1024 {
1025 /* The next line begins after the function end. */
1026 prologue_end = sal.end;
1027 }
1028
325fac50 1029 prologue_end = std::min (prologue_end, prev_pc);
07b287a0
MS
1030 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1031 }
1032 else
1033 {
1034 CORE_ADDR frame_loc;
07b287a0
MS
1035
1036 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
1037 if (frame_loc == 0)
1038 return;
1039
1040 cache->framereg = AARCH64_FP_REGNUM;
1041 cache->framesize = 16;
098caef4
LM
1042 cache->saved_regs[29].set_addr (0);
1043 cache->saved_regs[30].set_addr (8);
07b287a0
MS
1044 }
1045}
1046
7dfa3edc
PL
1047/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
1048 function may throw an exception if the inferior's registers or memory is
1049 not available. */
07b287a0 1050
7dfa3edc 1051static void
bd2b40ac 1052aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
7dfa3edc 1053 struct aarch64_prologue_cache *cache)
07b287a0 1054{
07b287a0
MS
1055 CORE_ADDR unwound_fp;
1056 int reg;
1057
07b287a0
MS
1058 aarch64_scan_prologue (this_frame, cache);
1059
1060 if (cache->framereg == -1)
7dfa3edc 1061 return;
07b287a0
MS
1062
1063 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1064 if (unwound_fp == 0)
7dfa3edc 1065 return;
07b287a0 1066
29e09a42
TV
1067 cache->prev_sp = unwound_fp;
1068 if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
1069 cache->prev_pc))
1070 cache->prev_sp += cache->framesize;
07b287a0
MS
1071
1072 /* Calculate actual addresses of saved registers using offsets
1073 determined by aarch64_analyze_prologue. */
1074 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
a9a87d35 1075 if (cache->saved_regs[reg].is_addr ())
098caef4
LM
1076 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
1077 + cache->prev_sp);
07b287a0 1078
db634143
PL
1079 cache->func = get_frame_func (this_frame);
1080
7dfa3edc
PL
1081 cache->available_p = 1;
1082}
1083
1084/* Allocate and fill in *THIS_CACHE with information about the prologue of
1085 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1086 Return a pointer to the current aarch64_prologue_cache in
1087 *THIS_CACHE. */
1088
1089static struct aarch64_prologue_cache *
bd2b40ac 1090aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
7dfa3edc
PL
1091{
1092 struct aarch64_prologue_cache *cache;
1093
1094 if (*this_cache != NULL)
9a3c8263 1095 return (struct aarch64_prologue_cache *) *this_cache;
7dfa3edc
PL
1096
1097 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1098 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1099 *this_cache = cache;
1100
a70b8144 1101 try
7dfa3edc
PL
1102 {
1103 aarch64_make_prologue_cache_1 (this_frame, cache);
1104 }
230d2906 1105 catch (const gdb_exception_error &ex)
7dfa3edc
PL
1106 {
1107 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1108 throw;
7dfa3edc 1109 }
7dfa3edc 1110
07b287a0
MS
1111 return cache;
1112}
1113
7dfa3edc
PL
1114/* Implement the "stop_reason" frame_unwind method. */
1115
1116static enum unwind_stop_reason
bd2b40ac 1117aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
7dfa3edc
PL
1118 void **this_cache)
1119{
1120 struct aarch64_prologue_cache *cache
1121 = aarch64_make_prologue_cache (this_frame, this_cache);
1122
1123 if (!cache->available_p)
1124 return UNWIND_UNAVAILABLE;
1125
1126 /* Halt the backtrace at "_start". */
345bd07c 1127 gdbarch *arch = get_frame_arch (this_frame);
08106042 1128 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
345bd07c 1129 if (cache->prev_pc <= tdep->lowest_pc)
7dfa3edc
PL
1130 return UNWIND_OUTERMOST;
1131
1132 /* We've hit a wall, stop. */
1133 if (cache->prev_sp == 0)
1134 return UNWIND_OUTERMOST;
1135
1136 return UNWIND_NO_REASON;
1137}
1138
07b287a0
MS
1139/* Our frame ID for a normal frame is the current function's starting
1140 PC and the caller's SP when we were called. */
1141
1142static void
bd2b40ac 1143aarch64_prologue_this_id (frame_info_ptr this_frame,
07b287a0
MS
1144 void **this_cache, struct frame_id *this_id)
1145{
7c8edfae
PL
1146 struct aarch64_prologue_cache *cache
1147 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 1148
7dfa3edc
PL
1149 if (!cache->available_p)
1150 *this_id = frame_id_build_unavailable_stack (cache->func);
1151 else
1152 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
1153}
1154
1155/* Implement the "prev_register" frame_unwind method. */
1156
1157static struct value *
bd2b40ac 1158aarch64_prologue_prev_register (frame_info_ptr this_frame,
07b287a0
MS
1159 void **this_cache, int prev_regnum)
1160{
7c8edfae
PL
1161 struct aarch64_prologue_cache *cache
1162 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1163
1164 /* If we are asked to unwind the PC, then we need to return the LR
1165 instead. The prologue may save PC, but it will point into this
1166 frame's prologue, not the next frame's resume location. */
1167 if (prev_regnum == AARCH64_PC_REGNUM)
1168 {
1169 CORE_ADDR lr;
17e116a7 1170 struct gdbarch *gdbarch = get_frame_arch (this_frame);
345bd07c 1171 aarch64_gdbarch_tdep *tdep
08106042 1172 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
1173
1174 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
17e116a7
AH
1175
1176 if (tdep->has_pauth ()
c9cd8ca4 1177 && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
3d31bc39 1178 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
17e116a7 1179
07b287a0
MS
1180 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1181 }
1182
1183 /* SP is generally not saved to the stack, but this frame is
1184 identified by the next frame's stack pointer at the time of the
1185 call. The value was already reconstructed into PREV_SP. */
1186 /*
dda83cd7
SM
1187 +----------+ ^
1188 | saved lr | |
07b287a0
MS
1189 +->| saved fp |--+
1190 | | |
1191 | | | <- Previous SP
1192 | +----------+
1193 | | saved lr |
1194 +--| saved fp |<- FP
dda83cd7
SM
1195 | |
1196 | |<- SP
1197 +----------+ */
07b287a0
MS
1198 if (prev_regnum == AARCH64_SP_REGNUM)
1199 return frame_unwind_got_constant (this_frame, prev_regnum,
1200 cache->prev_sp);
1201
1202 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1203 prev_regnum);
1204}
1205
1206/* AArch64 prologue unwinder. */
6bd434d6 1207static frame_unwind aarch64_prologue_unwind =
07b287a0 1208{
a154d838 1209 "aarch64 prologue",
07b287a0 1210 NORMAL_FRAME,
7dfa3edc 1211 aarch64_prologue_frame_unwind_stop_reason,
07b287a0
MS
1212 aarch64_prologue_this_id,
1213 aarch64_prologue_prev_register,
1214 NULL,
1215 default_frame_sniffer
1216};
1217
8b61f75d
PL
1218/* Allocate and fill in *THIS_CACHE with information about the prologue of
1219 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1220 Return a pointer to the current aarch64_prologue_cache in
1221 *THIS_CACHE. */
07b287a0
MS
1222
1223static struct aarch64_prologue_cache *
bd2b40ac 1224aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
07b287a0 1225{
07b287a0 1226 struct aarch64_prologue_cache *cache;
8b61f75d
PL
1227
1228 if (*this_cache != NULL)
9a3c8263 1229 return (struct aarch64_prologue_cache *) *this_cache;
07b287a0
MS
1230
1231 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1232 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
8b61f75d 1233 *this_cache = cache;
07b287a0 1234
a70b8144 1235 try
02a2a705
PL
1236 {
1237 cache->prev_sp = get_frame_register_unsigned (this_frame,
1238 AARCH64_SP_REGNUM);
1239 cache->prev_pc = get_frame_pc (this_frame);
1240 cache->available_p = 1;
1241 }
230d2906 1242 catch (const gdb_exception_error &ex)
02a2a705
PL
1243 {
1244 if (ex.error != NOT_AVAILABLE_ERROR)
eedc3f4f 1245 throw;
02a2a705 1246 }
07b287a0
MS
1247
1248 return cache;
1249}
1250
02a2a705
PL
1251/* Implement the "stop_reason" frame_unwind method. */
1252
1253static enum unwind_stop_reason
bd2b40ac 1254aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
02a2a705
PL
1255 void **this_cache)
1256{
1257 struct aarch64_prologue_cache *cache
1258 = aarch64_make_stub_cache (this_frame, this_cache);
1259
1260 if (!cache->available_p)
1261 return UNWIND_UNAVAILABLE;
1262
1263 return UNWIND_NO_REASON;
1264}
1265
07b287a0
MS
1266/* Our frame ID for a stub frame is the current SP and LR. */
1267
1268static void
bd2b40ac 1269aarch64_stub_this_id (frame_info_ptr this_frame,
07b287a0
MS
1270 void **this_cache, struct frame_id *this_id)
1271{
8b61f75d
PL
1272 struct aarch64_prologue_cache *cache
1273 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1274
02a2a705
PL
1275 if (cache->available_p)
1276 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1277 else
1278 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1279}
1280
1281/* Implement the "sniffer" frame_unwind method. */
1282
1283static int
1284aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
bd2b40ac 1285 frame_info_ptr this_frame,
07b287a0
MS
1286 void **this_prologue_cache)
1287{
1288 CORE_ADDR addr_in_block;
1289 gdb_byte dummy[4];
1290
1291 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1292 if (in_plt_section (addr_in_block)
07b287a0
MS
1293 /* We also use the stub winder if the target memory is unreadable
1294 to avoid having the prologue unwinder trying to read it. */
1295 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1296 return 1;
1297
1298 return 0;
1299}
1300
1301/* AArch64 stub unwinder. */
6bd434d6 1302static frame_unwind aarch64_stub_unwind =
07b287a0 1303{
a154d838 1304 "aarch64 stub",
07b287a0 1305 NORMAL_FRAME,
02a2a705 1306 aarch64_stub_frame_unwind_stop_reason,
07b287a0
MS
1307 aarch64_stub_this_id,
1308 aarch64_prologue_prev_register,
1309 NULL,
1310 aarch64_stub_unwind_sniffer
1311};
1312
1313/* Return the frame base address of *THIS_FRAME. */
1314
1315static CORE_ADDR
bd2b40ac 1316aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
07b287a0 1317{
7c8edfae
PL
1318 struct aarch64_prologue_cache *cache
1319 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1320
1321 return cache->prev_sp - cache->framesize;
1322}
1323
1324/* AArch64 default frame base information. */
6bd434d6 1325static frame_base aarch64_normal_base =
07b287a0
MS
1326{
1327 &aarch64_prologue_unwind,
1328 aarch64_normal_frame_base,
1329 aarch64_normal_frame_base,
1330 aarch64_normal_frame_base
1331};
1332
07b287a0
MS
1333/* Return the value of the REGNUM register in the previous frame of
1334 *THIS_FRAME. */
1335
1336static struct value *
bd2b40ac 1337aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
07b287a0
MS
1338 void **this_cache, int regnum)
1339{
345bd07c 1340 gdbarch *arch = get_frame_arch (this_frame);
08106042 1341 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
07b287a0
MS
1342 CORE_ADDR lr;
1343
1344 switch (regnum)
1345 {
1346 case AARCH64_PC_REGNUM:
1347 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
3d31bc39 1348 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
07b287a0
MS
1349 return frame_unwind_got_constant (this_frame, regnum, lr);
1350
1351 default:
f34652de 1352 internal_error (_("Unexpected register %d"), regnum);
07b287a0
MS
1353 }
1354}
1355
11e1b75f
AH
1356static const unsigned char op_lit0 = DW_OP_lit0;
1357static const unsigned char op_lit1 = DW_OP_lit1;
1358
07b287a0
MS
1359/* Implement the "init_reg" dwarf2_frame_ops method. */
1360
1361static void
1362aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1363 struct dwarf2_frame_state_reg *reg,
bd2b40ac 1364 frame_info_ptr this_frame)
07b287a0 1365{
08106042 1366 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
11e1b75f 1367
07b287a0
MS
1368 switch (regnum)
1369 {
1370 case AARCH64_PC_REGNUM:
1371 reg->how = DWARF2_FRAME_REG_FN;
1372 reg->loc.fn = aarch64_dwarf2_prev_register;
11e1b75f
AH
1373 return;
1374
07b287a0
MS
1375 case AARCH64_SP_REGNUM:
1376 reg->how = DWARF2_FRAME_REG_CFA;
11e1b75f
AH
1377 return;
1378 }
1379
1380 /* Init pauth registers. */
1381 if (tdep->has_pauth ())
1382 {
c9cd8ca4 1383 if (regnum == tdep->ra_sign_state_regnum)
11e1b75f
AH
1384 {
1385 /* Initialize RA_STATE to zero. */
1386 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1387 reg->loc.exp.start = &op_lit0;
1388 reg->loc.exp.len = 1;
1389 return;
1390 }
6d002087
LM
1391 else if (regnum >= tdep->pauth_reg_base
1392 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
11e1b75f
AH
1393 {
1394 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1395 return;
1396 }
07b287a0
MS
1397 }
1398}
1399
11e1b75f
AH
1400/* Implement the execute_dwarf_cfa_vendor_op method. */
1401
1402static bool
1403aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1404 struct dwarf2_frame_state *fs)
1405{
08106042 1406 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
11e1b75f
AH
1407 struct dwarf2_frame_state_reg *ra_state;
1408
8fca4da0 1409 if (op == DW_CFA_AARCH64_negate_ra_state)
11e1b75f 1410 {
8fca4da0
AH
1411 /* On systems without pauth, treat as a nop. */
1412 if (!tdep->has_pauth ())
1413 return true;
1414
11e1b75f 1415 /* Allocate RA_STATE column if it's not allocated yet. */
c9cd8ca4 1416 fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);
11e1b75f
AH
1417
1418 /* Toggle the status of RA_STATE between 0 and 1. */
c9cd8ca4 1419 ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
11e1b75f
AH
1420 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1421
1422 if (ra_state->loc.exp.start == nullptr
1423 || ra_state->loc.exp.start == &op_lit0)
1424 ra_state->loc.exp.start = &op_lit1;
1425 else
1426 ra_state->loc.exp.start = &op_lit0;
1427
1428 ra_state->loc.exp.len = 1;
1429
1430 return true;
1431 }
1432
1433 return false;
1434}
1435
5133a315
LM
1436/* Used for matching BRK instructions for AArch64. */
1437static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
1438static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1439
1440/* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1441
1442static bool
1443aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1444{
1445 const uint32_t insn_len = 4;
1446 gdb_byte target_mem[4];
1447
1448 /* Enable the automatic memory restoration from breakpoints while
1449 we read the memory. Otherwise we may find temporary breakpoints, ones
1450 inserted by GDB, and flag them as permanent breakpoints. */
1451 scoped_restore restore_memory
1452 = make_scoped_restore_show_memory_breakpoints (0);
1453
1454 if (target_read_memory (address, target_mem, insn_len) == 0)
1455 {
1456 uint32_t insn =
1457 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
1458 gdbarch_byte_order_for_code (gdbarch));
1459
1460 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1461 of such instructions with different immediate values. Different OS'
1462 may use a different variation, but they have the same outcome. */
1463 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1464 }
1465
1466 return false;
1467}
1468
07b287a0
MS
1469/* When arguments must be pushed onto the stack, they go on in reverse
1470 order. The code below implements a FILO (stack) to do this. */
1471
89055eaa 1472struct stack_item_t
07b287a0 1473{
c3c87445
YQ
1474 /* Value to pass on stack. It can be NULL if this item is for stack
1475 padding. */
7c543f7b 1476 const gdb_byte *data;
07b287a0
MS
1477
1478 /* Size in bytes of value to pass on stack. */
1479 int len;
89055eaa 1480};
07b287a0 1481
b907456c
AB
1482/* Implement the gdbarch type alignment method, overrides the generic
1483 alignment algorithm for anything that is aarch64 specific. */
07b287a0 1484
b907456c
AB
1485static ULONGEST
1486aarch64_type_align (gdbarch *gdbarch, struct type *t)
07b287a0 1487{
07b287a0 1488 t = check_typedef (t);
bd63c870 1489 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
07b287a0 1490 {
b907456c
AB
1491 /* Use the natural alignment for vector types (the same for
1492 scalar type), but the maximum alignment is 128-bit. */
df86565b 1493 if (t->length () > 16)
b907456c 1494 return 16;
238f2452 1495 else
df86565b 1496 return t->length ();
07b287a0 1497 }
b907456c
AB
1498
1499 /* Allow the common code to calculate the alignment. */
1500 return 0;
07b287a0
MS
1501}
1502
ea92689a
AH
1503/* Worker function for aapcs_is_vfp_call_or_return_candidate.
1504
1505 Return the number of register required, or -1 on failure.
1506
1507 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1508 to the element, else fail if the type of this element does not match the
1509 existing value. */
1510
1511static int
1512aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1513 struct type **fundamental_type)
1514{
1515 if (type == nullptr)
1516 return -1;
1517
78134374 1518 switch (type->code ())
ea92689a
AH
1519 {
1520 case TYPE_CODE_FLT:
81657e58 1521 case TYPE_CODE_DECFLOAT:
df86565b 1522 if (type->length () > 16)
ea92689a
AH
1523 return -1;
1524
1525 if (*fundamental_type == nullptr)
1526 *fundamental_type = type;
df86565b 1527 else if (type->length () != (*fundamental_type)->length ()
78134374 1528 || type->code () != (*fundamental_type)->code ())
ea92689a
AH
1529 return -1;
1530
1531 return 1;
1532
1533 case TYPE_CODE_COMPLEX:
1534 {
27710edb 1535 struct type *target_type = check_typedef (type->target_type ());
df86565b 1536 if (target_type->length () > 16)
ea92689a
AH
1537 return -1;
1538
1539 if (*fundamental_type == nullptr)
1540 *fundamental_type = target_type;
df86565b 1541 else if (target_type->length () != (*fundamental_type)->length ()
78134374 1542 || target_type->code () != (*fundamental_type)->code ())
ea92689a
AH
1543 return -1;
1544
1545 return 2;
1546 }
1547
1548 case TYPE_CODE_ARRAY:
1549 {
bd63c870 1550 if (type->is_vector ())
ea92689a 1551 {
df86565b 1552 if (type->length () != 8 && type->length () != 16)
ea92689a
AH
1553 return -1;
1554
1555 if (*fundamental_type == nullptr)
1556 *fundamental_type = type;
df86565b 1557 else if (type->length () != (*fundamental_type)->length ()
78134374 1558 || type->code () != (*fundamental_type)->code ())
ea92689a
AH
1559 return -1;
1560
1561 return 1;
1562 }
1563 else
1564 {
27710edb 1565 struct type *target_type = type->target_type ();
ea92689a
AH
1566 int count = aapcs_is_vfp_call_or_return_candidate_1
1567 (target_type, fundamental_type);
1568
1569 if (count == -1)
1570 return count;
1571
df86565b 1572 count *= (type->length () / target_type->length ());
ea92689a
AH
1573 return count;
1574 }
1575 }
1576
1577 case TYPE_CODE_STRUCT:
1578 case TYPE_CODE_UNION:
1579 {
1580 int count = 0;
1581
1f704f76 1582 for (int i = 0; i < type->num_fields (); i++)
ea92689a 1583 {
353229bf 1584 /* Ignore any static fields. */
c819a338 1585 if (type->field (i).is_static ())
353229bf
AH
1586 continue;
1587
940da03e 1588 struct type *member = check_typedef (type->field (i).type ());
ea92689a
AH
1589
1590 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1591 (member, fundamental_type);
1592 if (sub_count == -1)
1593 return -1;
1594 count += sub_count;
1595 }
73021deb
AH
1596
1597 /* Ensure there is no padding between the fields (allowing for empty
1598 zero length structs) */
1599 int ftype_length = (*fundamental_type == nullptr)
df86565b
SM
1600 ? 0 : (*fundamental_type)->length ();
1601 if (count * ftype_length != type->length ())
73021deb
AH
1602 return -1;
1603
ea92689a
AH
1604 return count;
1605 }
1606
1607 default:
1608 break;
1609 }
1610
1611 return -1;
1612}
1613
1614/* Return true if an argument, whose type is described by TYPE, can be passed or
1615 returned in simd/fp registers, providing enough parameter passing registers
1616 are available. This is as described in the AAPCS64.
1617
1618 Upon successful return, *COUNT returns the number of needed registers,
1619 *FUNDAMENTAL_TYPE contains the type of those registers.
1620
1621 Candidate as per the AAPCS64 5.4.2.C is either a:
1622 - float.
1623 - short-vector.
1624 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1625 all the members are floats and has at most 4 members.
1626 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1627 all the members are short vectors and has at most 4 members.
1628 - Complex (7.1.1)
1629
1630 Note that HFAs and HVAs can include nested structures and arrays. */
1631
0e745c60 1632static bool
ea92689a
AH
1633aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1634 struct type **fundamental_type)
1635{
1636 if (type == nullptr)
1637 return false;
1638
1639 *fundamental_type = nullptr;
1640
1641 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1642 fundamental_type);
1643
1644 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1645 {
1646 *count = ag_count;
1647 return true;
1648 }
1649 else
1650 return false;
1651}
1652
07b287a0
MS
1653/* AArch64 function call information structure. */
1654struct aarch64_call_info
1655{
1656 /* the current argument number. */
89055eaa 1657 unsigned argnum = 0;
07b287a0
MS
1658
1659 /* The next general purpose register number, equivalent to NGRN as
1660 described in the AArch64 Procedure Call Standard. */
89055eaa 1661 unsigned ngrn = 0;
07b287a0
MS
1662
1663 /* The next SIMD and floating point register number, equivalent to
1664 NSRN as described in the AArch64 Procedure Call Standard. */
89055eaa 1665 unsigned nsrn = 0;
07b287a0
MS
1666
1667 /* The next stacked argument address, equivalent to NSAA as
1668 described in the AArch64 Procedure Call Standard. */
89055eaa 1669 unsigned nsaa = 0;
07b287a0
MS
1670
1671 /* Stack item vector. */
89055eaa 1672 std::vector<stack_item_t> si;
07b287a0
MS
1673};
1674
1675/* Pass a value in a sequence of consecutive X registers. The caller
30baf67b 1676 is responsible for ensuring sufficient registers are available. */
07b287a0
MS
1677
1678static void
1679pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1680 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1681 struct value *arg)
07b287a0
MS
1682{
1683 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
df86565b 1684 int len = type->length ();
78134374 1685 enum type_code typecode = type->code ();
07b287a0 1686 int regnum = AARCH64_X0_REGNUM + info->ngrn;
efaf1ae0 1687 const bfd_byte *buf = arg->contents ().data ();
07b287a0
MS
1688
1689 info->argnum++;
1690
1691 while (len > 0)
1692 {
1693 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1694 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1695 byte_order);
1696
1697
1698 /* Adjust sub-word struct/union args when big-endian. */
1699 if (byte_order == BFD_ENDIAN_BIG
1700 && partial_len < X_REGISTER_SIZE
1701 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1702 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1703
c6185dce
SM
1704 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
1705 gdbarch_register_name (gdbarch, regnum),
1706 phex (regval, X_REGISTER_SIZE));
1707
07b287a0
MS
1708 regcache_cooked_write_unsigned (regcache, regnum, regval);
1709 len -= partial_len;
1710 buf += partial_len;
1711 regnum++;
1712 }
1713}
1714
1715/* Attempt to marshall a value in a V register. Return 1 if
1716 successful, or 0 if insufficient registers are available. This
1717 function, unlike the equivalent pass_in_x() function does not
1718 handle arguments spread across multiple registers. */
1719
1720static int
1721pass_in_v (struct gdbarch *gdbarch,
1722 struct regcache *regcache,
1723 struct aarch64_call_info *info,
0735fddd 1724 int len, const bfd_byte *buf)
07b287a0
MS
1725{
1726 if (info->nsrn < 8)
1727 {
07b287a0 1728 int regnum = AARCH64_V0_REGNUM + info->nsrn;
3ff2c72e
AH
1729 /* Enough space for a full vector register. */
1730 gdb_byte reg[register_size (gdbarch, regnum)];
1731 gdb_assert (len <= sizeof (reg));
07b287a0
MS
1732
1733 info->argnum++;
1734 info->nsrn++;
1735
0735fddd
YQ
1736 memset (reg, 0, sizeof (reg));
1737 /* PCS C.1, the argument is allocated to the least significant
1738 bits of V register. */
1739 memcpy (reg, buf, len);
b66f5587 1740 regcache->cooked_write (regnum, reg);
0735fddd 1741
c6185dce
SM
1742 aarch64_debug_printf ("arg %d in %s", info->argnum,
1743 gdbarch_register_name (gdbarch, regnum));
1744
07b287a0
MS
1745 return 1;
1746 }
1747 info->nsrn = 8;
1748 return 0;
1749}
1750
1751/* Marshall an argument onto the stack. */
1752
1753static void
1754pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1755 struct value *arg)
07b287a0 1756{
efaf1ae0 1757 const bfd_byte *buf = arg->contents ().data ();
df86565b 1758 int len = type->length ();
07b287a0
MS
1759 int align;
1760 stack_item_t item;
1761
1762 info->argnum++;
1763
b907456c 1764 align = type_align (type);
07b287a0
MS
1765
1766 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1767 Natural alignment of the argument's type. */
1768 align = align_up (align, 8);
1769
1770 /* The AArch64 PCS requires at most doubleword alignment. */
1771 if (align > 16)
1772 align = 16;
1773
c6185dce
SM
1774 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1775 info->nsaa);
07b287a0
MS
1776
1777 item.len = len;
1778 item.data = buf;
89055eaa 1779 info->si.push_back (item);
07b287a0
MS
1780
1781 info->nsaa += len;
1782 if (info->nsaa & (align - 1))
1783 {
1784 /* Push stack alignment padding. */
1785 int pad = align - (info->nsaa & (align - 1));
1786
1787 item.len = pad;
c3c87445 1788 item.data = NULL;
07b287a0 1789
89055eaa 1790 info->si.push_back (item);
07b287a0
MS
1791 info->nsaa += pad;
1792 }
1793}
1794
1795/* Marshall an argument into a sequence of one or more consecutive X
1796 registers or, if insufficient X registers are available then onto
1797 the stack. */
1798
1799static void
1800pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1801 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1802 struct value *arg)
07b287a0 1803{
df86565b 1804 int len = type->length ();
07b287a0
MS
1805 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1806
1807 /* PCS C.13 - Pass in registers if we have enough spare */
1808 if (info->ngrn + nregs <= 8)
1809 {
8e80f9d1 1810 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1811 info->ngrn += nregs;
1812 }
1813 else
1814 {
1815 info->ngrn = 8;
8e80f9d1 1816 pass_on_stack (info, type, arg);
07b287a0
MS
1817 }
1818}
1819
0e745c60
AH
1820/* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1821 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1822 registers. A return value of false is an error state as the value will have
1823 been partially passed to the stack. */
1824static bool
1825pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1826 struct aarch64_call_info *info, struct type *arg_type,
1827 struct value *arg)
07b287a0 1828{
78134374 1829 switch (arg_type->code ())
0e745c60
AH
1830 {
1831 case TYPE_CODE_FLT:
81657e58 1832 case TYPE_CODE_DECFLOAT:
df86565b 1833 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
efaf1ae0 1834 arg->contents ().data ());
0e745c60
AH
1835 break;
1836
1837 case TYPE_CODE_COMPLEX:
1838 {
efaf1ae0 1839 const bfd_byte *buf = arg->contents ().data ();
27710edb 1840 struct type *target_type = check_typedef (arg_type->target_type ());
0e745c60 1841
df86565b 1842 if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
0e745c60
AH
1843 buf))
1844 return false;
1845
df86565b
SM
1846 return pass_in_v (gdbarch, regcache, info, target_type->length (),
1847 buf + target_type->length ());
0e745c60
AH
1848 }
1849
1850 case TYPE_CODE_ARRAY:
bd63c870 1851 if (arg_type->is_vector ())
df86565b 1852 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
efaf1ae0 1853 arg->contents ().data ());
0e745c60
AH
1854 /* fall through. */
1855
1856 case TYPE_CODE_STRUCT:
1857 case TYPE_CODE_UNION:
1f704f76 1858 for (int i = 0; i < arg_type->num_fields (); i++)
0e745c60 1859 {
353229bf 1860 /* Don't include static fields. */
c819a338 1861 if (arg_type->field (i).is_static ())
353229bf
AH
1862 continue;
1863
6c49729e 1864 struct value *field = arg->primitive_field (0, i, arg_type);
d0c97917 1865 struct type *field_type = check_typedef (field->type ());
0e745c60
AH
1866
1867 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1868 field))
1869 return false;
1870 }
1871 return true;
1872
1873 default:
1874 return false;
1875 }
07b287a0
MS
1876}
1877
/* Implement the "push_dummy_call" gdbarch method.

   Marshal NARGS arguments from ARGS into registers/stack per the
   AAPCS64, set up the return address (BP_ADDR) in LR and the hidden
   struct-return pointer (STRUCT_ADDR) in X8 when needed, and return
   the final (16-byte aligned) stack pointer.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (arg->type ());
      len = arg_type->length ();

      /* If arg can be passed in v registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      /* Not enough V registers left: per AAPCS64, mark the NSRN
		 exhausted and pass the whole argument on the stack.  */
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (arg_type->code ())
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4 && !is_fixed_point_type (arg_type))
	    {
	      /* Promote to 32 bit integer.  */
	      if (arg_type->is_unsigned ())
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, arg->contents ().data (), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  /* Pop the accumulated stack items in reverse, writing them below SP.  */
  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
2032
2033/* Implement the "frame_align" gdbarch method. */
2034
2035static CORE_ADDR
2036aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2037{
2038 /* Align the stack to sixteen bytes. */
2039 return sp & ~(CORE_ADDR) 15;
2040}
2041
2042/* Return the type for an AdvSISD Q register. */
2043
2044static struct type *
2045aarch64_vnq_type (struct gdbarch *gdbarch)
2046{
08106042 2047 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2048
2049 if (tdep->vnq_type == NULL)
2050 {
2051 struct type *t;
2052 struct type *elem;
2053
2054 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2055 TYPE_CODE_UNION);
2056
2057 elem = builtin_type (gdbarch)->builtin_uint128;
2058 append_composite_type_field (t, "u", elem);
2059
2060 elem = builtin_type (gdbarch)->builtin_int128;
2061 append_composite_type_field (t, "s", elem);
2062
2063 tdep->vnq_type = t;
2064 }
2065
2066 return tdep->vnq_type;
2067}
2068
2069/* Return the type for an AdvSISD D register. */
2070
2071static struct type *
2072aarch64_vnd_type (struct gdbarch *gdbarch)
2073{
08106042 2074 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2075
2076 if (tdep->vnd_type == NULL)
2077 {
2078 struct type *t;
2079 struct type *elem;
2080
2081 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2082 TYPE_CODE_UNION);
2083
2084 elem = builtin_type (gdbarch)->builtin_double;
2085 append_composite_type_field (t, "f", elem);
2086
2087 elem = builtin_type (gdbarch)->builtin_uint64;
2088 append_composite_type_field (t, "u", elem);
2089
2090 elem = builtin_type (gdbarch)->builtin_int64;
2091 append_composite_type_field (t, "s", elem);
2092
2093 tdep->vnd_type = t;
2094 }
2095
2096 return tdep->vnd_type;
2097}
2098
2099/* Return the type for an AdvSISD S register. */
2100
2101static struct type *
2102aarch64_vns_type (struct gdbarch *gdbarch)
2103{
08106042 2104 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2105
2106 if (tdep->vns_type == NULL)
2107 {
2108 struct type *t;
2109 struct type *elem;
2110
2111 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2112 TYPE_CODE_UNION);
2113
2114 elem = builtin_type (gdbarch)->builtin_float;
2115 append_composite_type_field (t, "f", elem);
2116
2117 elem = builtin_type (gdbarch)->builtin_uint32;
2118 append_composite_type_field (t, "u", elem);
2119
2120 elem = builtin_type (gdbarch)->builtin_int32;
2121 append_composite_type_field (t, "s", elem);
2122
2123 tdep->vns_type = t;
2124 }
2125
2126 return tdep->vns_type;
2127}
2128
2129/* Return the type for an AdvSISD H register. */
2130
2131static struct type *
2132aarch64_vnh_type (struct gdbarch *gdbarch)
2133{
08106042 2134 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2135
2136 if (tdep->vnh_type == NULL)
2137 {
2138 struct type *t;
2139 struct type *elem;
2140
2141 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2142 TYPE_CODE_UNION);
2143
5291fe3c
SP
2144 elem = builtin_type (gdbarch)->builtin_bfloat16;
2145 append_composite_type_field (t, "bf", elem);
2146
a6d0f249
AH
2147 elem = builtin_type (gdbarch)->builtin_half;
2148 append_composite_type_field (t, "f", elem);
2149
07b287a0
MS
2150 elem = builtin_type (gdbarch)->builtin_uint16;
2151 append_composite_type_field (t, "u", elem);
2152
2153 elem = builtin_type (gdbarch)->builtin_int16;
2154 append_composite_type_field (t, "s", elem);
2155
2156 tdep->vnh_type = t;
2157 }
2158
2159 return tdep->vnh_type;
2160}
2161
2162/* Return the type for an AdvSISD B register. */
2163
2164static struct type *
2165aarch64_vnb_type (struct gdbarch *gdbarch)
2166{
08106042 2167 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2168
2169 if (tdep->vnb_type == NULL)
2170 {
2171 struct type *t;
2172 struct type *elem;
2173
2174 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2175 TYPE_CODE_UNION);
2176
2177 elem = builtin_type (gdbarch)->builtin_uint8;
2178 append_composite_type_field (t, "u", elem);
2179
2180 elem = builtin_type (gdbarch)->builtin_int8;
2181 append_composite_type_field (t, "s", elem);
2182
2183 tdep->vnb_type = t;
2184 }
2185
2186 return tdep->vnb_type;
2187}
2188
ca65640f
LM
2189/* Return TRUE if REGNUM is a ZA tile slice pseudo-register number. Return
2190 FALSE otherwise. */
2191
2192static bool
2193is_sme_tile_slice_pseudo_register (struct gdbarch *gdbarch, int regnum)
2194{
2195 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2196
2197 gdb_assert (tdep->has_sme ());
2198 gdb_assert (tdep->sme_svq > 0);
2199 gdb_assert (tdep->sme_pseudo_base <= regnum);
2200 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
2201
2202 if (tdep->sme_tile_slice_pseudo_base <= regnum
2203 && regnum < tdep->sme_tile_slice_pseudo_base
2204 + tdep->sme_tile_slice_pseudo_count)
2205 return true;
2206
2207 return false;
2208}
2209
/* Given REGNUM, a ZA pseudo-register number, return, in ENCODING, the
   decoded fields that make up its name: qualifier (b/h/s/d/q), tile
   number, and — for tile slices — direction and slice number.  */

static void
aarch64_za_decode_pseudos (struct gdbarch *gdbarch, int regnum,
			   struct za_pseudo_encoding &encoding)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    {
      /* Calculate the tile slice pseudo-register offset relative to the other
	 tile slice pseudo-registers.  */
      int offset = regnum - tdep->sme_tile_slice_pseudo_base;

      /* Fetch the qualifier.  We can have 160 to 2560 possible tile slice
	 pseudo-registers.  Each qualifier (we have 5 of them: B, H, S, D
	 and Q) covers 32 * svq pseudo-registers, so we divide the offset by
	 that constant.  */
      size_t qualifier = offset / (tdep->sme_svq * 32);
      encoding.qualifier_index = qualifier;

      /* Prepare to fetch the direction (d), tile number (t) and slice
	 number (s).  */
      int dts = offset % (tdep->sme_svq * 32);

      /* The direction is represented by the even/odd numbers.  Even-numbered
	 pseudo-registers are horizontal tile slices and odd-numbered
	 pseudo-registers are vertical tile slices.  */
      encoding.horizontal = !(dts & 1);

      /* Fetch the tile number.  The tile number is closely related to the
	 qualifier.  B has 1 tile, H has 2 tiles, S has 4 tiles, D has 8 tiles
	 and Q has 16 tiles.  */
      encoding.tile_index = (dts >> 1) & ((1 << qualifier) - 1);

      /* Fetch the slice number.  The slice number is closely related to the
	 qualifier and the svl.  */
      encoding.slice_index = dts >> (qualifier + 1);
    }
  else
    {
      /* Calculate the tile pseudo-register offset relative to the other
	 tile pseudo-registers.  */
      int offset = regnum - tdep->sme_tile_pseudo_base;

      /* Tiles are laid out by qualifier: 1 B tile, 2 H tiles, 4 S tiles,
	 8 D tiles, 16 Q tiles; log2 of (offset + 1) recovers the group.  */
      encoding.qualifier_index = std::floor (std::log2 (offset + 1));
      /* Calculate the tile number.  */
      encoding.tile_index = (offset + 1) - (1 << encoding.qualifier_index);
      /* Direction and slice index don't get used for tiles.  Set them to
	 0/false values.  */
      encoding.slice_index = 0;
      encoding.horizontal = false;
    }
}
2270
2271/* Return the type for a ZA tile slice pseudo-register based on ENCODING. */
2272
2273static struct type *
2274aarch64_za_tile_slice_type (struct gdbarch *gdbarch,
2275 const struct za_pseudo_encoding &encoding)
2276{
2277 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2278
2279 gdb_assert (tdep->has_sme ());
2280 gdb_assert (tdep->sme_svq > 0);
2281
2282 if (tdep->sme_tile_slice_type_q == nullptr)
2283 {
2284 /* Q tile slice type. */
2285 tdep->sme_tile_slice_type_q
2286 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2287 tdep->sme_svq);
2288 /* D tile slice type. */
2289 tdep->sme_tile_slice_type_d
2290 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2291 tdep->sme_svq * 2);
2292 /* S tile slice type. */
2293 tdep->sme_tile_slice_type_s
2294 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2295 tdep->sme_svq * 4);
2296 /* H tile slice type. */
2297 tdep->sme_tile_slice_type_h
2298 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2299 tdep->sme_svq * 8);
2300 /* B tile slice type. */
2301 tdep->sme_tile_slice_type_b
2302 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2303 tdep->sme_svq * 16);
2304 }
2305
2306 switch (encoding.qualifier_index)
2307 {
2308 case 4:
2309 return tdep->sme_tile_slice_type_q;
2310 case 3:
2311 return tdep->sme_tile_slice_type_d;
2312 case 2:
2313 return tdep->sme_tile_slice_type_s;
2314 case 1:
2315 return tdep->sme_tile_slice_type_h;
2316 case 0:
2317 return tdep->sme_tile_slice_type_b;
2318 default:
2319 error (_("Invalid qualifier index %s for tile slice pseudo register."),
2320 pulongest (encoding.qualifier_index));
2321 }
2322
2323 gdb_assert_not_reached ("Unknown qualifier for ZA tile slice register");
2324}
2325
2326/* Return the type for a ZA tile pseudo-register based on ENCODING. */
2327
2328static struct type *
2329aarch64_za_tile_type (struct gdbarch *gdbarch,
2330 const struct za_pseudo_encoding &encoding)
2331{
2332 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2333
2334 gdb_assert (tdep->has_sme ());
2335 gdb_assert (tdep->sme_svq > 0);
2336
2337 if (tdep->sme_tile_type_q == nullptr)
2338 {
2339 struct type *inner_vectors_type;
2340
2341 /* Q tile type. */
2342 inner_vectors_type
2343 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2344 tdep->sme_svq);
2345 tdep->sme_tile_type_q
2346 = init_vector_type (inner_vectors_type, tdep->sme_svq);
2347
2348 /* D tile type. */
2349 inner_vectors_type
2350 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2351 tdep->sme_svq * 2);
2352 tdep->sme_tile_type_d
2353 = init_vector_type (inner_vectors_type, tdep->sme_svq * 2);
2354
2355 /* S tile type. */
2356 inner_vectors_type
2357 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2358 tdep->sme_svq * 4);
2359 tdep->sme_tile_type_s
2360 = init_vector_type (inner_vectors_type, tdep->sme_svq * 4);
2361
2362 /* H tile type. */
2363 inner_vectors_type
2364 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2365 tdep->sme_svq * 8);
2366 tdep->sme_tile_type_h
2367 = init_vector_type (inner_vectors_type, tdep->sme_svq * 8);
2368
2369 /* B tile type. */
2370 inner_vectors_type
2371 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2372 tdep->sme_svq * 16);
2373 tdep->sme_tile_type_b
2374 = init_vector_type (inner_vectors_type, tdep->sme_svq * 16);
2375 }
2376
2377 switch (encoding.qualifier_index)
2378 {
2379 case 4:
2380 return tdep->sme_tile_type_q;
2381 case 3:
2382 return tdep->sme_tile_type_d;
2383 case 2:
2384 return tdep->sme_tile_type_s;
2385 case 1:
2386 return tdep->sme_tile_type_h;
2387 case 0:
2388 return tdep->sme_tile_type_b;
2389 default:
2390 error (_("Invalid qualifier index %s for ZA tile pseudo register."),
2391 pulongest (encoding.qualifier_index));
2392 }
2393
2394 gdb_assert_not_reached ("unknown qualifier for tile pseudo-register");
2395}
2396
63bad7b6
AH
/* Return the type for an AdvSISD V register.

   Lazily built and cached on the tdep: a union of sub-unions, one per
   lane width (d/s/h/b/q), each offering float/unsigned/signed vector
   views of the full 128-bit register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
	 slice from the non-pseudo vector registers.  However NEON V registers
	 are always vector registers, and need constructing as such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
					    TYPE_CODE_UNION);

      /* 2-lane 64-bit views.  */
      struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
					      TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      /* 4-lane 32-bit views.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      /* 8-lane 16-bit views, including bfloat16 and IEEE half.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "bf",
				   init_vector_type (bt->builtin_bfloat16, 8));
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_half, 8));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      /* 16-lane 8-bit views.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      /* Single-lane 128-bit views.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
2467
07b287a0
MS
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.

   Map a DWARF register number REG to GDB's internal register number,
   or return -1 if there is no mapping.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* General purpose registers x0..x30.  */
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg == AARCH64_DWARF_PC)
    return AARCH64_PC_REGNUM;

  /* SIMD/FP registers v0..v31.  */
  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  /* SVE predicate registers p0..p15.  */
  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  /* SVE vector registers z0..z15 (the DWARF range covers 16 only).  */
  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  /* Pointer authentication pseudo-state, only when PAC is available.  */
  if (tdep->has_pauth ())
    {
      if (reg == AARCH64_DWARF_RA_SIGN_STATE)
	return tdep->ra_sign_state_regnum;
    }

  return -1;
}
07b287a0
MS
2507
2508/* Implement the "print_insn" gdbarch method. */
2509
2510static int
2511aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2512{
2513 info->symbols = NULL;
6394c606 2514 return default_print_insn (memaddr, info);
07b287a0
MS
2515}
2516
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Standard fixed-length breakpoint helpers (kind/size lookups).  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
07b287a0
MS

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.

   HFA/HVA values come back element-by-element in v0..; integers and
   pointers in x0..; other aggregates as if loaded 64 bits at a time
   from x0 onwards.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA: one V register per member.  */
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  aarch64_debug_printf
	    ("read HFA or HVA return value element %d from %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2603
2604
2605/* Will a function return an aggregate type in memory or in a
2606 register? Return 0 if an aggregate type can be returned in a
2607 register, 1 if it must be returned in memory. */
2608
2609static int
2610aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2611{
f168693b 2612 type = check_typedef (type);
4f4aedeb
AH
2613 int elements;
2614 struct type *fundamental_type;
07b287a0 2615
911627e7
TT
2616 if (TYPE_HAS_DYNAMIC_LENGTH (type))
2617 return 1;
2618
4f4aedeb
AH
2619 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2620 &fundamental_type))
07b287a0 2621 {
cd635f74
YQ
2622 /* v0-v7 are used to return values and one register is allocated
2623 for one member. However, HFA or HVA has at most four members. */
07b287a0
MS
2624 return 0;
2625 }
2626
df86565b 2627 if (type->length () > 16
bab22d06 2628 || !language_pass_by_reference (type).trivially_copyable)
07b287a0
MS
2629 {
2630 /* PCS B.6 Aggregates larger than 16 bytes are passed by
dda83cd7 2631 invisible reference. */
07b287a0
MS
2632
2633 return 1;
2634 }
2635
2636 return 0;
2637}
2638
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.

   Mirror of aarch64_extract_return_value: HFA/HVA members go to v0..,
   small integers are sign-extended into x0, larger values fill x0..
   64 bits at a time.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      int len = fundamental_type->length ();

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  aarch64_debug_printf
	    ("write HFA or HVA return value element %d to %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  /* Depending on whether the target supports SVE or not, the V
	     registers may report a size > 16 bytes.  In that case, read the
	     original contents of the register before overriding it with a new
	     value that has a potential size <= 16 bytes.  */
	  regs->cooked_read (regno, tmpbuf);
	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (type->length () <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = type->length ();
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = type->length ();
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2730
/* Implement the "return_value" gdbarch method.

   Decide whether VALTYPE is returned in registers or in memory, and
   perform the requested read (into *READ_VALUE) and/or write (from
   WRITEBUF) of the return value.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      struct value **read_value, const gdb_byte *writebuf)
{
  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  /* From the AAPCS64's Result Return section:

	     "Otherwise, the caller shall reserve a block of memory of
	     sufficient size and alignment to hold the result.  The address
	     of the memory block shall be passed as an additional argument to
	     the function in x8.  */

	  aarch64_debug_printf ("return value in memory");

	  if (read_value != nullptr)
	    {
	      CORE_ADDR addr;

	      /* X8 holds the address of the caller-reserved block.  */
	      regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
	      *read_value = value_at_non_lval (valtype, addr);
	    }

	  return RETURN_VALUE_ABI_RETURNS_ADDRESS;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (read_value)
    {
      *read_value = value::allocate (valtype);
      aarch64_extract_return_value (valtype, regcache,
				    (*read_value)->contents_raw ().data ());
    }

  aarch64_debug_printf ("return value in registers");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
2779
2780/* Implement the "get_longjmp_target" gdbarch method. */
2781
2782static int
bd2b40ac 2783aarch64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
07b287a0
MS
2784{
2785 CORE_ADDR jb_addr;
2786 gdb_byte buf[X_REGISTER_SIZE];
2787 struct gdbarch *gdbarch = get_frame_arch (frame);
08106042 2788 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
2789 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2790
2791 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2792
2793 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2794 X_REGISTER_SIZE))
2795 return 0;
2796
2797 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2798 return 1;
2799}
ea873d8e
PL
2800
2801/* Implement the "gen_return_address" gdbarch method. */
2802
2803static void
2804aarch64_gen_return_address (struct gdbarch *gdbarch,
2805 struct agent_expr *ax, struct axs_value *value,
2806 CORE_ADDR scope)
2807{
2808 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2809 value->kind = axs_lvalue_register;
2810 value->u.reg = AARCH64_LR_REGNUM;
2811}
07b287a0
MS
2812\f
2813
e63ae49b
LM
2814/* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2815 otherwise. */
2816
2817static bool
2818is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2819{
2820 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2821
2822 if (tdep->w_pseudo_base <= regnum
2823 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2824 return true;
2825
2826 return false;
2827}
2828
ca65640f
LM
2829/* Return TRUE if REGNUM is a SME pseudo-register number. Return FALSE
2830 otherwise. */
2831
2832static bool
2833is_sme_pseudo_register (struct gdbarch *gdbarch, int regnum)
2834{
2835 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2836
2837 if (tdep->has_sme () && tdep->sme_pseudo_base <= regnum
2838 && regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count)
2839 return true;
2840
2841 return false;
2842}
2843
2844/* Convert ENCODING into a ZA tile slice name. */
2845
2846static const std::string
2847aarch64_za_tile_slice_name (const struct za_pseudo_encoding &encoding)
2848{
2849 gdb_assert (encoding.qualifier_index >= 0);
2850 gdb_assert (encoding.qualifier_index <= 4);
2851 gdb_assert (encoding.tile_index >= 0);
2852 gdb_assert (encoding.tile_index <= 15);
2853 gdb_assert (encoding.slice_index >= 0);
2854 gdb_assert (encoding.slice_index <= 255);
2855
2856 const char orientation = encoding.horizontal ? 'h' : 'v';
2857
2858 const char qualifiers[6] = "bhsdq";
2859 const char qualifier = qualifiers [encoding.qualifier_index];
2860 return string_printf ("za%d%c%c%d", encoding.tile_index, orientation,
2861 qualifier, encoding.slice_index);
2862}
2863
2864/* Convert ENCODING into a ZA tile name. */
2865
2866static const std::string
2867aarch64_za_tile_name (const struct za_pseudo_encoding &encoding)
2868{
2869 /* Tiles don't use the slice number and the direction fields. */
2870 gdb_assert (encoding.qualifier_index >= 0);
2871 gdb_assert (encoding.qualifier_index <= 4);
2872 gdb_assert (encoding.tile_index >= 0);
2873 gdb_assert (encoding.tile_index <= 15);
2874
2875 const char qualifiers[6] = "bhsdq";
2876 const char qualifier = qualifiers [encoding.qualifier_index];
2877 return (string_printf ("za%d%c", encoding.tile_index, qualifier));
2878}
2879
2880/* Given a SME pseudo-register REGNUM, return its type. */
2881
2882static struct type *
2883aarch64_sme_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2884{
2885 struct za_pseudo_encoding encoding;
2886
2887 /* Decode the SME pseudo-register number. */
2888 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
2889
2890 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
2891 return aarch64_za_tile_slice_type (gdbarch, encoding);
2892 else
2893 return aarch64_za_tile_type (gdbarch, encoding);
2894}
2895
07b287a0
MS
/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* W pseudo-registers.  Bottom halves of the X registers.  */
  static const char *const w_name[] =
    {
      "w0", "w1", "w2", "w3",
      "w4", "w5", "w6", "w7",
      "w8", "w9", "w10", "w11",
      "w12", "w13", "w14", "w15",
      "w16", "w17", "w18", "w19",
      "w20", "w21", "w22", "w23",
      "w24", "w25", "w26", "w27",
      "w28", "w29", "w30",
    };

  /* Q/D/S/H/B pseudo-registers: views of the vector registers at the
     various architectural element sizes.  */
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Pseudo-register number relative to the first pseudo-register.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  /* W pseudo-registers?  Note these are indexed by the absolute REGNUM,
     not by P_REGNUM, since their base is stored in TDEP.  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return w_name[regnum - tdep->w_pseudo_base];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* SME pseudo-register names are precomputed and stored in TDEP.  */
  if (is_sme_pseudo_register (gdbarch, regnum))
    return tdep->sme_pseudo_names[regnum - tdep->sme_pseudo_base].c_str ();

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return "";

  internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}
3028
/* Implement the "pseudo_register_type" tdesc_arch_data method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Pseudo-register number relative to the first pseudo-register.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  /* Q/D/S/H/B pseudo-registers get the matching vector view types.  */
  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
      && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  /* W pseudo-registers are 32-bit.  */
  if (is_w_pseudo_register (gdbarch, regnum))
    return builtin_type (gdbarch)->builtin_uint32;

  if (is_sme_pseudo_register (gdbarch, regnum))
    return aarch64_sme_pseudo_register_type (gdbarch, regnum);

  /* RA_STATE (pointer authentication) is represented as a 64-bit value.  */
  if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
    return builtin_type (gdbarch)->builtin_uint64;

  internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
		  p_regnum);
}
3070
3071/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
3072
3073static int
3074aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
dbf5d61b 3075 const struct reggroup *group)
07b287a0 3076{
08106042 3077 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
63bad7b6 3078
34dcc7cf 3079 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 3080
34dcc7cf 3081 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0 3082 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 3083 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
3084 return (group == all_reggroup || group == vector_reggroup
3085 || group == float_reggroup);
34dcc7cf 3086 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
3087 return (group == all_reggroup || group == vector_reggroup
3088 || group == float_reggroup);
34dcc7cf 3089 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0 3090 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 3091 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0 3092 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
3093 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
3094 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6 3095 return group == all_reggroup || group == vector_reggroup;
ca65640f
LM
3096 else if (is_sme_pseudo_register (gdbarch, regnum))
3097 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 3098 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
c9cd8ca4 3099 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
34dcc7cf 3100 return 0;
07b287a0
MS
3101
3102 return group == all_reggroup;
3103}
3104
3c5cd5c3
AH
/* Helper for aarch64_pseudo_read_value.

   Read the pseudo-register at offset REGNUM_OFFSET from the first V
   register as a REGSIZE-byte view of the bottom of the corresponding
   raw vector register, filling in RESULT_VALUE and returning it.  */

static struct value *
aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
			     readable_regcache *regcache, int regnum_offset,
			     int regsize, struct value *result_value)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  /* If the raw register can't be read, the whole pseudo value is
     unavailable; otherwise copy the low REGSIZE bytes.  */
  if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
    result_value->mark_bytes_unavailable (0,
					  result_value->type ()->length ());
  else
    memcpy (result_value->contents_raw ().data (), reg_buf, regsize);

  return result_value;
}
3126
ca65640f
LM
/* Helper function for reading/writing ZA pseudo-registers.  Given REGNUM,
   a ZA pseudo-register number, return, in OFFSETS, the information on positioning
   of the bytes that must be read from/written to.

   The ZA storage is viewed here as a linear buffer of svl x svl bytes;
   a pseudo-register maps onto CHUNKS blocks of CHUNK_SIZE bytes each,
   starting at STARTING_OFFSET and separated by STRIDE_SIZE bytes.  */

static void
aarch64_za_offsets_from_regnum (struct gdbarch *gdbarch, int regnum,
				struct za_offsets &offsets)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  gdb_assert (tdep->has_sme ());
  gdb_assert (tdep->sme_svq > 0);
  gdb_assert (tdep->sme_pseudo_base <= regnum);
  gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);

  struct za_pseudo_encoding encoding;

  /* Decode the ZA pseudo-register number.  */
  aarch64_za_decode_pseudos (gdbarch, regnum, encoding);

  /* Fetch the streaming vector length.  */
  size_t svl = sve_vl_from_vq (tdep->sme_svq);

  if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
    {
      if (encoding.horizontal)
	{
	  /* Horizontal tile slices are contiguous ranges of svl bytes.  */

	  /* The starting offset depends on the tile index (to locate the tile
	     in the ZA buffer), the slice index (to locate the slice within the
	     tile) and the qualifier.  */
	  offsets.starting_offset
	    = encoding.tile_index * svl + encoding.slice_index
	      * (svl >> encoding.qualifier_index);
	  /* Horizontal tile slice data is contiguous and thus doesn't have
	     a stride.  */
	  offsets.stride_size = 0;
	  /* Horizontal tile slice data is contiguous and thus only has 1
	     chunk.  */
	  offsets.chunks = 1;
	  /* The chunk size is always svl bytes.  */
	  offsets.chunk_size = svl;
	}
      else
	{
	  /* Vertical tile slices are non-contiguous ranges of
	     (1 << qualifier_index) bytes.  */

	  /* The starting offset depends on the tile number (to locate the
	     tile in the ZA buffer), the slice index (to locate the element
	     within the tile slice) and the qualifier.  */
	  offsets.starting_offset
	    = encoding.tile_index * svl + encoding.slice_index
	      * (1 << encoding.qualifier_index);
	  /* The offset between vertical tile slices depends on the qualifier
	     and svl.  */
	  offsets.stride_size = svl << encoding.qualifier_index;
	  /* The number of chunks depends on svl and the qualifier size.  */
	  offsets.chunks = svl >> encoding.qualifier_index;
	  /* The chunk size depends on the qualifier.  */
	  offsets.chunk_size = 1 << encoding.qualifier_index;
	}
    }
  else
    {
      /* ZA tile pseudo-register.  */

      /* Starting offset depends on the tile index and qualifier.  */
      offsets.starting_offset = encoding.tile_index * svl;
      /* The offset between tile slices depends on the qualifier and svl.  */
      offsets.stride_size = svl << encoding.qualifier_index;
      /* The number of chunks depends on the qualifier and svl.  */
      offsets.chunks = svl >> encoding.qualifier_index;
      /* The chunk size is always svl bytes.  */
      offsets.chunk_size = svl;
    }
}
3205
3206/* Given REGNUM, a SME pseudo-register number, return its value in RESULT. */
3207
3208static struct value *
3209aarch64_sme_pseudo_register_read (struct gdbarch *gdbarch,
3210 readable_regcache *regcache, int regnum,
3211 struct value *result)
3212{
3213 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3214
3215 gdb_assert (tdep->has_sme ());
3216 gdb_assert (tdep->sme_svq > 0);
3217 gdb_assert (tdep->sme_pseudo_base <= regnum);
3218 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3219
3220 /* Fetch the offsets that we need in order to read from the correct blocks
3221 of ZA. */
3222 struct za_offsets offsets;
3223 aarch64_za_offsets_from_regnum (gdbarch, regnum, offsets);
3224
3225 /* Fetch the contents of ZA. */
3226 size_t svl = sve_vl_from_vq (tdep->sme_svq);
3227 gdb::byte_vector za (std::pow (svl, 2));
3228 regcache->raw_read (tdep->sme_za_regnum, za.data ());
3229
3230 /* Copy the requested data. */
3231 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3232 {
3233 const gdb_byte *source
3234 = za.data () + offsets.starting_offset + chunks * offsets.stride_size;
3235 gdb_byte *destination
3236 = result->contents_raw ().data () + chunks * offsets.chunk_size;
3237
3238 memcpy (destination, source, offsets.chunk_size);
3239 }
3240 return result;
3241}
3242
07b287a0
MS
/* Implement the "pseudo_register_read_value" gdbarch method.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct value *result_value = value::allocate (register_type (gdbarch, regnum));

  result_value->set_lval (lval_register);
  VALUE_REGNUM (result_value) = regnum;

  if (is_w_pseudo_register (gdbarch, regnum))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      /* On big endian the low 32 bits of X live in the upper half.  */
      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
      gdb_byte data[4];

      /* Read the bottom 4 bytes of X.  */
      if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
	result_value->mark_bytes_unavailable (0, 4);
      else
	memcpy (result_value->contents_raw ().data (), data, 4);

      return result_value;
    }
  else if (is_sme_pseudo_register (gdbarch, regnum))
    return aarch64_sme_pseudo_register_read (gdbarch, regcache, regnum,
					     result_value);

  /* The remaining pseudo-registers are handled relative to the first
     pseudo-register number.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bound");
}
3315
3c5cd5c3 3316/* Helper for aarch64_pseudo_write. */
07b287a0
MS
3317
3318static void
63bad7b6
AH
3319aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
3320 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 3321{
3c5cd5c3 3322 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 3323
63bad7b6
AH
3324 /* Enough space for a full vector register. */
3325 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
3326 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
3327
07b287a0
MS
3328 /* Ensure the register buffer is zero, we want gdb writes of the
3329 various 'scalar' pseudo registers to behavior like architectural
3330 writes, register width bytes are written the remainder are set to
3331 zero. */
63bad7b6 3332 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 3333
3c5cd5c3
AH
3334 memcpy (reg_buf, buf, regsize);
3335 regcache->raw_write (v_regnum, reg_buf);
3336}
3337
ca65640f
LM
3338/* Given REGNUM, a SME pseudo-register number, store the bytes from DATA to the
3339 pseudo-register. */
3340
3341static void
3342aarch64_sme_pseudo_register_write (struct gdbarch *gdbarch,
3343 struct regcache *regcache,
3344 int regnum, const gdb_byte *data)
3345{
3346 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3347
3348 gdb_assert (tdep->has_sme ());
3349 gdb_assert (tdep->sme_svq > 0);
3350 gdb_assert (tdep->sme_pseudo_base <= regnum);
3351 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3352
3353 /* Fetch the offsets that we need in order to write to the correct blocks
3354 of ZA. */
3355 struct za_offsets offsets;
3356 aarch64_za_offsets_from_regnum (gdbarch, regnum, offsets);
3357
3358 /* Fetch the contents of ZA. */
3359 size_t svl = sve_vl_from_vq (tdep->sme_svq);
3360 gdb::byte_vector za (std::pow (svl, 2));
3361
3362 /* Copy the requested data. */
3363 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3364 {
3365 const gdb_byte *source = data + chunks * offsets.chunk_size;
3366 gdb_byte *destination
3367 = za.data () + offsets.starting_offset + chunks * offsets.stride_size;
3368
3369 memcpy (destination, source, offsets.chunk_size);
3370 }
3371
3372 /* Write back to ZA. */
3373 regcache->raw_write (tdep->sme_za_regnum, za.data ());
3374}
3375
3c5cd5c3
AH
/* Implement the "pseudo_register_write" gdbarch method.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  if (is_w_pseudo_register (gdbarch, regnum))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      /* Default offset for little endian.  */
      int offset = 0;

      /* On big endian the low 32 bits of X live in the upper half.  */
      if (byte_order == BFD_ENDIAN_BIG)
	offset = 4;

      /* Find the correct X register to extract the data from.  */
      int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);

      /* First zero-out the contents of X.  */
      ULONGEST zero = 0;
      regcache->raw_write (x_regnum, zero);
      /* Write to the bottom 4 bytes of X.  */
      regcache->raw_write_part (x_regnum, offset, 4, buf);
      return;
    }
  else if (is_sme_pseudo_register (gdbarch, regnum))
    {
      aarch64_sme_pseudo_register_write (gdbarch, regcache, regnum, buf);
      return;
    }

  /* The remaining pseudo-registers are handled relative to the first
     pseudo-register number.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
				   buf);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_SVE_V0_REGNUM,
				   V_REGISTER_SIZE, buf);

  gdb_assert_not_reached ("regnum out of bound");
}
3444
07b287a0
MS
3445/* Callback function for user_reg_add. */
3446
3447static struct value *
bd2b40ac 3448value_of_aarch64_user_reg (frame_info_ptr frame, const void *baton)
07b287a0 3449{
9a3c8263 3450 const int *reg_p = (const int *) baton;
07b287a0
MS
3451
3452 return value_of_register (*reg_p, frame);
3453}
3454\f
3455
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   If PC is at a load-exclusive instruction, scan forward for the
   matching store-exclusive and return breakpoint addresses that skip
   the whole sequence (plus the target of at most one conditional branch
   inside it); stepping into the middle of such a sequence would make it
   fail forever.  Return an empty vector when no sequence is found.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (loc, insn_size,
					  byte_order_for_code,
					  &insn_from_memory))
    {
      /* Assume we don't have a atomic sequence, as we couldn't read the
	 instruction in this location.  */
      return {};
    }

  uint32_t insn = insn_from_memory;
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.
     Within the ldstexcl class, bit 22 set distinguishes loads from
     stores.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;

      if (!safe_read_memory_unsigned_integer (loc, insn_size,
					      byte_order_for_code,
					      &insn_from_memory))
	{
	  /* Assume we don't have a atomic sequence, as we couldn't read the
	     instruction in this location.  */
	  return {};
	}

      insn = insn_from_memory;
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* Give up on a sequence with more than one conditional
	     branch; we only track a single alternate target.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
3557
1152d984
SM
/* Per-instruction state carried across an AArch64 displaced step,
   used to fix up the inferior's PC once the copied instruction has
   executed out of line.  */

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};
3569
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Common instruction-visitor data; must come first so this struct can
     be passed where an aarch64_insn_data is expected.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure recording how to fix up the PC after the step.  */
  aarch64_displaced_step_copy_insn_closure *dsc;
};
3587
/* Implementation of aarch64_insn_visitor method "b".

   Relocate a B/BL instruction (branch offset OFFSET, IS_BL non-zero for
   BL) so it can execute from the scratch location.  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* Branch offset recomputed relative to the scratch address.  */
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      /* The branch is taken by adjusting the PC after the step instead.  */
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}
3621
/* Implementation of aarch64_insn_visitor method "b_cond".

   Relocate a conditional branch with condition COND and offset OFFSET.  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced step this instruction
     differently according to the condition is true or false.  Instead
     of checking COND against conditional flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1     ;
     TAKEN:
     INSN2
  */

  emit_bcond (dsd->insn_buf, cond, 8);
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}
3648
3649/* Dynamically allocate a new register. If we know the register
3650 statically, we should make it a global as above instead of using this
3651 helper function. */
3652
3653static struct aarch64_register
3654aarch64_register (unsigned num, int is64)
3655{
3656 return (struct aarch64_register) { num, is64 };
3657}
3658
/* Implementation of aarch64_insn_visitor method "cb".

   Relocate a CBZ/CBNZ (IS_CBNZ selects which) on register RN, with
   branch offset OFFSET.  IS64 selects the X or W form.  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 INSN1     ;
	 TAKEN:
	 INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
3682
/* Implementation of aarch64_insn_visitor method "tb".

   Relocate a TBZ/TBNZ (IS_TBNZ selects which) testing bit BIT of
   register RT, with branch offset OFFSET.  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1         ;
     TAKEN:
     INSN2

  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
3707
3708/* Implementation of aarch64_insn_visitor method "adr". */
3709
3710static void
3711aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3712 const int is_adrp, struct aarch64_insn_data *data)
3713{
3714 struct aarch64_displaced_step_data *dsd
3715 = (struct aarch64_displaced_step_data *) data;
3716 /* We know exactly the address the ADR{P,} instruction will compute.
3717 We can just write it to the destination register. */
3718 CORE_ADDR address = data->insn_addr + offset;
3719
3720 if (is_adrp)
3721 {
3722 /* Clear the lower 12 bits of the offset to get the 4K page. */
3723 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3724 address & ~0xfff);
3725 }
3726 else
3727 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3728 address);
3729
3730 dsd->dsc->pc_adjust = 4;
3731 emit_nop (dsd->insn_buf);
3732 dsd->insn_count = 1;
3733}
3734
3735/* Implementation of aarch64_insn_visitor method "ldr_literal". */
3736
3737static void
3738aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3739 const unsigned rt, const int is64,
3740 struct aarch64_insn_data *data)
3741{
3742 struct aarch64_displaced_step_data *dsd
3743 = (struct aarch64_displaced_step_data *) data;
3744 CORE_ADDR address = data->insn_addr + offset;
3745 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3746
3747 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3748 address);
3749
3750 if (is_sw)
3751 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3752 aarch64_register (rt, 1), zero);
3753 else
3754 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3755 aarch64_register (rt, 1), zero);
3756
3757 dsd->dsc->pc_adjust = 4;
3758}
3759
3760/* Implementation of aarch64_insn_visitor method "others". */
3761
3762static void
3763aarch64_displaced_step_others (const uint32_t insn,
3764 struct aarch64_insn_data *data)
3765{
3766 struct aarch64_displaced_step_data *dsd
3767 = (struct aarch64_displaced_step_data *) data;
3768
807f647c
MM
3769 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3770 if (masked_insn == BLR)
b6542f81 3771 {
807f647c
MM
3772 /* Emit a BR to the same register and then update LR to the original
3773 address (similar to aarch64_displaced_step_b). */
3774 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3775 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3776 data->insn_addr + 4);
b6542f81 3777 }
807f647c
MM
3778 else
3779 aarch64_emit_insn (dsd->insn_buf, insn);
3780 dsd->insn_count = 1;
3781
3782 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3783 dsd->dsc->pc_adjust = 0;
b6542f81
YQ
3784 else
3785 dsd->dsc->pc_adjust = 4;
3786}
3787
/* Visitor callbacks used by aarch64_relocate_instruction when copying an
   instruction into the displaced-stepping scratch pad; one handler per
   instruction class (order is fixed by struct aarch64_insn_visitor).  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
3798
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Read the instruction at FROM, relocate it (via the visitor table above)
   into the scratch pad at TO, and return a closure describing how to fix
   up the PC afterwards.  Returns NULL/nullptr when the instruction cannot
   be displaced stepped, in which case the caller must fall back to another
   stepping strategy.  */

displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;
  ULONGEST insn_from_memory;

  /* If the original instruction can't be read, displaced stepping is not
     possible.  */
  if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
					  &insn_from_memory))
    return nullptr;

  uint32_t insn = insn_from_memory;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
    (new aarch64_displaced_step_copy_insn_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  displaced_debug_printf ("writing insn %.8x at %s",
				  dsd.insn_buf[i],
				  paddress (gdbarch, to + i * 4));

	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor emitted nothing; report failure by returning a null
	 closure.  */
      dsc = NULL;
    }

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
3862
/* Implement the "displaced_step_fixup" gdbarch method.

   Called after the relocated copy of the instruction has been executed (or
   abandoned) in the scratch pad at TO.  Restore or adjust the PC so that
   execution resumes correctly in the original code at FROM, using the
   pc_adjust/cond state recorded in the closure DSC_ by the copy_insn
   visitors.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs, bool completed_p)
{
  CORE_ADDR pc = regcache_read_pc (regs);

  /* If the displaced instruction didn't complete successfully then all we
     need to do is restore the program counter.  */
  if (!completed_p)
    {
      pc = from + (pc - to);
      regcache_write_pc (regs, pc);
      return;
    }

  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      /* The visitors for conditional instructions emit a branch of +8 when
	 the condition holds, so the landing spot distinguishes the taken
	 (TO + 8) from the not-taken (TO + 4) case.  */
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
3932
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   AArch64 always uses hardware single-stepping to execute the relocated
   instruction copy, so unconditionally return true.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
3940
/* Get the correct target description for the given set of FEATURES.

   If FEATURES.vq is zero then it is assumed SVE is not supported.
   (It is not possible to set VQ to zero on an SVE system).

   Descriptions are created lazily and cached in tdesc_aarch64_map, keyed
   by the full feature set, so repeated requests for the same features
   return the same target description object.  */

const target_desc *
aarch64_read_description (const aarch64_features &features)
{
  if (features.vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
	   AARCH64_MAX_SVE_VQ);

  struct target_desc *tdesc = tdesc_aarch64_map[features];

  if (tdesc == NULL)
    {
      /* First request for this feature combination: build the description
	 and cache it for subsequent lookups.  */
      tdesc = aarch64_create_target_description (features);
      tdesc_aarch64_map[features] = tdesc;
    }

  return tdesc;
}
3966
ba2d2bb2
AH
3967/* Return the VQ used when creating the target description TDESC. */
3968
1332a140 3969static uint64_t
ba2d2bb2
AH
3970aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3971{
3972 const struct tdesc_feature *feature_sve;
3973
3974 if (!tdesc_has_registers (tdesc))
3975 return 0;
3976
3977 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3978
3979 if (feature_sve == nullptr)
3980 return 0;
3981
12863263
AH
3982 uint64_t vl = tdesc_register_bitsize (feature_sve,
3983 aarch64_sve_register_names[0]) / 8;
ba2d2bb2
AH
3984 return sve_vq_from_vl (vl);
3985}
3986
ca65640f
LM
3987
3988/* Return the svq (streaming vector quotient) used when creating the target
3989 description TDESC. */
3990
3991static uint64_t
3992aarch64_get_tdesc_svq (const struct target_desc *tdesc)
3993{
3994 const struct tdesc_feature *feature_sme;
3995
3996 if (!tdesc_has_registers (tdesc))
3997 return 0;
3998
3999 feature_sme = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
4000
4001 if (feature_sme == nullptr)
4002 return 0;
4003
4004 size_t svl_squared = tdesc_register_bitsize (feature_sme, "za");
4005
4006 /* We have the total size of the ZA matrix, in bits. Figure out the svl
4007 value. */
4008 size_t svl = std::sqrt (svl_squared / 8);
4009
4010 /* Now extract svq. */
4011 return sve_vq_from_vl (svl);
4012}
4013
4f3681cc
TJB
4014/* Get the AArch64 features present in the given target description. */
4015
4016aarch64_features
4017aarch64_features_from_target_desc (const struct target_desc *tdesc)
4018{
4019 aarch64_features features;
4020
4021 if (tdesc == nullptr)
4022 return features;
4023
4024 features.vq = aarch64_get_tdesc_vq (tdesc);
acdf6071
LM
4025
4026 /* We need to look for a couple pauth feature name variations. */
4f3681cc
TJB
4027 features.pauth
4028 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
acdf6071
LM
4029
4030 if (!features.pauth)
4031 features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2")
4032 != nullptr);
4033
4f3681cc
TJB
4034 features.mte
4035 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
ba60b963
LM
4036
4037 const struct tdesc_feature *tls_feature
4038 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4039
4040 if (tls_feature != nullptr)
4041 {
4042 /* We have TLS registers. Find out how many. */
4043 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
4044 features.tls = 2;
4045 else
4046 features.tls = 1;
4047 }
4f3681cc 4048
ca65640f
LM
4049 features.svq = aarch64_get_tdesc_svq (tdesc);
4050
4f3681cc
TJB
4051 return features;
4052}
4053
76bed0fd
AH
4054/* Implement the "cannot_store_register" gdbarch method. */
4055
4056static int
4057aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
4058{
08106042 4059 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
76bed0fd
AH
4060
4061 if (!tdep->has_pauth ())
4062 return 0;
4063
4064 /* Pointer authentication registers are read-only. */
6d002087
LM
4065 return (regnum >= tdep->pauth_reg_base
4066 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count);
76bed0fd
AH
4067}
4068
da729c5c
TT
4069/* Implement the stack_frame_destroyed_p gdbarch method. */
4070
4071static int
4072aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
4073{
4074 CORE_ADDR func_start, func_end;
4075 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
4076 return 0;
4077
4078 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
94355de7
LM
4079
4080 ULONGEST insn_from_memory;
4081 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
4082 &insn_from_memory))
4083 return 0;
4084
4085 uint32_t insn = insn_from_memory;
da729c5c
TT
4086
4087 aarch64_inst inst;
4088 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
4089 return 0;
4090
4091 return streq (inst.opcode->name, "ret");
4092}
4093
/* AArch64 implementation of the remove_non_address_bits gdbarch hook.  Remove
   non address bits from a pointer value.

   Returns POINTER with the top (non-address) bits cleared.  When pointer
   authentication mask registers are available for the current inferior they
   are used to widen the default TBI mask.  */

static CORE_ADDR
aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer)
{
  /* By default, we assume TBI and discard the top 8 bits plus the VA range
     select bit (55).  Below we try to fetch information about pointer
     authentication masks in order to make non-address removal more
     precise.  */
  CORE_ADDR mask = AARCH64_TOP_BITS_MASK;

  /* Check if we have an inferior first.  If not, just use the default
     mask.

     We use the inferior_ptid here because the pointer authentication masks
     should be the same across threads of a process.  Since we may not have
     access to the current thread (gdb may have switched to no inferiors
     momentarily), we use the inferior ptid.  */
  if (inferior_ptid != null_ptid)
    {
      /* If we do have an inferior, attempt to fetch its thread's thread_info
	 struct.  */
      thread_info *thread = current_inferior ()->find_thread (inferior_ptid);

      /* If the thread is running, we will not be able to fetch the mask
	 registers.  */
      if (thread != nullptr && thread->state != THREAD_RUNNING)
	{
	  /* Otherwise, fetch the register cache and the masks.  */
	  struct regcache *regs
	    = get_thread_regcache (current_inferior ()->process_target (),
				   inferior_ptid);

	  /* Use the gdbarch from the register cache to check for pointer
	     authentication support, as it matches the features found in
	     that particular thread.  */
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ());

	  /* Is there pointer authentication support?  */
	  if (tdep->has_pauth ())
	    {
	      CORE_ADDR cmask, dmask;
	      int dmask_regnum
		= AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base);
	      int cmask_regnum
		= AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

	      /* If we have a kernel address and we have kernel-mode address
		 mask registers, use those instead.  */
	      if (tdep->pauth_reg_count > 2
		  && pointer & VA_RANGE_SELECT_BIT_MASK)
		{
		  dmask_regnum
		    = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		  cmask_regnum
		    = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		}

	      /* We have both a code mask and a data mask.  For now they are
		 the same, but this may change in the future.  */
	      if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID)
		dmask = mask;

	      if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID)
		cmask = mask;

	      mask |= aarch64_mask_from_pac_registers (cmask, dmask);
	    }
	}
    }

  return aarch64_remove_top_bits (pointer, mask);
}
4169
ca65640f
LM
4170/* Given NAMES, a vector of strings, initialize it with all the SME
4171 pseudo-register names for the current streaming vector length. */
4172
4173static void
4174aarch64_initialize_sme_pseudo_names (struct gdbarch *gdbarch,
4175 std::vector<std::string> &names)
4176{
4177 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4178
4179 gdb_assert (tdep->has_sme ());
4180 gdb_assert (tdep->sme_tile_slice_pseudo_base > 0);
4181 gdb_assert (tdep->sme_tile_pseudo_base > 0);
4182
4183 for (int i = 0; i < tdep->sme_tile_slice_pseudo_count; i++)
4184 {
4185 int regnum = tdep->sme_tile_slice_pseudo_base + i;
4186 struct za_pseudo_encoding encoding;
4187 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4188 names.push_back (aarch64_za_tile_slice_name (encoding));
4189 }
4190 for (int i = 0; i < AARCH64_ZA_TILES_NUM; i++)
4191 {
4192 int regnum = tdep->sme_tile_pseudo_base + i;
4193 struct za_pseudo_encoding encoding;
4194 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4195 names.push_back (aarch64_za_tile_name (encoding));
4196 }
4197}
4198
07b287a0
MS
4199/* Initialize the current architecture based on INFO. If possible,
4200 re-use an architecture from ARCHES, which is a list of
4201 architectures already created during this debugging session.
4202
4203 Called e.g. at program startup, when reading a core file, and when
4204 reading a binary file. */
4205
4206static struct gdbarch *
4207aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
4208{
ccb8d7e8 4209 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
76bed0fd 4210 const struct tdesc_feature *feature_pauth;
ccb8d7e8
AH
4211 bool valid_p = true;
4212 int i, num_regs = 0, num_pseudo_regs = 0;
c9cd8ca4 4213 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
ba60b963 4214 int first_mte_regnum = -1, first_tls_regnum = -1;
4f3681cc 4215 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
ca65640f 4216 uint64_t svq = aarch64_get_tdesc_svq (info.target_desc);
4da037ef
AH
4217
4218 if (vq > AARCH64_MAX_SVE_VQ)
f34652de 4219 internal_error (_("VQ out of bounds: %s (max %d)"),
596179f7 4220 pulongest (vq), AARCH64_MAX_SVE_VQ);
4da037ef 4221
ca65640f
LM
4222 if (svq > AARCH64_MAX_SVE_VQ)
4223 internal_error (_("Streaming vector quotient (svq) out of bounds: %s"
4224 " (max %d)"),
4225 pulongest (svq), AARCH64_MAX_SVE_VQ);
4226
ccb8d7e8
AH
4227 /* If there is already a candidate, use it. */
4228 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
4229 best_arch != nullptr;
4230 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
4231 {
345bd07c 4232 aarch64_gdbarch_tdep *tdep
08106042 4233 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
ca65640f 4234 if (tdep && tdep->vq == vq && tdep->sme_svq == svq)
ccb8d7e8
AH
4235 return best_arch->gdbarch;
4236 }
07b287a0 4237
4da037ef
AH
4238 /* Ensure we always have a target descriptor, and that it is for the given VQ
4239 value. */
ccb8d7e8 4240 const struct target_desc *tdesc = info.target_desc;
ca65640f
LM
4241 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc)
4242 || svq != aarch64_get_tdesc_svq (tdesc))
4243 {
4244 aarch64_features features;
4245 features.vq = vq;
4246 features.svq = svq;
4247 tdesc = aarch64_read_description (features);
4248 }
07b287a0
MS
4249 gdb_assert (tdesc);
4250
ccb8d7e8 4251 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
ba2d2bb2
AH
4252 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
4253 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
5e984dbf
LM
4254 const struct tdesc_feature *feature_mte
4255 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
414d5848
JB
4256 const struct tdesc_feature *feature_tls
4257 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
07b287a0 4258
ccb8d7e8
AH
4259 if (feature_core == nullptr)
4260 return nullptr;
07b287a0 4261
c1e1314d 4262 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
07b287a0 4263
ba2d2bb2 4264 /* Validate the description provides the mandatory core R registers
07b287a0
MS
4265 and allocate their numbers. */
4266 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
c1e1314d 4267 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
ba2d2bb2
AH
4268 AARCH64_X0_REGNUM + i,
4269 aarch64_r_register_names[i]);
07b287a0
MS
4270
4271 num_regs = AARCH64_X0_REGNUM + i;
4272
ba2d2bb2 4273 /* Add the V registers. */
ccb8d7e8 4274 if (feature_fpu != nullptr)
07b287a0 4275 {
ccb8d7e8 4276 if (feature_sve != nullptr)
ba2d2bb2
AH
4277 error (_("Program contains both fpu and SVE features."));
4278
4279 /* Validate the description provides the mandatory V registers
4280 and allocate their numbers. */
07b287a0 4281 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
c1e1314d 4282 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
ba2d2bb2
AH
4283 AARCH64_V0_REGNUM + i,
4284 aarch64_v_register_names[i]);
07b287a0
MS
4285
4286 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 4287 }
07b287a0 4288
ba2d2bb2 4289 /* Add the SVE registers. */
ccb8d7e8 4290 if (feature_sve != nullptr)
ba2d2bb2
AH
4291 {
4292 /* Validate the description provides the mandatory SVE registers
4293 and allocate their numbers. */
4294 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
c1e1314d 4295 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
ba2d2bb2
AH
4296 AARCH64_SVE_Z0_REGNUM + i,
4297 aarch64_sve_register_names[i]);
4298
4299 num_regs = AARCH64_SVE_Z0_REGNUM + i;
4300 num_pseudo_regs += 32; /* add the Vn register pseudos. */
4301 }
4302
ccb8d7e8 4303 if (feature_fpu != nullptr || feature_sve != nullptr)
ba2d2bb2 4304 {
07b287a0
MS
4305 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
4306 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
4307 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
4308 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
4309 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
4310 }
4311
ca65640f
LM
4312 int first_sme_regnum = -1;
4313 int first_sme_pseudo_regnum = -1;
4314 const struct tdesc_feature *feature_sme
4315 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
4316 if (feature_sme != nullptr)
4317 {
4318 /* Record the first SME register. */
4319 first_sme_regnum = num_regs;
4320
4321 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4322 num_regs++, "svg");
4323
4324 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4325 num_regs++, "svcr");
4326
4327 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4328 num_regs++, "za");
4329
4330 /* Record the first SME pseudo register. */
4331 first_sme_pseudo_regnum = num_pseudo_regs;
4332
4333 /* Add the ZA tile slice pseudo registers. The number of tile slice
4334 pseudo-registers depend on the svl, and is always a multiple of 5. */
4335 num_pseudo_regs += (svq << 5) * 5;
4336
4337 /* Add the ZA tile pseudo registers. */
4338 num_pseudo_regs += AARCH64_ZA_TILES_NUM;
4339 }
4340
414d5848 4341 /* Add the TLS register. */
ba60b963 4342 int tls_register_count = 0;
414d5848
JB
4343 if (feature_tls != nullptr)
4344 {
ba60b963 4345 first_tls_regnum = num_regs;
414d5848 4346
ba60b963
LM
4347 /* Look for the TLS registers. tpidr is required, but tpidr2 is
4348 optional. */
4349 valid_p
4350 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4351 first_tls_regnum, "tpidr");
4352
4353 if (valid_p)
4354 {
4355 tls_register_count++;
4356
4357 bool has_tpidr2
4358 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4359 first_tls_regnum + tls_register_count,
4360 "tpidr2");
4361
4362 /* Figure out how many TLS registers we have. */
4363 if (has_tpidr2)
4364 tls_register_count++;
4365
4366 num_regs += tls_register_count;
4367 }
4368 else
4369 {
4370 warning (_("Provided TLS register feature doesn't contain "
4371 "required tpidr register."));
4372 return nullptr;
4373 }
414d5848
JB
4374 }
4375
acdf6071
LM
4376 /* We have two versions of the pauth target description due to a past bug
4377 where GDB would crash when seeing the first version of the pauth target
4378 description. */
4379 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
4380 if (feature_pauth == nullptr)
4381 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2");
4382
76bed0fd 4383 /* Add the pauth registers. */
6d002087 4384 int pauth_masks = 0;
76bed0fd
AH
4385 if (feature_pauth != NULL)
4386 {
4387 first_pauth_regnum = num_regs;
c9cd8ca4 4388 ra_sign_state_offset = num_pseudo_regs;
6d002087
LM
4389
4390 /* Size of the expected register set with all 4 masks. */
4391 int set_size = ARRAY_SIZE (aarch64_pauth_register_names);
4392
4393 /* QEMU exposes a couple additional masks for the high half of the
4394 address. We should either have 2 registers or 4 registers. */
4395 if (tdesc_unnumbered_register (feature_pauth,
4396 "pauth_dmask_high") == 0)
4397 {
4398 /* We did not find pauth_dmask_high, assume we only have
4399 2 masks. We are not dealing with QEMU/Emulators then. */
4400 set_size -= 2;
4401 }
4402
76bed0fd
AH
4403 /* Validate the descriptor provides the mandatory PAUTH registers and
4404 allocate their numbers. */
6d002087 4405 for (i = 0; i < set_size; i++)
c1e1314d 4406 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
76bed0fd
AH
4407 first_pauth_regnum + i,
4408 aarch64_pauth_register_names[i]);
4409
4410 num_regs += i;
34dcc7cf 4411 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
6d002087 4412 pauth_masks = set_size;
76bed0fd
AH
4413 }
4414
5e984dbf
LM
4415 /* Add the MTE registers. */
4416 if (feature_mte != NULL)
4417 {
4418 first_mte_regnum = num_regs;
4419 /* Validate the descriptor provides the mandatory MTE registers and
4420 allocate their numbers. */
4421 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
4422 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
4423 first_mte_regnum + i,
4424 aarch64_mte_register_names[i]);
4425
4426 num_regs += i;
4427 }
e63ae49b
LM
4428 /* W pseudo-registers */
4429 int first_w_regnum = num_pseudo_regs;
4430 num_pseudo_regs += 31;
5e984dbf 4431
07b287a0 4432 if (!valid_p)
c1e1314d 4433 return nullptr;
07b287a0
MS
4434
4435 /* AArch64 code is always little-endian. */
4436 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
4437
2b16913c
SM
4438 gdbarch *gdbarch
4439 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
4440 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
4441
4442 /* This should be low enough for everything. */
4443 tdep->lowest_pc = 0x20;
4444 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
4445 tdep->jb_elt_size = 8;
4da037ef 4446 tdep->vq = vq;
76bed0fd 4447 tdep->pauth_reg_base = first_pauth_regnum;
6d002087 4448 tdep->pauth_reg_count = pauth_masks;
1ba3a322 4449 tdep->ra_sign_state_regnum = -1;
5e984dbf 4450 tdep->mte_reg_base = first_mte_regnum;
ba60b963
LM
4451 tdep->tls_regnum_base = first_tls_regnum;
4452 tdep->tls_register_count = tls_register_count;
34dcc7cf 4453
ca65640f
LM
4454 /* Set the SME register set details. The pseudo-registers will be adjusted
4455 later. */
4456 tdep->sme_reg_base = first_sme_regnum;
4457 tdep->sme_svg_regnum = first_sme_regnum;
4458 tdep->sme_svcr_regnum = first_sme_regnum + 1;
4459 tdep->sme_za_regnum = first_sme_regnum + 2;
4460 tdep->sme_svq = svq;
4461
07b287a0
MS
4462 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
4463 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
4464
07b287a0
MS
4465 /* Advance PC across function entry code. */
4466 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
4467
4468 /* The stack grows downward. */
4469 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4470
4471 /* Breakpoint manipulation. */
04180708
YQ
4472 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
4473 aarch64_breakpoint::kind_from_pc);
4474 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
4475 aarch64_breakpoint::bp_from_kind);
07b287a0 4476 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 4477 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
4478
4479 /* Information about registers, etc. */
4480 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
4481 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
4482 set_gdbarch_num_regs (gdbarch, num_regs);
4483
4484 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
4485 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
4486 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
4487 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
4488 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
4489 set_tdesc_pseudo_register_reggroup_p (gdbarch,
4490 aarch64_pseudo_register_reggroup_p);
76bed0fd 4491 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
07b287a0
MS
4492
4493 /* ABI */
4494 set_gdbarch_short_bit (gdbarch, 16);
4495 set_gdbarch_int_bit (gdbarch, 32);
4496 set_gdbarch_float_bit (gdbarch, 32);
4497 set_gdbarch_double_bit (gdbarch, 64);
4498 set_gdbarch_long_double_bit (gdbarch, 128);
4499 set_gdbarch_long_bit (gdbarch, 64);
4500 set_gdbarch_long_long_bit (gdbarch, 64);
4501 set_gdbarch_ptr_bit (gdbarch, 64);
4502 set_gdbarch_char_signed (gdbarch, 0);
53375380 4503 set_gdbarch_wchar_signed (gdbarch, 0);
07b287a0
MS
4504 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
4505 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
552f1157 4506 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
b907456c 4507 set_gdbarch_type_align (gdbarch, aarch64_type_align);
07b287a0 4508
da729c5c
TT
4509 /* Detect whether PC is at a point where the stack has been destroyed. */
4510 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
4511
07b287a0
MS
4512 /* Internal <-> external register number maps. */
4513 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
4514
4515 /* Returning results. */
5cb0f2d5 4516 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);
07b287a0
MS
4517
4518 /* Disassembly. */
4519 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
4520
4521 /* Virtual tables. */
4522 set_gdbarch_vbit_in_delta (gdbarch, 1);
4523
4524 /* Hook in the ABI-specific overrides, if they have been registered. */
4525 info.target_desc = tdesc;
c1e1314d 4526 info.tdesc_data = tdesc_data.get ();
07b287a0
MS
4527 gdbarch_init_osabi (info, gdbarch);
4528
4529 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
11e1b75f
AH
4530 /* Register DWARF CFA vendor handler. */
4531 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
4532 aarch64_execute_dwarf_cfa_vendor_op);
07b287a0 4533
5133a315
LM
4534 /* Permanent/Program breakpoint handling. */
4535 set_gdbarch_program_breakpoint_here_p (gdbarch,
4536 aarch64_program_breakpoint_here_p);
4537
07b287a0
MS
4538 /* Add some default predicates. */
4539 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
4540 dwarf2_append_unwinders (gdbarch);
4541 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
4542
4543 frame_base_set_default (gdbarch, &aarch64_normal_base);
4544
4545 /* Now we have tuned the configuration, set a few final things,
4546 based on what the OS ABI has told us. */
4547
4548 if (tdep->jb_pc >= 0)
4549 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
4550
ea873d8e
PL
4551 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
4552
aa7ca1bb
AH
4553 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
4554
c1e1314d 4555 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
07b287a0 4556
1ba3a322
LM
4557 /* Fetch the updated number of registers after we're done adding all
4558 entries from features we don't explicitly care about. This is the case
4559 for bare metal debugging stubs that include a lot of system registers. */
4560 num_regs = gdbarch_num_regs (gdbarch);
4561
4562 /* With the number of real registers updated, setup the pseudo-registers and
4563 record their numbers. */
4564
e63ae49b
LM
4565 /* Setup W pseudo-register numbers. */
4566 tdep->w_pseudo_base = first_w_regnum + num_regs;
4567 tdep->w_pseudo_count = 31;
4568
1ba3a322
LM
4569 /* Pointer authentication pseudo-registers. */
4570 if (tdep->has_pauth ())
4571 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
4572
6d002087
LM
4573 /* Architecture hook to remove bits of a pointer that are not part of the
4574 address, like memory tags (MTE) and pointer authentication signatures. */
4575 set_gdbarch_remove_non_address_bits (gdbarch,
4576 aarch64_remove_non_address_bits);
4577
ca65640f
LM
4578 /* SME pseudo-registers. */
4579 if (tdep->has_sme ())
4580 {
4581 tdep->sme_pseudo_base = num_regs + first_sme_pseudo_regnum;
4582 tdep->sme_tile_slice_pseudo_base = tdep->sme_pseudo_base;
4583 tdep->sme_tile_slice_pseudo_count = (svq * 32) * 5;
4584 tdep->sme_tile_pseudo_base
4585 = tdep->sme_pseudo_base + tdep->sme_tile_slice_pseudo_count;
4586 tdep->sme_pseudo_count
4587 = tdep->sme_tile_slice_pseudo_count + AARCH64_ZA_TILES_NUM;
4588
4589 /* The SME ZA pseudo-registers are a set of 160 to 2560 pseudo-registers
4590 depending on the value of svl.
4591
4592 The tile pseudo-registers are organized around their qualifiers
4593 (b, h, s, d and q). Their numbers are distributed as follows:
4594
4595 b 0
4596 h 1~2
4597 s 3~6
4598 d 7~14
4599 q 15~30
4600
4601 The naming of the tile pseudo-registers follows the pattern za<t><q>,
4602 where:
4603
4604 <t> is the tile number, with the following possible values based on
4605 the qualifiers:
4606
4607 Qualifier - Allocated indexes
4608
4609 b - 0
4610 h - 0~1
4611 s - 0~3
4612 d - 0~7
4613 q - 0~15
4614
4615 <q> is the qualifier: b, h, s, d and q.
4616
4617 The tile slice pseudo-registers are organized around their
4618 qualifiers as well (b, h, s, d and q), but also around their
4619 direction (h - horizontal and v - vertical).
4620
4621 Even-numbered tile slice pseudo-registers are horizontally-oriented
4622 and odd-numbered tile slice pseudo-registers are vertically-oriented.
4623
4624 Their numbers are distributed as follows:
4625
4626 Qualifier - Allocated indexes
4627
4628 b tile slices - 0~511
4629 h tile slices - 512~1023
4630 s tile slices - 1024~1535
4631 d tile slices - 1536~2047
4632 q tile slices - 2048~2559
4633
4634 The naming of the tile slice pseudo-registers follows the pattern
4635 za<t><d><q><s>, where:
4636
4637 <t> is the tile number as described for the tile pseudo-registers.
4638 <d> is the direction of the tile slice (h or v)
4639 <q> is the qualifier of the tile slice (b, h, s, d or q)
4640 <s> is the slice number, defined as follows:
4641
4642 Qualifier - Allocated indexes
4643
4644 b - 0~15
4645 h - 0~7
4646 s - 0~3
4647 d - 0~1
4648 q - 0
4649
4650 We have helper functions to translate to/from register index from/to
4651 the set of fields that make the pseudo-register names. */
4652
4653 /* Build the array of pseudo-register names available for this
4654 particular gdbarch configuration. */
4655 aarch64_initialize_sme_pseudo_names (gdbarch, tdep->sme_pseudo_names);
4656 }
4657
07b287a0
MS
4658 /* Add standard register aliases. */
4659 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
4660 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
4661 value_of_aarch64_user_reg,
4662 &aarch64_register_aliases[i].regnum);
4663
e8bf1ce4
JB
4664 register_aarch64_ravenscar_ops (gdbarch);
4665
07b287a0
MS
4666 return gdbarch;
4667}
4668
4669static void
4670aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
4671{
08106042 4672 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
07b287a0
MS
4673
4674 if (tdep == NULL)
4675 return;
4676
09a5d200 4677 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
6cb06a8c 4678 paddress (gdbarch, tdep->lowest_pc));
ca65640f
LM
4679
4680 /* SME fields. */
4681 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_q = %s\n"),
4682 host_address_to_string (tdep->sme_tile_type_q));
4683 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_d = %s\n"),
4684 host_address_to_string (tdep->sme_tile_type_d));
4685 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_s = %s\n"),
4686 host_address_to_string (tdep->sme_tile_type_s));
4687 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_h = %s\n"),
4688 host_address_to_string (tdep->sme_tile_type_h));
4689 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_n = %s\n"),
4690 host_address_to_string (tdep->sme_tile_type_b));
4691 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_q = %s\n"),
4692 host_address_to_string (tdep->sme_tile_slice_type_q));
4693 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_d = %s\n"),
4694 host_address_to_string (tdep->sme_tile_slice_type_d));
4695 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_s = %s\n"),
4696 host_address_to_string (tdep->sme_tile_slice_type_s));
4697 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_h = %s\n"),
4698 host_address_to_string (tdep->sme_tile_slice_type_h));
4699 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_b = %s\n"),
4700 host_address_to_string (tdep->sme_tile_slice_type_b));
4701 gdb_printf (file, _("aarch64_dump_tdep: sme_reg_base = %s\n"),
4702 pulongest (tdep->sme_reg_base));
4703 gdb_printf (file, _("aarch64_dump_tdep: sme_svg_regnum = %s\n"),
4704 pulongest (tdep->sme_svg_regnum));
4705 gdb_printf (file, _("aarch64_dump_tdep: sme_svcr_regnum = %s\n"),
4706 pulongest (tdep->sme_svcr_regnum));
4707 gdb_printf (file, _("aarch64_dump_tdep: sme_za_regnum = %s\n"),
4708 pulongest (tdep->sme_za_regnum));
4709 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_base = %s\n"),
4710 pulongest (tdep->sme_pseudo_base));
4711 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_count = %s\n"),
4712 pulongest (tdep->sme_pseudo_count));
4713 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_base = %s\n"),
4714 pulongest (tdep->sme_tile_slice_pseudo_base));
4715 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_count = %s\n"),
4716 pulongest (tdep->sme_tile_slice_pseudo_count));
4717 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_pseudo_base = %s\n"),
4718 pulongest (tdep->sme_tile_pseudo_base));
4719 gdb_printf (file, _("aarch64_dump_tdep: sme_svq = %s\n"),
4720 pulongest (tdep->sme_svq));
07b287a0
MS
4721}
4722
#if GDB_SELF_TEST
/* Forward declaration so _initialize_aarch64_tdep below can register the
   process-record self test before its definition appears.  */
namespace selftests
{
static void aarch64_process_record_test (void);
}
#endif
1e2b521d 4729
/* Module initializer: register the AArch64 architecture with GDB's gdbarch
   machinery, install the "set/show debug aarch64" maintenance command, and
   (in self-test builds) register this file's unit tests.  */

void _initialize_aarch64_tdep ();
void
_initialize_aarch64_tdep ()
{
  /* aarch64_gdbarch_init builds a gdbarch for bfd_arch_aarch64 targets;
     aarch64_dump_tdep prints its tdep for maintenance commands.  */
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
99afc88b
OJ
4753
4754/* AArch64 process record-replay related structures, defines etc. */
4755
/* Allocate an array for REGS holding LENGTH register numbers and copy
   LENGTH entries from RECORD_BUF into it.  No-op when LENGTH is zero.
   Caller owns (and must free) the allocation.  LENGTH is evaluated only
   once and all arguments are parenthesized, so expression arguments are
   safe.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = (LENGTH); \
	    if (reg_len) \
	      { \
		(REGS) = XNEWVEC (uint32_t, reg_len); \
		memcpy (&(REGS)[0], &(RECORD_BUF)[0], \
			sizeof (uint32_t) * reg_len); \
	      } \
	  } \
	while (0)
99afc88b
OJ
4767
/* Allocate an array for MEMS holding LENGTH memory records and copy
   LENGTH aarch64_mem_r entries from RECORD_BUF into it.  No-op when
   LENGTH is zero.  Caller owns (and must free) the allocation.  LENGTH
   is evaluated only once and all arguments are parenthesized, so
   expression arguments are safe.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int mem_len = (LENGTH); \
	    if (mem_len) \
	      { \
		(MEMS) = XNEWVEC (struct aarch64_mem_r, mem_len); \
		memcpy ((MEMS), &(RECORD_BUF)[0], \
			sizeof (struct aarch64_mem_r) * mem_len); \
	      } \
	  } \
	while (0)
99afc88b
OJ
4780
4781/* AArch64 record/replay structures and enumerations. */
4782
/* One to-be-recorded memory write: ADDR bytes starting at LEN... i.e. a
   (length, address) pair as filled in by the load/store record handlers
   (record_buf_mem stores them as length followed by address).  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
4788
/* Outcome of decoding one instruction for process record/replay.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Instruction decoded; records captured.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Recognized but cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN	/* Encoding not recognized.  */
};
4795
/* Decode context passed between the per-class record handlers: the
   instruction being recorded plus the register and memory records
   accumulated for it (arrays allocated via REG_ALLOC/MEM_ALLOC).  */
struct aarch64_insn_decode_record
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;			/* Address of insn to be recorded.  */
  uint32_t aarch64_insn;		/* Insn to be recorded.  */
  uint32_t mem_rec_count;		/* Count of memory records.  */
  uint32_t reg_rec_count;		/* Count of register records.  */
  uint32_t *aarch64_regs;		/* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;	/* Memory locations to be recorded.  */
};
99afc88b
OJ
4807
4808/* Record handler for data processing - register instructions. */
4809
4810static unsigned int
4748a9be 4811aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
4812{
4813 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
4814 uint32_t record_buf[4];
4815
4816 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4817 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4818 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
4819
4820 if (!bit (aarch64_insn_r->aarch64_insn, 28))
4821 {
4822 uint8_t setflags;
4823
4824 /* Logical (shifted register). */
4825 if (insn_bits24_27 == 0x0a)
4826 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
4827 /* Add/subtract. */
4828 else if (insn_bits24_27 == 0x0b)
4829 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4830 else
4831 return AARCH64_RECORD_UNKNOWN;
4832
4833 record_buf[0] = reg_rd;
4834 aarch64_insn_r->reg_rec_count = 1;
4835 if (setflags)
4836 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4837 }
4838 else
4839 {
4840 if (insn_bits24_27 == 0x0b)
4841 {
4842 /* Data-processing (3 source). */
4843 record_buf[0] = reg_rd;
4844 aarch64_insn_r->reg_rec_count = 1;
4845 }
4846 else if (insn_bits24_27 == 0x0a)
4847 {
4848 if (insn_bits21_23 == 0x00)
4849 {
4850 /* Add/subtract (with carry). */
4851 record_buf[0] = reg_rd;
4852 aarch64_insn_r->reg_rec_count = 1;
4853 if (bit (aarch64_insn_r->aarch64_insn, 29))
4854 {
4855 record_buf[1] = AARCH64_CPSR_REGNUM;
4856 aarch64_insn_r->reg_rec_count = 2;
4857 }
4858 }
4859 else if (insn_bits21_23 == 0x02)
4860 {
4861 /* Conditional compare (register) and conditional compare
4862 (immediate) instructions. */
4863 record_buf[0] = AARCH64_CPSR_REGNUM;
4864 aarch64_insn_r->reg_rec_count = 1;
4865 }
4866 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
4867 {
85102364 4868 /* Conditional select. */
99afc88b
OJ
4869 /* Data-processing (2 source). */
4870 /* Data-processing (1 source). */
4871 record_buf[0] = reg_rd;
4872 aarch64_insn_r->reg_rec_count = 1;
4873 }
4874 else
4875 return AARCH64_RECORD_UNKNOWN;
4876 }
4877 }
4878
4879 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4880 record_buf);
4881 return AARCH64_RECORD_SUCCESS;
4882}
4883
4884/* Record handler for data processing - immediate instructions. */
4885
4886static unsigned int
4748a9be 4887aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b 4888{
78cc6c2d 4889 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
99afc88b
OJ
4890 uint32_t record_buf[4];
4891
4892 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
99afc88b
OJ
4893 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4894 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4895
4896 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
4897 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
4898 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
4899 {
4900 record_buf[0] = reg_rd;
4901 aarch64_insn_r->reg_rec_count = 1;
4902 }
4903 else if (insn_bits24_27 == 0x01)
4904 {
4905 /* Add/Subtract (immediate). */
4906 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4907 record_buf[0] = reg_rd;
4908 aarch64_insn_r->reg_rec_count = 1;
4909 if (setflags)
4910 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4911 }
4912 else if (insn_bits24_27 == 0x02 && !insn_bit23)
4913 {
4914 /* Logical (immediate). */
4915 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
4916 record_buf[0] = reg_rd;
4917 aarch64_insn_r->reg_rec_count = 1;
4918 if (setflags)
4919 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4920 }
4921 else
4922 return AARCH64_RECORD_UNKNOWN;
4923
4924 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4925 record_buf);
4926 return AARCH64_RECORD_SUCCESS;
4927}
4928
/* Record handler for branch, exception generation and system instructions.

   Records PC (and LR for linking branches), CPSR for system instructions
   that can alter it, and delegates SVC to the OS-specific syscall record
   hook.  Returns an aarch64_record_result code.  */

static unsigned int
aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
{
  aarch64_gdbarch_tdep *tdep
    = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC (opc == 000, op2 == 000, LL == 01) is recordable;
	     everything else in this class is unsupported.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* The syscall number is in register 8 (x8); hand off to
		 the OS-ABI-specific syscall record routine.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Other system instructions record nothing and fall through
	     to the successful return below.  */
	}
      /* Unconditional branch (register).  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL (bit 31 set) also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
5010
/* Record handler for advanced SIMD load and store instructions.

   Handles both the "single structure" (bit 24 set) and "multiple
   structure" forms.  For loads, the written V registers are recorded;
   for stores, (length, address) pairs are recorded into record_buf_mem
   (consumed pairwise by MEM_ALLOC, hence mem_rec_count = mem_index / 2).
   Base-register writeback (bit 23) additionally records Rn.  */

static unsigned int
aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address for any memory records.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      /* scale selects the element size; selem is the number of
	 structure elements (1..4) from opcode bit 1 and the S bit.  */
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* LD*R (load and replicate): element size comes from
	     size_bits instead.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;	/* Element size in bits.  */
      if (replicate)
	/* Replicating loads write SELEM consecutive V registers
	   (wrapping modulo 32).  */
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22: load (record register) vs store (record the
		 element-sized memory write).  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Q bit: 128-bit vs 64-bit vector form.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* rpt = register repeat count, selem = structure elements.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Post-indexed addressing writes back the base register.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
5176
/* Record handler for load and store instructions.

   Dispatches on the encoding class: load/store exclusive, load literal,
   load/store pair, and the register (unsigned-immediate, register-offset,
   immediate/unprivileged) forms; anything else is handed to the advanced
   SIMD handler.  Loads record the destination register(s); stores record
   (length, address) pairs in record_buf_mem.  Writeback forms also record
   the base register Rn.  */

static unsigned int
aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);  /* Load vs store.  */
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);  /* SIMD&FP reg.  */
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  /* Pair form (LDXP/LDAXP) also writes Rt2.  */
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  /* Pair stores write twice the element size.  */
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* Sign-extend the 7-bit immediate and scale it to bytes.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed forms store at the unmodified base address;
	     all others apply the offset first.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Pre/post-indexed forms write back the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate): no architectural effect to record.  */
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate) */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  /* Store: the 12-bit immediate is scaled by the access size.  */
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  /* Offset comes from Rm, optionally scaled (S bit, bit 12).  */
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  /* Sign-extend the 9-bit immediate.  */
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed (bits10_11 == 0x01) writes at the unmodified
	     base; other forms apply the offset first.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre/post-indexed forms write back the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
5464
5465/* Record handler for data processing SIMD and floating point instructions. */
5466
5467static unsigned int
4748a9be 5468aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
5469{
5470 uint8_t insn_bit21, opcode, rmode, reg_rd;
5471 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
5472 uint8_t insn_bits11_14;
5473 uint32_t record_buf[2];
5474
5475 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5476 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
5477 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5478 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5479 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
5480 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
5481 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
5482 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5483 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
5484
5485 if (record_debug)
b277c936 5486 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
5487
5488 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
5489 {
5490 /* Floating point - fixed point conversion instructions. */
5491 if (!insn_bit21)
5492 {
5493 if (record_debug)
b277c936 5494 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
5495
5496 if ((opcode >> 1) == 0x0 && rmode == 0x03)
5497 record_buf[0] = reg_rd;
5498 else
5499 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5500 }
5501 /* Floating point - conditional compare instructions. */
5502 else if (insn_bits10_11 == 0x01)
5503 {
5504 if (record_debug)
b277c936 5505 debug_printf ("FP - conditional compare");
99afc88b
OJ
5506
5507 record_buf[0] = AARCH64_CPSR_REGNUM;
5508 }
5509 /* Floating point - data processing (2-source) and
dda83cd7 5510 conditional select instructions. */
99afc88b
OJ
5511 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
5512 {
5513 if (record_debug)
b277c936 5514 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
5515
5516 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5517 }
5518 else if (insn_bits10_11 == 0x00)
5519 {
5520 /* Floating point - immediate instructions. */
5521 if ((insn_bits12_15 & 0x01) == 0x01
5522 || (insn_bits12_15 & 0x07) == 0x04)
5523 {
5524 if (record_debug)
b277c936 5525 debug_printf ("FP - immediate");
99afc88b
OJ
5526 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5527 }
5528 /* Floating point - compare instructions. */
5529 else if ((insn_bits12_15 & 0x03) == 0x02)
5530 {
5531 if (record_debug)
b277c936 5532 debug_printf ("FP - immediate");
99afc88b
OJ
5533 record_buf[0] = AARCH64_CPSR_REGNUM;
5534 }
5535 /* Floating point - integer conversions instructions. */
f62fce35 5536 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
5537 {
5538 /* Convert float to integer instruction. */
5539 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
5540 {
5541 if (record_debug)
b277c936 5542 debug_printf ("float to int conversion");
99afc88b
OJ
5543
5544 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5545 }
5546 /* Convert integer to float instruction. */
5547 else if ((opcode >> 1) == 0x01 && !rmode)
5548 {
5549 if (record_debug)
b277c936 5550 debug_printf ("int to float conversion");
99afc88b
OJ
5551
5552 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5553 }
5554 /* Move float to integer instruction. */
5555 else if ((opcode >> 1) == 0x03)
5556 {
5557 if (record_debug)
b277c936 5558 debug_printf ("move float to int");
99afc88b
OJ
5559
5560 if (!(opcode & 0x01))
5561 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5562 else
5563 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5564 }
f62fce35
YQ
5565 else
5566 return AARCH64_RECORD_UNKNOWN;
dda83cd7 5567 }
f62fce35
YQ
5568 else
5569 return AARCH64_RECORD_UNKNOWN;
dda83cd7 5570 }
f62fce35
YQ
5571 else
5572 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
5573 }
5574 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
5575 {
5576 if (record_debug)
b277c936 5577 debug_printf ("SIMD copy");
99afc88b
OJ
5578
5579 /* Advanced SIMD copy instructions. */
5580 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
5581 && !bit (aarch64_insn_r->aarch64_insn, 15)
5582 && bit (aarch64_insn_r->aarch64_insn, 10))
5583 {
5584 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
5585 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5586 else
5587 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5588 }
5589 else
5590 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5591 }
5592 /* All remaining floating point or advanced SIMD instructions. */
5593 else
5594 {
5595 if (record_debug)
b277c936 5596 debug_printf ("all remain");
99afc88b
OJ
5597
5598 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5599 }
5600
5601 if (record_debug)
b277c936 5602 debug_printf ("\n");
99afc88b 5603
bfbe4b84 5604 /* Record the V/X register. */
99afc88b 5605 aarch64_insn_r->reg_rec_count++;
bfbe4b84
LM
5606
5607 /* Some of these instructions may set bits in the FPSR, so record it
5608 too. */
5609 record_buf[1] = AARCH64_FPSR_REGNUM;
5610 aarch64_insn_r->reg_rec_count++;
5611
5612 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
99afc88b
OJ
5613 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5614 record_buf);
5615 return AARCH64_RECORD_SUCCESS;
5616}
5617
5618/* Decodes insns type and invokes its record handler. */
5619
5620static unsigned int
4748a9be 5621aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
99afc88b
OJ
5622{
5623 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
5624
5625 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
5626 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
5627 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
5628 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
5629
5630 /* Data processing - immediate instructions. */
5631 if (!ins_bit26 && !ins_bit27 && ins_bit28)
5632 return aarch64_record_data_proc_imm (aarch64_insn_r);
5633
5634 /* Branch, exception generation and system instructions. */
5635 if (ins_bit26 && !ins_bit27 && ins_bit28)
5636 return aarch64_record_branch_except_sys (aarch64_insn_r);
5637
5638 /* Load and store instructions. */
5639 if (!ins_bit25 && ins_bit27)
5640 return aarch64_record_load_store (aarch64_insn_r);
5641
5642 /* Data processing - register instructions. */
5643 if (ins_bit25 && !ins_bit26 && ins_bit27)
5644 return aarch64_record_data_proc_reg (aarch64_insn_r);
5645
5646 /* Data processing - SIMD and floating point instructions. */
5647 if (ins_bit25 && ins_bit26 && ins_bit27)
5648 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
5649
5650 return AARCH64_RECORD_UNSUPPORTED;
5651}
5652
5653/* Cleans up local record registers and memory allocations. */
5654
5655static void
4748a9be 5656deallocate_reg_mem (aarch64_insn_decode_record *record)
99afc88b
OJ
5657{
5658 xfree (record->aarch64_regs);
5659 xfree (record->aarch64_mems);
5660}
5661
1e2b521d
YQ
5662#if GDB_SELF_TEST
5663namespace selftests {
5664
5665static void
5666aarch64_process_record_test (void)
5667{
5668 struct gdbarch_info info;
5669 uint32_t ret;
5670
1e2b521d
YQ
5671 info.bfd_arch_info = bfd_scan_arch ("aarch64");
5672
5673 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
5674 SELF_CHECK (gdbarch != NULL);
5675
4748a9be 5676 aarch64_insn_decode_record aarch64_record;
1e2b521d 5677
4748a9be 5678 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
1e2b521d
YQ
5679 aarch64_record.regcache = NULL;
5680 aarch64_record.this_addr = 0;
5681 aarch64_record.gdbarch = gdbarch;
5682
5683 /* 20 00 80 f9 prfm pldl1keep, [x1] */
5684 aarch64_record.aarch64_insn = 0xf9800020;
5685 ret = aarch64_record_decode_insn_handler (&aarch64_record);
5686 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
5687 SELF_CHECK (aarch64_record.reg_rec_count == 0);
5688 SELF_CHECK (aarch64_record.mem_rec_count == 0);
5689
5690 deallocate_reg_mem (&aarch64_record);
5691}
5692
5693} // namespace selftests
5694#endif /* GDB_SELF_TEST */
5695
99afc88b
OJ
5696/* Parse the current instruction and record the values of the registers and
5697 memory that will be changed in current instruction to record_arch_list
5698 return -1 if something is wrong. */
5699
5700int
5701aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
5702 CORE_ADDR insn_addr)
5703{
5704 uint32_t rec_no = 0;
5705 uint8_t insn_size = 4;
5706 uint32_t ret = 0;
99afc88b 5707 gdb_byte buf[insn_size];
4748a9be 5708 aarch64_insn_decode_record aarch64_record;
99afc88b
OJ
5709
5710 memset (&buf[0], 0, insn_size);
4748a9be 5711 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
99afc88b
OJ
5712 target_read_memory (insn_addr, &buf[0], insn_size);
5713 aarch64_record.aarch64_insn
5714 = (uint32_t) extract_unsigned_integer (&buf[0],
5715 insn_size,
5716 gdbarch_byte_order (gdbarch));
5717 aarch64_record.regcache = regcache;
5718 aarch64_record.this_addr = insn_addr;
5719 aarch64_record.gdbarch = gdbarch;
5720
5721 ret = aarch64_record_decode_insn_handler (&aarch64_record);
5722 if (ret == AARCH64_RECORD_UNSUPPORTED)
5723 {
6cb06a8c
TT
5724 gdb_printf (gdb_stderr,
5725 _("Process record does not support instruction "
5726 "0x%0x at address %s.\n"),
5727 aarch64_record.aarch64_insn,
5728 paddress (gdbarch, insn_addr));
99afc88b
OJ
5729 ret = -1;
5730 }
5731
5732 if (0 == ret)
5733 {
5734 /* Record registers. */
5735 record_full_arch_list_add_reg (aarch64_record.regcache,
5736 AARCH64_PC_REGNUM);
5737 /* Always record register CPSR. */
5738 record_full_arch_list_add_reg (aarch64_record.regcache,
5739 AARCH64_CPSR_REGNUM);
5740 if (aarch64_record.aarch64_regs)
5741 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
5742 if (record_full_arch_list_add_reg (aarch64_record.regcache,
5743 aarch64_record.aarch64_regs[rec_no]))
5744 ret = -1;
5745
5746 /* Record memories. */
5747 if (aarch64_record.aarch64_mems)
5748 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
5749 if (record_full_arch_list_add_mem
5750 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
5751 aarch64_record.aarch64_mems[rec_no].len))
5752 ret = -1;
5753
5754 if (record_full_arch_list_add_end ())
5755 ret = -1;
5756 }
5757
5758 deallocate_reg_mem (&aarch64_record);
5759 return ret;
5760}