/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2025 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */


#include "extract-store-integer.h"
#include "frame.h"
#include "language.h"
#include "cli/cli-cmds.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"
#include "arch/aarch64-mte.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>
#include <unordered_map>

/* For inferior_ptid and current_inferior ().  */
#include "inferior.h"
/* For std::sqrt and std::pow.  */
#include <cmath>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;

/* The standard register names, and all the valid aliases for them.
   We're not adding fp here, that name is already taken, see
   _initialize_frame_reg.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* Link register alias for x30.  */
  {"lr", AARCH64_LR_REGNUM},
  /* SP is the canonical name for x31 according to aarch64_r_register_names,
     so we're adding an x31 alias for sp.  */
  {"x31", AARCH64_SP_REGNUM},
  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
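
/* Note: these aliases are registered as GDB user registers (via
   user_reg_add during gdbarch initialization), so e.g. "print $ip0" and
   "print $x16" read the same underlying register.  (Illustrative note;
   the registration itself happens later in this file.)  */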

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer, low half/user pointers.  */
  "pauth_dmask",
  /* Authentication mask for code pointer, low half/user pointers.  */
  "pauth_cmask",
  /* Authentication mask for data pointer, high half / kernel pointers.  */
  "pauth_dmask_high",
  /* Authentication mask for code pointer, high half / kernel pointers.  */
  "pauth_cmask_high"
};

static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};

static const char *const aarch64_gcs_register_names[] = {
  /* Guarded Control Stack Pointer Register.  */
  "gcspr"
};

static const char *const aarch64_gcs_linux_register_names[] = {
  /* Field in struct user_gcs.  */
  "gcs_features_enabled",
  /* Field in struct user_gcs.  */
  "gcs_features_locked",
};

static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};

/* Holds information used to read/write from/to ZA
   pseudo-registers.

   With this information, the read/write code can be simplified so it
   deals only with the required information to map a ZA pseudo-register
   to the exact bytes into the ZA contents buffer.  Otherwise we'd need
   to use a lot of conditionals.  */

struct za_offsets
{
  /* Offset, into ZA, of the starting byte of the pseudo-register.  */
  size_t starting_offset;
  /* The size of the contiguous chunks of the pseudo-register.  */
  size_t chunk_size;
  /* The number of pseudo-register chunks contained in ZA.  */
  size_t chunks;
  /* The offset between each contiguous chunk.  */
  size_t stride_size;
};
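
/* For illustration: reading a ZA pseudo-register then amounts to copying
   CHUNKS chunks of CHUNK_SIZE bytes each, starting STARTING_OFFSET bytes
   into the ZA buffer and advancing STRIDE_SIZE bytes between chunks.
   Roughly speaking, a horizontal tile slice is a single contiguous run
   (one chunk), while a vertical tile slice is many small chunks strided
   across ZA.  (Explanatory note; the field values themselves are computed
   elsewhere in this file.)  */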

/* Holds data that is helpful to determine the individual fields that make
   up the names of the ZA pseudo-registers.  It is also very helpful to
   determine offsets, stride and sizes for reading ZA tiles and tile
   slices.  */

struct za_pseudo_encoding
{
  /* The slice index (0 ~ svl).  Only used for tile slices.  */
  uint8_t slice_index;
  /* The tile number (0 ~ 15).  */
  uint8_t tile_index;
  /* Direction (horizontal/vertical).  Only used for tile slices.  */
  bool horizontal;
  /* Qualifier index (0 ~ 4).  These map to B, H, S, D and Q.  */
  uint8_t qualifier_index;
};
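
/* For example, a vertical D-qualifier slice 5 of tile 3 would be encoded
   as tile_index 3, horizontal false, qualifier_index 3 (D) and
   slice_index 5.  (Illustrative example derived from the field comments
   above.)  */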

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
			 const frame_info_ptr &this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->ra_sign_state_regnum))
    {
      /* VA range select (bit 55) tells us whether to use the low half masks
	 or the high half masks.  */
      int cmask_num;
      if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK)
	cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
      else
	cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

      /* By default, we assume TBI and discard the top 8 bits plus the VA
	 range select bit (55).  */
      CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
      mask |= frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = aarch64_remove_top_bits (addr, mask);

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
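
/* For illustration: a PAC-signed LR such as 0xf5d00000004005b4 would be
   restored to 0x00000000004005b4, assuming the combined mask covers all
   the non-address top bits on this target.  (Hypothetical values, for
   clarity only.)  */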

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (const frame_info_ptr &frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == addsub_ext
	       && strcmp ("sub", inst.opcode->name) == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);

	  regs[rd] = pv_subtract (regs[rn], regs[rm]);
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  unsigned rd = inst.operands[0].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
	  gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[rd] = pv_constant (inst.operands[1].imm.value
				  << inst.operands[1].shifter.amount);
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} /* namespace selftests */
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, func_end_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  bool func_addr_found
    = find_pc_partial_function (pc, NULL, &func_addr, &func_end_addr);

  if (func_addr_found)
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  limit_pc
    = func_end_addr == 0 ? limit_pc : std::min (limit_pc, func_end_addr - 4);

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (const frame_info_ptr &this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (const frame_info_ptr &this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp;
  if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					cache->prev_pc))
    cache->prev_sp += cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (const frame_info_ptr &this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (const frame_info_ptr &this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (const frame_info_ptr &this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |  <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
static const frame_unwind_legacy aarch64_prologue_unwind (
  "aarch64 prologue",
  NORMAL_FRAME,
  FRAME_UNWIND_ARCH,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
);

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (const frame_info_ptr &this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (const frame_info_ptr &this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     const frame_info_ptr &this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
static const frame_unwind_legacy aarch64_stub_unwind (
  "aarch64 stub",
  NORMAL_FRAME,
  FRAME_UNWIND_ARCH,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
);

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (const frame_info_ptr &this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (const frame_info_ptr &this_frame,
			      void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (_("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
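
/* Note: DW_OP_lit0 and DW_OP_lit1 are single-byte DWARF expression
   opcodes that push the constants 0 and 1.  Keeping one static byte for
   each lets the code below point a one-byte "expression" at them (see
   the reg->loc.exp.start/len assignments) without allocating storage.  */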

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       const frame_info_ptr &this_frame)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->ra_sign_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum >= tdep->pauth_reg_base
	       && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
  if (tdep->has_gcs () && tdep->fn_prev_gcspr != nullptr
      && regnum == tdep->gcs_reg_base)
    {
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = tdep->fn_prev_gcspr;
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
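
/* For example, both "brk #0x0" (0xd4200000) and "brk #0x3e8" (0xd4207d00)
   match this pattern: the 16-bit immediate occupies bits 20:5, which
   BRK_INSN_MASK deliberately ignores.  */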

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
					     gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
	 choices of such instructions with different immediate values.
	 Different OS' may use a different variation, but they have the
	 same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (t->length () > 16)
	return 16;
      else
	return t->length ();
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
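
/* For instance, an 8-byte vector (such as a GCC
   __attribute__((vector_size (8))) type) gets 8-byte alignment, a 16-byte
   vector gets 16-byte alignment, and anything wider is still capped at 16
   bytes.  Returning 0 for every other type defers to GDB's generic
   type_align code.  (Illustrative summary of the function above.)  */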

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (type->field (i).is_static ())
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero-length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
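
/* For example (in C terms): "struct { float x, y, z; }" yields count 3
   with FUNDAMENTAL_TYPE float (an HFA), "double _Complex" yields count 2
   with FUNDAMENTAL_TYPE double, while "struct { float f; int i; }" fails
   with -1 because int is not a valid base element.  */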

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = type->length ();
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = arg->contents ().data ();

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
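
/* For example, a 12-byte struct passed with INFO->ngrn == 0 is copied into
   x0 (8 bytes) and x1 (the remaining 4 bytes); the caller
   (pass_in_x_or_stack, below) has already checked that enough X registers
   are free.  (Illustrative note.)  */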

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb::byte_vector reg (register_size (gdbarch, regnum), 0);
      gdb_assert (len <= reg.size ());

      info->argnum++;
      info->nsrn++;

      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg.data (), buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = arg->contents ().data ();
  int len = type->length ();
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
			info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
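
/* Worked example: a 12-byte argument pushed at NSAA 0 advances NSAA to 12;
   since 12 is not a multiple of the 8-byte alignment, a 4-byte padding
   item (data == NULL) is queued and NSAA becomes 16, keeping the next
   slot 8-byte aligned.  */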
1812
1813 /* Marshall an argument into a sequence of one or more consecutive X
1814 registers or, if insufficient X registers are available then onto
1815 the stack. */
1816
1817 static void
1818 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1819 struct aarch64_call_info *info, struct type *type,
1820 struct value *arg)
1821 {
1822 int len = type->length ();
1823 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1824
1825 /* PCS C.13 - Pass in registers if we have enough spare */
1826 if (info->ngrn + nregs <= 8)
1827 {
1828 pass_in_x (gdbarch, regcache, info, type, arg);
1829 info->ngrn += nregs;
1830 }
1831 else
1832 {
1833 info->ngrn = 8;
1834 pass_on_stack (info, type, arg);
1835 }
1836 }
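
/* Example of the C.13 rule above (illustrative values): a 24-byte
   argument needs nregs == 3. With ngrn == 6 only two X registers
   remain, so the whole argument goes onto the stack and ngrn is set
   to 8, preventing later arguments from back-filling x6 and x7. */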
1837
1838 /* Pass a value, which is of type ARG_TYPE, in a V register. Assumes the
1839 value satisfies aapcs_is_vfp_call_or_return_candidate and that there
1840 are enough spare V registers. A return value of false is an error
1841 state, as the value will have been partially passed to the stack. */
1842 static bool
1843 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1844 struct aarch64_call_info *info, struct type *arg_type,
1845 struct value *arg)
1846 {
1847 switch (arg_type->code ())
1848 {
1849 case TYPE_CODE_FLT:
1850 case TYPE_CODE_DECFLOAT:
1851 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1852 arg->contents ().data ());
1853 break;
1854
1855 case TYPE_CODE_COMPLEX:
1856 {
1857 const bfd_byte *buf = arg->contents ().data ();
1858 struct type *target_type = check_typedef (arg_type->target_type ());
1859
1860 if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
1861 buf))
1862 return false;
1863
1864 return pass_in_v (gdbarch, regcache, info, target_type->length (),
1865 buf + target_type->length ());
1866 }
1867
1868 case TYPE_CODE_ARRAY:
1869 if (arg_type->is_vector ())
1870 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1871 arg->contents ().data ());
1872 [[fallthrough]];
1873
1874 case TYPE_CODE_STRUCT:
1875 case TYPE_CODE_UNION:
1876 for (int i = 0; i < arg_type->num_fields (); i++)
1877 {
1878 /* Don't include static fields. */
1879 if (arg_type->field (i).is_static ())
1880 continue;
1881
1882 struct value *field = arg->primitive_field (0, i, arg_type);
1883 struct type *field_type = check_typedef (field->type ());
1884
1885 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1886 field))
1887 return false;
1888 }
1889 return true;
1890
1891 default:
1892 return false;
1893 }
1894 }
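
/* Illustrative HFA case for the recursion above: for

     struct hfa { float a; float b; float c; };

   each float field is handed to pass_in_v individually, consuming v0,
   v1 and v2 (one V register per member). Since an HFA has at most
   HA_MAX_NUM_FLDS members, a single such argument never needs more
   than four V registers. */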
1895
1896 /* Push LR_VALUE to the Guarded Control Stack. */
1897
1898 static void
1899 aarch64_push_gcs_entry (regcache *regs, CORE_ADDR lr_value)
1900 {
1901 gdbarch *arch = regs->arch ();
1902 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
1903 CORE_ADDR gcs_addr;
1904
1905 register_status status = regs->cooked_read (tdep->gcs_reg_base, &gcs_addr);
1906 if (status != REG_VALID)
1907 error (_("Can't read $gcspr."));
1908
1909 gcs_addr -= 8;
1910 gdb_byte buf[8];
1911 store_integer (buf, gdbarch_byte_order (arch), lr_value);
1912 if (target_write_memory (gcs_addr, buf, sizeof (buf)) != 0)
1913 error (_("Can't write to Guarded Control Stack."));
1914
1915 /* Update GCSPR. */
1916 regcache_cooked_write_unsigned (regs, tdep->gcs_reg_base, gcs_addr);
1917 }
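
/* Example of the effect above (hypothetical values): with $gcspr at
   0x7000 and LR_VALUE 0x400123, the entry is written to 0x6ff8 and
   $gcspr becomes 0x6ff8; the Guarded Control Stack grows downwards,
   eight bytes per entry, mirroring a return-address push. */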
1918
1919 /* Remove the newest entry from the Guarded Control Stack. */
1920
1921 static void
1922 aarch64_pop_gcs_entry (regcache *regs)
1923 {
1924 gdbarch *arch = regs->arch ();
1925 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
1926 CORE_ADDR gcs_addr;
1927
1928 register_status status = regs->cooked_read (tdep->gcs_reg_base, &gcs_addr);
1929 if (status != REG_VALID)
1930 error (_("Can't read $gcspr."));
1931
1932 /* Update GCSPR. */
1933 regcache_cooked_write_unsigned (regs, tdep->gcs_reg_base, gcs_addr + 8);
1934 }
1935
1936 /* Implement the "shadow_stack_push" gdbarch method. */
1937
1938 static void
1939 aarch64_shadow_stack_push (gdbarch *gdbarch, CORE_ADDR new_addr,
1940 regcache *regcache)
1941 {
1942 aarch64_push_gcs_entry (regcache, new_addr);
1943 }
1944
1945 /* Implement the "push_dummy_call" gdbarch method. */
1946
1947 static CORE_ADDR
1948 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1949 struct regcache *regcache, CORE_ADDR bp_addr,
1950 int nargs,
1951 struct value **args, CORE_ADDR sp,
1952 function_call_return_method return_method,
1953 CORE_ADDR struct_addr)
1954 {
1955 int argnum;
1956 struct aarch64_call_info info;
1957
1958 /* We need to know what the type of the called function is in order
1959 to determine the number of named/anonymous arguments for the
1960 actual argument placement, and the return type in order to handle
1961 the return value correctly.
1962
1963 The generic code above us views the decision of return in memory
1964 or return in registers as a two-stage process. The language
1965 handler is consulted first and may decide to return in memory (e.g.
1966 a class with a copy constructor returned by value); this will cause
1967 the generic code to allocate space AND insert an initial leading
1968 argument.
1969
1970 If the language code does not decide to pass in memory then the
1971 target code is consulted.
1972
1973 If the language code decides to pass in memory we want to move
1974 the pointer inserted as the initial argument from the argument
1975 list and into X8, the conventional AArch64 struct return pointer
1976 register. */
1977
1978 /* Set the return address. For the AArch64, the return breakpoint
1979 is always at BP_ADDR. */
1980 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1981
1982 /* If we were given an initial argument for the return slot, lose it. */
1983 if (return_method == return_method_hidden_param)
1984 {
1985 args++;
1986 nargs--;
1987 }
1988
1989 /* The struct_return pointer occupies X8. */
1990 if (return_method != return_method_normal)
1991 {
1992 aarch64_debug_printf ("struct return in %s = 0x%s",
1993 gdbarch_register_name
1994 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1995 paddress (gdbarch, struct_addr));
1996
1997 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1998 struct_addr);
1999 }
2000
2001 for (argnum = 0; argnum < nargs; argnum++)
2002 {
2003 struct value *arg = args[argnum];
2004 struct type *arg_type, *fundamental_type;
2005 int len, elements;
2006
2007 arg_type = check_typedef (arg->type ());
2008 len = arg_type->length ();
2009
2010 /* If arg can be passed in V registers as per the AAPCS64, then do so
2011 if there are enough spare registers. */
2012 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
2013 &fundamental_type))
2014 {
2015 if (info.nsrn + elements <= 8)
2016 {
2017 /* We know that we have sufficient registers available, so
2018 this will never need to fall back to the stack. */
2019 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
2020 arg))
2021 gdb_assert_not_reached ("Failed to push args");
2022 }
2023 else
2024 {
2025 info.nsrn = 8;
2026 pass_on_stack (&info, arg_type, arg);
2027 }
2028 continue;
2029 }
2030
2031 switch (arg_type->code ())
2032 {
2033 case TYPE_CODE_INT:
2034 case TYPE_CODE_BOOL:
2035 case TYPE_CODE_CHAR:
2036 case TYPE_CODE_RANGE:
2037 case TYPE_CODE_ENUM:
2038 if (len < 4 && !is_fixed_point_type (arg_type))
2039 {
2040 /* Promote to 32 bit integer. */
2041 if (arg_type->is_unsigned ())
2042 arg_type = builtin_type (gdbarch)->builtin_uint32;
2043 else
2044 arg_type = builtin_type (gdbarch)->builtin_int32;
2045 arg = value_cast (arg_type, arg);
2046 }
2047 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2048 break;
2049
2050 case TYPE_CODE_STRUCT:
2051 case TYPE_CODE_ARRAY:
2052 case TYPE_CODE_UNION:
2053 if (len > 16)
2054 {
2055 /* PCS B.7 Aggregates larger than 16 bytes are passed by
2056 invisible reference. */
2057
2058 /* Allocate aligned storage. */
2059 sp = align_down (sp - len, 16);
2060
2061 /* Write the real data into the stack. */
2062 write_memory (sp, arg->contents ().data (), len);
2063
2064 /* Construct the indirection. */
2065 arg_type = lookup_pointer_type (arg_type);
2066 arg = value_from_pointer (arg_type, sp);
2067 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2068 }
2069 else
2070 /* PCS C.15 / C.18 multiple values pass. */
2071 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2072 break;
2073
2074 default:
2075 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2076 break;
2077 }
2078 }
2079
2080 /* Make sure stack retains 16 byte alignment. */
2081 if (info.nsaa & 15)
2082 sp -= 16 - (info.nsaa & 15);
2083
2084 while (!info.si.empty ())
2085 {
2086 const stack_item_t &si = info.si.back ();
2087
2088 sp -= si.len;
2089 if (si.data != NULL)
2090 write_memory (sp, si.data, si.len);
2091 info.si.pop_back ();
2092 }
2093
2094 /* Finally, update the SP register. */
2095 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
2096
2097 return sp;
2098 }
2099
2100 /* Implement the "frame_align" gdbarch method. */
2101
2102 static CORE_ADDR
2103 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2104 {
2105 /* Align the stack to sixteen bytes. */
2106 return sp & ~(CORE_ADDR) 15;
2107 }
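
/* For example, aarch64_frame_align rounds 0x7ffffffff4 down to
   0x7ffffffff0; AAPCS64 requires a 16-byte aligned stack pointer, and
   rounding down means the dummy frame never overlaps in-use stack. */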
2108
2109 /* Return the type for an AdvSIMD Q register. */
2110
2111 static struct type *
2112 aarch64_vnq_type (struct gdbarch *gdbarch)
2113 {
2114 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2115
2116 if (tdep->vnq_type == NULL)
2117 {
2118 struct type *t;
2119 struct type *elem;
2120
2121 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2122 TYPE_CODE_UNION);
2123
2124 elem = builtin_type (gdbarch)->builtin_uint128;
2125 append_composite_type_field (t, "u", elem);
2126
2127 elem = builtin_type (gdbarch)->builtin_int128;
2128 append_composite_type_field (t, "s", elem);
2129
2130 tdep->vnq_type = t;
2131 }
2132
2133 return tdep->vnq_type;
2134 }
2135
2136 /* Return the type for an AdvSIMD D register. */
2137
2138 static struct type *
2139 aarch64_vnd_type (struct gdbarch *gdbarch)
2140 {
2141 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2142
2143 if (tdep->vnd_type == NULL)
2144 {
2145 struct type *t;
2146 struct type *elem;
2147
2148 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2149 TYPE_CODE_UNION);
2150
2151 elem = builtin_type (gdbarch)->builtin_double;
2152 append_composite_type_field (t, "f", elem);
2153
2154 elem = builtin_type (gdbarch)->builtin_uint64;
2155 append_composite_type_field (t, "u", elem);
2156
2157 elem = builtin_type (gdbarch)->builtin_int64;
2158 append_composite_type_field (t, "s", elem);
2159
2160 tdep->vnd_type = t;
2161 }
2162
2163 return tdep->vnd_type;
2164 }
2165
2166 /* Return the type for an AdvSIMD S register. */
2167
2168 static struct type *
2169 aarch64_vns_type (struct gdbarch *gdbarch)
2170 {
2171 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2172
2173 if (tdep->vns_type == NULL)
2174 {
2175 struct type *t;
2176 struct type *elem;
2177
2178 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2179 TYPE_CODE_UNION);
2180
2181 elem = builtin_type (gdbarch)->builtin_float;
2182 append_composite_type_field (t, "f", elem);
2183
2184 elem = builtin_type (gdbarch)->builtin_uint32;
2185 append_composite_type_field (t, "u", elem);
2186
2187 elem = builtin_type (gdbarch)->builtin_int32;
2188 append_composite_type_field (t, "s", elem);
2189
2190 tdep->vns_type = t;
2191 }
2192
2193 return tdep->vns_type;
2194 }
2195
2196 /* Return the type for an AdvSIMD H register. */
2197
2198 static struct type *
2199 aarch64_vnh_type (struct gdbarch *gdbarch)
2200 {
2201 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2202
2203 if (tdep->vnh_type == NULL)
2204 {
2205 struct type *t;
2206 struct type *elem;
2207
2208 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2209 TYPE_CODE_UNION);
2210
2211 elem = builtin_type (gdbarch)->builtin_bfloat16;
2212 append_composite_type_field (t, "bf", elem);
2213
2214 elem = builtin_type (gdbarch)->builtin_half;
2215 append_composite_type_field (t, "f", elem);
2216
2217 elem = builtin_type (gdbarch)->builtin_uint16;
2218 append_composite_type_field (t, "u", elem);
2219
2220 elem = builtin_type (gdbarch)->builtin_int16;
2221 append_composite_type_field (t, "s", elem);
2222
2223 tdep->vnh_type = t;
2224 }
2225
2226 return tdep->vnh_type;
2227 }
2228
2229 /* Return the type for an AdvSIMD B register. */
2230
2231 static struct type *
2232 aarch64_vnb_type (struct gdbarch *gdbarch)
2233 {
2234 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2235
2236 if (tdep->vnb_type == NULL)
2237 {
2238 struct type *t;
2239 struct type *elem;
2240
2241 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2242 TYPE_CODE_UNION);
2243
2244 elem = builtin_type (gdbarch)->builtin_uint8;
2245 append_composite_type_field (t, "u", elem);
2246
2247 elem = builtin_type (gdbarch)->builtin_int8;
2248 append_composite_type_field (t, "s", elem);
2249
2250 tdep->vnb_type = t;
2251 }
2252
2253 return tdep->vnb_type;
2254 }
2255
2256 /* Return TRUE if REGNUM is a ZA tile slice pseudo-register number. Return
2257 FALSE otherwise. */
2258
2259 static bool
2260 is_sme_tile_slice_pseudo_register (struct gdbarch *gdbarch, int regnum)
2261 {
2262 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2263
2264 gdb_assert (tdep->has_sme ());
2265 gdb_assert (tdep->sme_svq > 0);
2266 gdb_assert (tdep->sme_pseudo_base <= regnum);
2267 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
2268
2269 if (tdep->sme_tile_slice_pseudo_base <= regnum
2270 && regnum < tdep->sme_tile_slice_pseudo_base
2271 + tdep->sme_tile_slice_pseudo_count)
2272 return true;
2273
2274 return false;
2275 }
2276
2277 /* Given REGNUM, a ZA pseudo-register number, return, in ENCODING, the
2278 decoded fields that make up its name. */
2279
2280 static void
2281 aarch64_za_decode_pseudos (struct gdbarch *gdbarch, int regnum,
2282 struct za_pseudo_encoding &encoding)
2283 {
2284 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2285
2286 gdb_assert (tdep->has_sme ());
2287 gdb_assert (tdep->sme_svq > 0);
2288 gdb_assert (tdep->sme_pseudo_base <= regnum);
2289 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
2290
2291 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
2292 {
2293 /* Calculate the tile slice pseudo-register offset relative to the other
2294 tile slice pseudo-registers. */
2295 int offset = regnum - tdep->sme_tile_slice_pseudo_base;
2296
2297 /* Fetch the qualifier. We can have 160 to 2560 possible tile slice
2298 pseudo-registers. Each qualifier (we have 5 of them: B, H, S, D
2299 and Q) covers 32 * svq pseudo-registers, so we divide the offset by
2300 that constant. */
2301 size_t qualifier = offset / (tdep->sme_svq * 32);
2302 encoding.qualifier_index = qualifier;
2303
2304 /* Prepare to fetch the direction (d), tile number (t) and slice
2305 number (s). */
2306 int dts = offset % (tdep->sme_svq * 32);
2307
2308 /* The direction is represented by the even/odd numbers. Even-numbered
2309 pseudo-registers are horizontal tile slices and odd-numbered
2310 pseudo-registers are vertical tile slices. */
2311 encoding.horizontal = !(dts & 1);
2312
2313 /* Fetch the tile number. The tile number is closely related to the
2314 qualifier. B has 1 tile, H has 2 tiles, S has 4 tiles, D has 8 tiles
2315 and Q has 16 tiles. */
2316 encoding.tile_index = (dts >> 1) & ((1 << qualifier) - 1);
2317
2318 /* Fetch the slice number. The slice number is closely related to the
2319 qualifier and the svl. */
2320 encoding.slice_index = dts >> (qualifier + 1);
2321 }
2322 else
2323 {
2324 /* Calculate the tile pseudo-register offset relative to the other
2325 tile pseudo-registers. */
2326 int offset = regnum - tdep->sme_tile_pseudo_base;
2327
2328 encoding.qualifier_index = std::floor (std::log2 (offset + 1));
2329 /* Calculate the tile number. */
2330 encoding.tile_index = (offset + 1) - (1 << encoding.qualifier_index);
2331 /* Direction and slice index don't get used for tiles. Set them to
2332 0/false values. */
2333 encoding.slice_index = 0;
2334 encoding.horizontal = false;
2335 }
2336 }
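
/* Worked decode examples (assuming svq == 1, so each qualifier covers
   32 tile slice pseudo-registers):

   - Tile slice offset 5: qualifier = 5 / 32 = 0 (B); dts = 5, odd, so
     vertical; tile = (5 >> 1) & 0 = 0; slice = 5 >> 1 = 2; i.e. za0vb2.
   - Tile offset 6: qualifier = floor (log2 (7)) = 2 (S);
     tile = 7 - (1 << 2) = 3; i.e. za3s. */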
2337
2338 /* Return the type for a ZA tile slice pseudo-register based on ENCODING. */
2339
2340 static struct type *
2341 aarch64_za_tile_slice_type (struct gdbarch *gdbarch,
2342 const struct za_pseudo_encoding &encoding)
2343 {
2344 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2345
2346 gdb_assert (tdep->has_sme ());
2347 gdb_assert (tdep->sme_svq > 0);
2348
2349 if (tdep->sme_tile_slice_type_q == nullptr)
2350 {
2351 /* Q tile slice type. */
2352 tdep->sme_tile_slice_type_q
2353 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2354 tdep->sme_svq);
2355 /* D tile slice type. */
2356 tdep->sme_tile_slice_type_d
2357 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2358 tdep->sme_svq * 2);
2359 /* S tile slice type. */
2360 tdep->sme_tile_slice_type_s
2361 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2362 tdep->sme_svq * 4);
2363 /* H tile slice type. */
2364 tdep->sme_tile_slice_type_h
2365 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2366 tdep->sme_svq * 8);
2367 /* B tile slice type. */
2368 tdep->sme_tile_slice_type_b
2369 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2370 tdep->sme_svq * 16);
2371 }
2372
2373 switch (encoding.qualifier_index)
2374 {
2375 case 4:
2376 return tdep->sme_tile_slice_type_q;
2377 case 3:
2378 return tdep->sme_tile_slice_type_d;
2379 case 2:
2380 return tdep->sme_tile_slice_type_s;
2381 case 1:
2382 return tdep->sme_tile_slice_type_h;
2383 case 0:
2384 return tdep->sme_tile_slice_type_b;
2385 default:
2386 error (_("Invalid qualifier index %s for tile slice pseudo register."),
2387 pulongest (encoding.qualifier_index));
2388 }
2389
2390 gdb_assert_not_reached ("Unknown qualifier for ZA tile slice register");
2391 }
2392
2393 /* Return the type for a ZA tile pseudo-register based on ENCODING. */
2394
2395 static struct type *
2396 aarch64_za_tile_type (struct gdbarch *gdbarch,
2397 const struct za_pseudo_encoding &encoding)
2398 {
2399 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2400
2401 gdb_assert (tdep->has_sme ());
2402 gdb_assert (tdep->sme_svq > 0);
2403
2404 if (tdep->sme_tile_type_q == nullptr)
2405 {
2406 struct type *inner_vectors_type;
2407
2408 /* Q tile type. */
2409 inner_vectors_type
2410 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2411 tdep->sme_svq);
2412 tdep->sme_tile_type_q
2413 = init_vector_type (inner_vectors_type, tdep->sme_svq);
2414
2415 /* D tile type. */
2416 inner_vectors_type
2417 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2418 tdep->sme_svq * 2);
2419 tdep->sme_tile_type_d
2420 = init_vector_type (inner_vectors_type, tdep->sme_svq * 2);
2421
2422 /* S tile type. */
2423 inner_vectors_type
2424 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2425 tdep->sme_svq * 4);
2426 tdep->sme_tile_type_s
2427 = init_vector_type (inner_vectors_type, tdep->sme_svq * 4);
2428
2429 /* H tile type. */
2430 inner_vectors_type
2431 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2432 tdep->sme_svq * 8);
2433 tdep->sme_tile_type_h
2434 = init_vector_type (inner_vectors_type, tdep->sme_svq * 8);
2435
2436 /* B tile type. */
2437 inner_vectors_type
2438 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2439 tdep->sme_svq * 16);
2440 tdep->sme_tile_type_b
2441 = init_vector_type (inner_vectors_type, tdep->sme_svq * 16);
2442 }
2443
2444 switch (encoding.qualifier_index)
2445 {
2446 case 4:
2447 return tdep->sme_tile_type_q;
2448 case 3:
2449 return tdep->sme_tile_type_d;
2450 case 2:
2451 return tdep->sme_tile_type_s;
2452 case 1:
2453 return tdep->sme_tile_type_h;
2454 case 0:
2455 return tdep->sme_tile_type_b;
2456 default:
2457 error (_("Invalid qualifier index %s for ZA tile pseudo register."),
2458 pulongest (encoding.qualifier_index));
2459 }
2460
2461 gdb_assert_not_reached ("unknown qualifier for tile pseudo-register");
2462 }
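
/* A concrete reading of the types above, assuming svq == 2 (svl == 32
   bytes): the S tile is an 8x8 matrix of uint32 (svq * 4 rows of
   svq * 4 elements), the B tile is 32x32 uint8 and the Q tile is 2x2
   uint128; every tile spans svl * svl / element-size bytes of ZA. */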
2463
2464 /* Return the type for an AdvSIMD V register. */
2465
2466 static struct type *
2467 aarch64_vnv_type (struct gdbarch *gdbarch)
2468 {
2469 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2470
2471 if (tdep->vnv_type == NULL)
2472 {
2473 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2474 slice from the non-pseudo vector registers. However, NEON V registers
2475 are always vector registers, and need constructing as such. */
2476 const struct builtin_type *bt = builtin_type (gdbarch);
2477
2478 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2479 TYPE_CODE_UNION);
2480
2481 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2482 TYPE_CODE_UNION);
2483 append_composite_type_field (sub, "f",
2484 init_vector_type (bt->builtin_double, 2));
2485 append_composite_type_field (sub, "u",
2486 init_vector_type (bt->builtin_uint64, 2));
2487 append_composite_type_field (sub, "s",
2488 init_vector_type (bt->builtin_int64, 2));
2489 append_composite_type_field (t, "d", sub);
2490
2491 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2492 TYPE_CODE_UNION);
2493 append_composite_type_field (sub, "f",
2494 init_vector_type (bt->builtin_float, 4));
2495 append_composite_type_field (sub, "u",
2496 init_vector_type (bt->builtin_uint32, 4));
2497 append_composite_type_field (sub, "s",
2498 init_vector_type (bt->builtin_int32, 4));
2499 append_composite_type_field (t, "s", sub);
2500
2501 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2502 TYPE_CODE_UNION);
2503 append_composite_type_field (sub, "bf",
2504 init_vector_type (bt->builtin_bfloat16, 8));
2505 append_composite_type_field (sub, "f",
2506 init_vector_type (bt->builtin_half, 8));
2507 append_composite_type_field (sub, "u",
2508 init_vector_type (bt->builtin_uint16, 8));
2509 append_composite_type_field (sub, "s",
2510 init_vector_type (bt->builtin_int16, 8));
2511 append_composite_type_field (t, "h", sub);
2512
2513 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2514 TYPE_CODE_UNION);
2515 append_composite_type_field (sub, "u",
2516 init_vector_type (bt->builtin_uint8, 16));
2517 append_composite_type_field (sub, "s",
2518 init_vector_type (bt->builtin_int8, 16));
2519 append_composite_type_field (t, "b", sub);
2520
2521 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2522 TYPE_CODE_UNION);
2523 append_composite_type_field (sub, "u",
2524 init_vector_type (bt->builtin_uint128, 1));
2525 append_composite_type_field (sub, "s",
2526 init_vector_type (bt->builtin_int128, 1));
2527 append_composite_type_field (t, "q", sub);
2528
2529 tdep->vnv_type = t;
2530 }
2531
2532 return tdep->vnv_type;
2533 }
2534
2535 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2536
2537 static int
2538 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2539 {
2540 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2541
2542 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2543 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2544
2545 if (reg == AARCH64_DWARF_SP)
2546 return AARCH64_SP_REGNUM;
2547
2548 if (reg == AARCH64_DWARF_PC)
2549 return AARCH64_PC_REGNUM;
2550
2551 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2552 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2553
2554 if (reg == AARCH64_DWARF_SVE_VG)
2555 return AARCH64_SVE_VG_REGNUM;
2556
2557 if (reg == AARCH64_DWARF_SVE_FFR)
2558 return AARCH64_SVE_FFR_REGNUM;
2559
2560 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2561 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2562
2563 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2564 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2565
2566 if (tdep->has_pauth ())
2567 {
2568 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2569 return tdep->ra_sign_state_regnum;
2570 }
2571
2572 return -1;
2573 }
2574
2575 /* Implement the "print_insn" gdbarch method. */
2576
2577 static int
2578 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2579 {
2580 info->symbols = NULL;
2581 return default_print_insn (memaddr, info);
2582 }
2583
2584 /* AArch64 BRK software debug mode instruction.
2585 Note that AArch64 code is always little-endian.
2586 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2587 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2588
2589 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2590
2591 /* Extract from an array REGS containing the (raw) register state a
2592 function return value of type TYPE, and copy that, in virtual
2593 format, into VALBUF. */
2594
2595 static void
2596 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2597 gdb_byte *valbuf)
2598 {
2599 struct gdbarch *gdbarch = regs->arch ();
2600 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2601 int elements;
2602 struct type *fundamental_type;
2603
2604 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2605 &fundamental_type))
2606 {
2607 int len = fundamental_type->length ();
2608
2609 for (int i = 0; i < elements; i++)
2610 {
2611 int regno = AARCH64_V0_REGNUM + i;
2612 /* Enough space for a full vector register. */
2613 gdb::byte_vector buf (register_size (gdbarch, regno));
2614 gdb_assert (len <= buf.size ());
2615
2616 aarch64_debug_printf
2617 ("read HFA or HVA return value element %d from %s",
2618 i + 1, gdbarch_register_name (gdbarch, regno));
2619
2620 regs->cooked_read (regno, buf);
2621
2622 memcpy (valbuf, buf.data (), len);
2623 valbuf += len;
2624 }
2625 }
2626 else if (type->code () == TYPE_CODE_INT
2627 || type->code () == TYPE_CODE_CHAR
2628 || type->code () == TYPE_CODE_BOOL
2629 || type->code () == TYPE_CODE_PTR
2630 || TYPE_IS_REFERENCE (type)
2631 || type->code () == TYPE_CODE_ENUM)
2632 {
2633 /* If the type is a plain integer, then the access is
2634 straightforward. Otherwise we have to play around a bit
2635 more. */
2636 int len = type->length ();
2637 int regno = AARCH64_X0_REGNUM;
2638 ULONGEST tmp;
2639
2640 while (len > 0)
2641 {
2642 /* By using store_unsigned_integer we avoid having to do
2643 anything special for small big-endian values. */
2644 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2645 store_unsigned_integer (valbuf,
2646 (len > X_REGISTER_SIZE
2647 ? X_REGISTER_SIZE : len), byte_order, tmp);
2648 len -= X_REGISTER_SIZE;
2649 valbuf += X_REGISTER_SIZE;
2650 }
2651 }
2652 else
2653 {
2654 /* For a structure or union the behavior is as if the value had
2655 been stored to word-aligned memory and then loaded into
2656 registers with 64-bit load instruction(s). */
2657 int len = type->length ();
2658 int regno = AARCH64_X0_REGNUM;
2659 bfd_byte buf[X_REGISTER_SIZE];
2660
2661 while (len > 0)
2662 {
2663 regs->cooked_read (regno++, buf);
2664 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2665 len -= X_REGISTER_SIZE;
2666 valbuf += X_REGISTER_SIZE;
2667 }
2668 }
2669 }
2670
2671
2672 /* Will a function return an aggregate type in memory or in a
2673 register? Return 0 if an aggregate type can be returned in a
2674 register, 1 if it must be returned in memory. */
2675
2676 static int
2677 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2678 {
2679 type = check_typedef (type);
2680 int elements;
2681 struct type *fundamental_type;
2682
2683 if (TYPE_HAS_DYNAMIC_LENGTH (type))
2684 return 1;
2685
2686 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2687 &fundamental_type))
2688 {
2689 /* v0-v7 are used to return values and one register is allocated
2690 for one member. However, an HFA or HVA has at most four members. */
2691 return 0;
2692 }
2693
2694 if (type->length () > 16
2695 || !language_pass_by_reference (type).trivially_copyable)
2696 {
2697 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2698 invisible reference. */
2699
2700 return 1;
2701 }
2702
2703 return 0;
2704 }
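
/* Two illustrative uses of the predicate above: struct { double x, y; }
   is an HFA, returns in v0/v1, and yields 0 here; a 24-byte struct of
   three 64-bit integers exceeds the 16-byte limit, yields 1, and is
   returned through the buffer whose address the caller passes in x8. */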
2705
2706 /* Write into appropriate registers a function return value of type
2707 TYPE, given in virtual format. */
2708
2709 static void
2710 aarch64_store_return_value (struct type *type, struct regcache *regs,
2711 const gdb_byte *valbuf)
2712 {
2713 struct gdbarch *gdbarch = regs->arch ();
2714 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2715 int elements;
2716 struct type *fundamental_type;
2717
2718 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2719 &fundamental_type))
2720 {
2721 int len = fundamental_type->length ();
2722
2723 for (int i = 0; i < elements; i++)
2724 {
2725 int regno = AARCH64_V0_REGNUM + i;
2726 /* Enough space for a full vector register. */
2727 gdb::byte_vector tmpbuf (register_size (gdbarch, regno));
2728 gdb_assert (len <= tmpbuf.size ());
2729
2730 aarch64_debug_printf
2731 ("write HFA or HVA return value element %d to %s",
2732 i + 1, gdbarch_register_name (gdbarch, regno));
2733
2734 /* Depending on whether the target supports SVE or not, the V
2735 registers may report a size > 16 bytes. In that case, read the
2736 original contents of the register before overriding it with a new
2737 value that has a potential size <= 16 bytes. */
2738 regs->cooked_read (regno, tmpbuf);
2739 memcpy (tmpbuf.data (), valbuf,
2740 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2741 regs->cooked_write (regno, tmpbuf);
2742 valbuf += len;
2743 }
2744 }
2745 else if (type->code () == TYPE_CODE_INT
2746 || type->code () == TYPE_CODE_CHAR
2747 || type->code () == TYPE_CODE_BOOL
2748 || type->code () == TYPE_CODE_PTR
2749 || TYPE_IS_REFERENCE (type)
2750 || type->code () == TYPE_CODE_ENUM)
2751 {
2752 if (type->length () <= X_REGISTER_SIZE)
2753 {
2754 /* Values of one word or less are zero/sign-extended and
2755 returned in x0. */
2756 bfd_byte tmpbuf[X_REGISTER_SIZE];
2757 LONGEST val = unpack_long (type, valbuf);
2758
2759 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2760 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2761 }
2762 else
2763 {
2764 /* Integral values greater than one word are stored in
2765 consecutive registers starting with x0. This will always
2766 be a multiple of the register size. */
2767 int len = type->length ();
2768 int regno = AARCH64_X0_REGNUM;
2769
2770 while (len > 0)
2771 {
2772 regs->cooked_write (regno++, valbuf);
2773 len -= X_REGISTER_SIZE;
2774 valbuf += X_REGISTER_SIZE;
2775 }
2776 }
2777 }
2778 else
2779 {
2780 /* For a structure or union the behavior is as if the value had
2781 been stored to word-aligned memory and then loaded into
2782 registers with 64-bit load instruction(s). */
2783 int len = type->length ();
2784 int regno = AARCH64_X0_REGNUM;
2785 bfd_byte tmpbuf[X_REGISTER_SIZE];
2786
2787 while (len > 0)
2788 {
2789 memcpy (tmpbuf, valbuf,
2790 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2791 regs->cooked_write (regno++, tmpbuf);
2792 len -= X_REGISTER_SIZE;
2793 valbuf += X_REGISTER_SIZE;
2794 }
2795 }
2796 }
2797
2798 /* Implement the "return_value" gdbarch method. */
2799
2800 static enum return_value_convention
2801 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2802 struct type *valtype, struct regcache *regcache,
2803 struct value **read_value, const gdb_byte *writebuf)
2804 {
2805 if (valtype->code () == TYPE_CODE_STRUCT
2806 || valtype->code () == TYPE_CODE_UNION
2807 || valtype->code () == TYPE_CODE_ARRAY)
2808 {
2809 if (aarch64_return_in_memory (gdbarch, valtype))
2810 {
2811 /* From the AAPCS64's Result Return section:
2812
2813 "Otherwise, the caller shall reserve a block of memory of
2814 sufficient size and alignment to hold the result. The address
2815 of the memory block shall be passed as an additional argument to
2816 the function in x8." */
2817
2818 aarch64_debug_printf ("return value in memory");
2819
2820 if (read_value != nullptr)
2821 {
2822 CORE_ADDR addr;
2823
2824 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
2825 *read_value = value_at_non_lval (valtype, addr);
2826 }
2827
2828 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
2829 }
2830 }
2831
2832 if (writebuf)
2833 aarch64_store_return_value (valtype, regcache, writebuf);
2834
2835 if (read_value)
2836 {
2837 *read_value = value::allocate (valtype);
2838 aarch64_extract_return_value (valtype, regcache,
2839 (*read_value)->contents_raw ().data ());
2840 }
2841
2842 aarch64_debug_printf ("return value in registers");
2843
2844 return RETURN_VALUE_REGISTER_CONVENTION;
2845 }
2846
2847 /* Implement the "get_longjmp_target" gdbarch method. */
2848
2849 static int
2850 aarch64_get_longjmp_target (const frame_info_ptr &frame, CORE_ADDR *pc)
2851 {
2852 CORE_ADDR jb_addr;
2853 gdb_byte buf[X_REGISTER_SIZE];
2854 struct gdbarch *gdbarch = get_frame_arch (frame);
2855 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2856 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2857
2858 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2859
2860 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2861 X_REGISTER_SIZE))
2862 return 0;
2863
2864 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2865 return 1;
2866 }
2867
2868 /* Implement the "gen_return_address" gdbarch method. */
2869
2870 static void
2871 aarch64_gen_return_address (struct gdbarch *gdbarch,
2872 struct agent_expr *ax, struct axs_value *value,
2873 CORE_ADDR scope)
2874 {
2875 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2876 value->kind = axs_lvalue_register;
2877 value->u.reg = AARCH64_LR_REGNUM;
2878 }
2879 \f
2880
2881 /* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2882 otherwise. */
2883
2884 static bool
2885 is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2886 {
2887 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2888
2889 if (tdep->w_pseudo_base <= regnum
2890 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2891 return true;
2892
2893 return false;
2894 }
2895
2896 /* Return TRUE if REGNUM is a SME pseudo-register number. Return FALSE
2897 otherwise. */
2898
2899 static bool
2900 is_sme_pseudo_register (struct gdbarch *gdbarch, int regnum)
2901 {
2902 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2903
2904 if (tdep->has_sme () && tdep->sme_pseudo_base <= regnum
2905 && regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count)
2906 return true;
2907
2908 return false;
2909 }
2910
2911 /* Convert ENCODING into a ZA tile slice name. */
2912
2913 static const std::string
2914 aarch64_za_tile_slice_name (const struct za_pseudo_encoding &encoding)
2915 {
2916 gdb_assert (encoding.qualifier_index >= 0);
2917 gdb_assert (encoding.qualifier_index <= 4);
2918 gdb_assert (encoding.tile_index >= 0);
2919 gdb_assert (encoding.tile_index <= 15);
2920 gdb_assert (encoding.slice_index >= 0);
2921 gdb_assert (encoding.slice_index <= 255);
2922
2923 const char orientation = encoding.horizontal ? 'h' : 'v';
2924
2925 const char qualifiers[6] = "bhsdq";
2926 const char qualifier = qualifiers [encoding.qualifier_index];
2927 return string_printf ("za%d%c%c%d", encoding.tile_index, orientation,
2928 qualifier, encoding.slice_index);
2929 }
2930
2931 /* Convert ENCODING into a ZA tile name. */
2932
2933 static const std::string
2934 aarch64_za_tile_name (const struct za_pseudo_encoding &encoding)
2935 {
2936 /* Tiles don't use the slice number and the direction fields. */
2937 gdb_assert (encoding.qualifier_index >= 0);
2938 gdb_assert (encoding.qualifier_index <= 4);
2939 gdb_assert (encoding.tile_index >= 0);
2940 gdb_assert (encoding.tile_index <= 15);
2941
2942 const char qualifiers[6] = "bhsdq";
2943 const char qualifier = qualifiers [encoding.qualifier_index];
2944 return (string_printf ("za%d%c", encoding.tile_index, qualifier));
2945 }
2946
2947 /* Given a SME pseudo-register REGNUM, return its type. */
2948
2949 static struct type *
2950 aarch64_sme_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2951 {
2952 struct za_pseudo_encoding encoding;
2953
2954 /* Decode the SME pseudo-register number. */
2955 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
2956
2957 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
2958 return aarch64_za_tile_slice_type (gdbarch, encoding);
2959 else
2960 return aarch64_za_tile_type (gdbarch, encoding);
2961 }
2962
2963 /* Return the pseudo register name corresponding to register regnum. */
2964
2965 static const char *
2966 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2967 {
2968 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2969
2970 /* W pseudo-registers. Bottom halves of the X registers. */
2971 static const char *const w_name[] =
2972 {
2973 "w0", "w1", "w2", "w3",
2974 "w4", "w5", "w6", "w7",
2975 "w8", "w9", "w10", "w11",
2976 "w12", "w13", "w14", "w15",
2977 "w16", "w17", "w18", "w19",
2978 "w20", "w21", "w22", "w23",
2979 "w24", "w25", "w26", "w27",
2980 "w28", "w29", "w30",
2981 };
2982
2983 static const char *const q_name[] =
2984 {
2985 "q0", "q1", "q2", "q3",
2986 "q4", "q5", "q6", "q7",
2987 "q8", "q9", "q10", "q11",
2988 "q12", "q13", "q14", "q15",
2989 "q16", "q17", "q18", "q19",
2990 "q20", "q21", "q22", "q23",
2991 "q24", "q25", "q26", "q27",
2992 "q28", "q29", "q30", "q31",
2993 };
2994
2995 static const char *const d_name[] =
2996 {
2997 "d0", "d1", "d2", "d3",
2998 "d4", "d5", "d6", "d7",
2999 "d8", "d9", "d10", "d11",
3000 "d12", "d13", "d14", "d15",
3001 "d16", "d17", "d18", "d19",
3002 "d20", "d21", "d22", "d23",
3003 "d24", "d25", "d26", "d27",
3004 "d28", "d29", "d30", "d31",
3005 };
3006
3007 static const char *const s_name[] =
3008 {
3009 "s0", "s1", "s2", "s3",
3010 "s4", "s5", "s6", "s7",
3011 "s8", "s9", "s10", "s11",
3012 "s12", "s13", "s14", "s15",
3013 "s16", "s17", "s18", "s19",
3014 "s20", "s21", "s22", "s23",
3015 "s24", "s25", "s26", "s27",
3016 "s28", "s29", "s30", "s31",
3017 };
3018
3019 static const char *const h_name[] =
3020 {
3021 "h0", "h1", "h2", "h3",
3022 "h4", "h5", "h6", "h7",
3023 "h8", "h9", "h10", "h11",
3024 "h12", "h13", "h14", "h15",
3025 "h16", "h17", "h18", "h19",
3026 "h20", "h21", "h22", "h23",
3027 "h24", "h25", "h26", "h27",
3028 "h28", "h29", "h30", "h31",
3029 };
3030
3031 static const char *const b_name[] =
3032 {
3033 "b0", "b1", "b2", "b3",
3034 "b4", "b5", "b6", "b7",
3035 "b8", "b9", "b10", "b11",
3036 "b12", "b13", "b14", "b15",
3037 "b16", "b17", "b18", "b19",
3038 "b20", "b21", "b22", "b23",
3039 "b24", "b25", "b26", "b27",
3040 "b28", "b29", "b30", "b31",
3041 };
3042
3043 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
3044
3045 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
3046 return q_name[p_regnum - AARCH64_Q0_REGNUM];
3047
3048 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
3049 return d_name[p_regnum - AARCH64_D0_REGNUM];
3050
3051 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
3052 return s_name[p_regnum - AARCH64_S0_REGNUM];
3053
3054 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
3055 return h_name[p_regnum - AARCH64_H0_REGNUM];
3056
3057 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
3058 return b_name[p_regnum - AARCH64_B0_REGNUM];
3059
3060 /* W pseudo-registers? */
3061 if (is_w_pseudo_register (gdbarch, regnum))
3062 return w_name[regnum - tdep->w_pseudo_base];
3063
3064 if (tdep->has_sve ())
3065 {
3066 static const char *const sve_v_name[] =
3067 {
3068 "v0", "v1", "v2", "v3",
3069 "v4", "v5", "v6", "v7",
3070 "v8", "v9", "v10", "v11",
3071 "v12", "v13", "v14", "v15",
3072 "v16", "v17", "v18", "v19",
3073 "v20", "v21", "v22", "v23",
3074 "v24", "v25", "v26", "v27",
3075 "v28", "v29", "v30", "v31",
3076 };
3077
3078 if (p_regnum >= AARCH64_SVE_V0_REGNUM
3079 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3080 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
3081 }
3082
3083 if (is_sme_pseudo_register (gdbarch, regnum))
3084 return tdep->sme_pseudo_names[regnum - tdep->sme_pseudo_base].c_str ();
3085
3086 /* RA_STATE is used for unwinding only. Do not assign it a name - this
3087 prevents it from being read by methods such as
3088 mi_cmd_trace_frame_collected. */
3089 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3090 return "";
3091
3092 internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
3093 p_regnum);
3094 }
3095
3096 /* Implement the "pseudo_register_type" gdbarch method. */
3097
3098 static struct type *
3099 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3100 {
3101 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3102
3103 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
3104
3105 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
3106 return aarch64_vnq_type (gdbarch);
3107
3108 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
3109 return aarch64_vnd_type (gdbarch);
3110
3111 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
3112 return aarch64_vns_type (gdbarch);
3113
3114 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
3115 return aarch64_vnh_type (gdbarch);
3116
3117 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
3118 return aarch64_vnb_type (gdbarch);
3119
3120 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
3121 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3122 return aarch64_vnv_type (gdbarch);
3123
3124 /* W pseudo-registers are 32-bit. */
3125 if (is_w_pseudo_register (gdbarch, regnum))
3126 return builtin_type (gdbarch)->builtin_uint32;
3127
3128 if (is_sme_pseudo_register (gdbarch, regnum))
3129 return aarch64_sme_pseudo_register_type (gdbarch, regnum);
3130
3131 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3132 return builtin_type (gdbarch)->builtin_uint64;
3133
3134 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
3135 p_regnum);
3136 }
3137
3138 /* Implement the "pseudo_register_reggroup_p" gdbarch method. */
3139
3140 static int
3141 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
3142 const struct reggroup *group)
3143 {
3144 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3145
3146 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
3147
3148 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
3149 return group == all_reggroup || group == vector_reggroup;
3150 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
3151 return (group == all_reggroup || group == vector_reggroup
3152 || group == float_reggroup);
3153 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
3154 return (group == all_reggroup || group == vector_reggroup
3155 || group == float_reggroup);
3156 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
3157 return group == all_reggroup || group == vector_reggroup;
3158 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
3159 return group == all_reggroup || group == vector_reggroup;
3160 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
3161 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3162 return group == all_reggroup || group == vector_reggroup;
3163 else if (is_sme_pseudo_register (gdbarch, regnum))
3164 return group == all_reggroup || group == vector_reggroup;
3165 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
3166 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3167 return 0;
3168
3169 return group == all_reggroup;
3170 }
3171
3172 /* Helper for aarch64_pseudo_read_value. */
3173
3174 static value *
3175 aarch64_pseudo_read_value_1 (const frame_info_ptr &next_frame,
3176 const int pseudo_reg_num, int raw_regnum_offset)
3177 {
3178 unsigned v_regnum = AARCH64_V0_REGNUM + raw_regnum_offset;
3179
3180 return pseudo_from_raw_part (next_frame, pseudo_reg_num, v_regnum, 0);
3181 }
3182
3183 /* Helper function for reading/writing ZA pseudo-registers. Given REGNUM,
3184 a ZA pseudo-register number, return the information on positioning of the
3185 bytes that must be read from/written to. */
3186
3187 static za_offsets
3188 aarch64_za_offsets_from_regnum (struct gdbarch *gdbarch, int regnum)
3189 {
3190 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3191
3192 gdb_assert (tdep->has_sme ());
3193 gdb_assert (tdep->sme_svq > 0);
3194 gdb_assert (tdep->sme_pseudo_base <= regnum);
3195 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3196
3197 struct za_pseudo_encoding encoding;
3198
3199 /* Decode the ZA pseudo-register number. */
3200 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
3201
3202 /* Fetch the streaming vector length. */
3203 size_t svl = sve_vl_from_vq (tdep->sme_svq);
3204 za_offsets offsets;
3205
3206 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
3207 {
3208 if (encoding.horizontal)
3209 {
3210 /* Horizontal tile slices are contiguous ranges of svl bytes. */
3211
3212 /* The starting offset depends on the tile index (to locate the tile
3213 in the ZA buffer), the slice index (to locate the slice within the
3214 tile) and the qualifier. */
3215 offsets.starting_offset
3216 = encoding.tile_index * svl + encoding.slice_index
3217 * (svl >> encoding.qualifier_index);
3218 /* Horizontal tile slice data is contiguous and thus doesn't have
3219 a stride. */
3220 offsets.stride_size = 0;
3221 /* Horizontal tile slice data is contiguous and thus only has 1
3222 chunk. */
3223 offsets.chunks = 1;
3224 /* The chunk size is always svl bytes. */
3225 offsets.chunk_size = svl;
3226 }
3227 else
3228 {
3229 /* Vertical tile slices are non-contiguous ranges of
3230 (1 << qualifier_index) bytes. */
3231
3232 /* The starting offset depends on the tile number (to locate the
3233 tile in the ZA buffer), the slice index (to locate the element
3234 within the tile slice) and the qualifier. */
3235 offsets.starting_offset
3236 = encoding.tile_index * svl + encoding.slice_index
3237 * (1 << encoding.qualifier_index);
3238 /* The offset between vertical tile slices depends on the qualifier
3239 and svl. */
3240 offsets.stride_size = svl << encoding.qualifier_index;
3241 /* The number of chunks depends on svl and the qualifier size. */
3242 offsets.chunks = svl >> encoding.qualifier_index;
3243 /* The chunk size depends on the qualifier. */
3244 offsets.chunk_size = 1 << encoding.qualifier_index;
3245 }
3246 }
3247 else
3248 {
3249 /* ZA tile pseudo-register. */
3250
3251 /* Starting offset depends on the tile index and qualifier. */
3252 offsets.starting_offset = encoding.tile_index * svl;
3253 /* The offset between tile slices depends on the qualifier and svl. */
3254 offsets.stride_size = svl << encoding.qualifier_index;
3255 /* The number of chunks depends on the qualifier and svl. */
3256 offsets.chunks = svl >> encoding.qualifier_index;
3257 /* The chunk size is always svl bytes. */
3258 offsets.chunk_size = svl;
3259 }
3260
3261 return offsets;
3262 }
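
/* Worked example for the vertical case above, assuming svq == 1
   (svl == 16): za0vb2 decodes to qualifier B, tile 0, slice 2,
   vertical, giving starting_offset = 0 * 16 + 2 * 1 = 2,
   stride_size = 16, chunks = 16 and chunk_size = 1; that is, sixteen
   single bytes at ZA offsets 2, 18, 34, and so on: column 2 of the
   byte tile. */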
3263
3264 /* Given REGNUM, a SME pseudo-register number, return its value in RESULT. */
3265
3266 static value *
3267 aarch64_sme_pseudo_register_read (gdbarch *gdbarch, const frame_info_ptr &next_frame,
3268 const int pseudo_reg_num)
3269 {
3270 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3271
3272 gdb_assert (tdep->has_sme ());
3273 gdb_assert (tdep->sme_svq > 0);
3274 gdb_assert (tdep->sme_pseudo_base <= pseudo_reg_num);
3275 gdb_assert (pseudo_reg_num < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3276
3277 /* Fetch the offsets that we need in order to read from the correct blocks
3278 of ZA. */
3279 za_offsets offsets
3280 = aarch64_za_offsets_from_regnum (gdbarch, pseudo_reg_num);
3281
3282 /* Fetch the contents of ZA. */
3283 value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);
3284 value *result = value::allocate_register (next_frame, pseudo_reg_num);
3285
3286 /* Copy the requested data. */
3287 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3288 {
3289 int src_offset = offsets.starting_offset + chunks * offsets.stride_size;
3290 int dst_offset = chunks * offsets.chunk_size;
3291 za_value->contents_copy (result, dst_offset, src_offset,
3292 offsets.chunk_size);
3293 }
3294
3295 return result;
3296 }
3297
3298 /* Implement the "pseudo_register_read_value" gdbarch method. */
3299
3300 static value *
3301 aarch64_pseudo_read_value (gdbarch *gdbarch, const frame_info_ptr &next_frame,
3302 const int pseudo_reg_num)
3303 {
3304 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3305
3306 if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
3307 {
3308 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3309 /* Default offset for little endian. */
3310 int offset = 0;
3311
3312 if (byte_order == BFD_ENDIAN_BIG)
3313 offset = 4;
3314
3315 /* Find the correct X register to extract the data from. */
3316 int x_regnum
3317 = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base);
3318
3319 /* Read the bottom 4 bytes of X. */
3320 return pseudo_from_raw_part (next_frame, pseudo_reg_num, x_regnum,
3321 offset);
3322 }
3323 else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
3324 return aarch64_sme_pseudo_register_read (gdbarch, next_frame,
3325 pseudo_reg_num);
3326
3327 /* Offset in the "pseudo-register space". */
3328 int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);
3329
3330 if (pseudo_offset >= AARCH64_Q0_REGNUM
3331 && pseudo_offset < AARCH64_Q0_REGNUM + 32)
3332 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3333 pseudo_offset - AARCH64_Q0_REGNUM);
3334
3335 if (pseudo_offset >= AARCH64_D0_REGNUM
3336 && pseudo_offset < AARCH64_D0_REGNUM + 32)
3337 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3338 pseudo_offset - AARCH64_D0_REGNUM);
3339
3340 if (pseudo_offset >= AARCH64_S0_REGNUM
3341 && pseudo_offset < AARCH64_S0_REGNUM + 32)
3342 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3343 pseudo_offset - AARCH64_S0_REGNUM);
3344
3345 if (pseudo_offset >= AARCH64_H0_REGNUM
3346 && pseudo_offset < AARCH64_H0_REGNUM + 32)
3347 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3348 pseudo_offset - AARCH64_H0_REGNUM);
3349
3350 if (pseudo_offset >= AARCH64_B0_REGNUM
3351 && pseudo_offset < AARCH64_B0_REGNUM + 32)
3352 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3353 pseudo_offset - AARCH64_B0_REGNUM);
3354
3355 if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
3356 && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
3357 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3358 pseudo_offset - AARCH64_SVE_V0_REGNUM);
3359
3360 if (tdep->has_pauth () && pseudo_reg_num == tdep->ra_sign_state_regnum)
3361 return value::zero (builtin_type (gdbarch)->builtin_uint64, lval_register);
3362
3363 gdb_assert_not_reached ("regnum out of bound");
3364 }
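
/* As an example of the W mapping above: reading w5 fetches raw x5 and
   extracts its low four bytes, which sit at byte offset 0 on
   little-endian targets and offset 4 on big-endian ones. The
   Q/D/S/H/B cases instead always alias the least significant bytes of
   the corresponding V register, regardless of byte order. */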
3365
3366 /* Helper for aarch64_pseudo_write. */
3367
3368 static void
3369 aarch64_pseudo_write_1 (gdbarch *gdbarch, const frame_info_ptr &next_frame,
3370 int regnum_offset,
3371 gdb::array_view<const gdb_byte> buf)
3372 {
3373 unsigned raw_regnum = AARCH64_V0_REGNUM + regnum_offset;
3374
3375 /* Enough space for a full vector register.
3376
3377 Ensure the register buffer is zero; we want GDB writes of the
3378 various 'scalar' pseudo registers to behave like architectural
3379 writes: register-width bytes are written and the remainder is set
3380 to zero. */
3381 gdb::byte_vector raw_buf (register_size (gdbarch, raw_regnum), 0);
3382 static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
3383
3384 gdb::array_view<gdb_byte> raw_view (raw_buf);
3385 copy (buf, raw_view.slice (0, buf.size ()));
3386 put_frame_register (next_frame, raw_regnum, raw_view);
3387 }
3388
3389 /* Given REGNUM, a SME pseudo-register number, store the bytes from DATA to the
3390 pseudo-register. */
3391
3392 static void
3393 aarch64_sme_pseudo_register_write (gdbarch *gdbarch, const frame_info_ptr &next_frame,
3394 const int regnum,
3395 gdb::array_view<const gdb_byte> data)
3396 {
3397 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3398
3399 gdb_assert (tdep->has_sme ());
3400 gdb_assert (tdep->sme_svq > 0);
3401 gdb_assert (tdep->sme_pseudo_base <= regnum);
3402 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3403
3404 /* Fetch the offsets that we need in order to write to the correct blocks
3405 of ZA. */
3406 za_offsets offsets = aarch64_za_offsets_from_regnum (gdbarch, regnum);
3407
3408 /* Fetch the contents of ZA. */
3409 value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);
3410
3411 {
3412 /* Create a view only on the portion of za we want to write. */
3413 gdb::array_view<gdb_byte> za_view
3414 = za_value->contents_writeable ().slice (offsets.starting_offset);
3415
3416 /* Copy the requested data. */
3417 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3418 {
3419 gdb::array_view<const gdb_byte> src
3420 = data.slice (chunks * offsets.chunk_size, offsets.chunk_size);
3421 gdb::array_view<gdb_byte> dst
3422 = za_view.slice (chunks * offsets.stride_size, offsets.chunk_size);
3423 copy (src, dst);
3424 }
3425 }
3426
3427 /* Write back to ZA. */
3428 put_frame_register (next_frame, tdep->sme_za_regnum,
3429 za_value->contents_raw ());
3430 }
3431
3432 /* Implement the "pseudo_register_write" gdbarch method. */
3433
3434 static void
3435 aarch64_pseudo_write (gdbarch *gdbarch, const frame_info_ptr &next_frame,
3436 const int pseudo_reg_num,
3437 gdb::array_view<const gdb_byte> buf)
3438 {
3439 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3440
3441 if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
3442 {
3443 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3444 /* Default offset for little endian. */
3445 int offset = 0;
3446
3447 if (byte_order == BFD_ENDIAN_BIG)
3448 offset = 4;
3449
3450 /* Find the correct X register to extract the data from. */
3451 int x_regnum = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base);
3452
3453 /* First zero-out the contents of X. */
3454 gdb_byte bytes[8] {};
3455 gdb::array_view<gdb_byte> bytes_view (bytes);
3456 copy (buf, bytes_view.slice (offset, 4));
3457
3458 /* Write to the bottom 4 bytes of X. */
3459 put_frame_register (next_frame, x_regnum, bytes_view);
3460 return;
3461 }
3462 else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
3463 {
3464 aarch64_sme_pseudo_register_write (gdbarch, next_frame, pseudo_reg_num,
3465 buf);
3466 return;
3467 }
3468
3469 /* Offset in the "pseudo-register space". */
3470 int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);
3471
3472 if (pseudo_offset >= AARCH64_Q0_REGNUM
3473 && pseudo_offset < AARCH64_Q0_REGNUM + 32)
3474 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3475 pseudo_offset - AARCH64_Q0_REGNUM, buf);
3476
3477 if (pseudo_offset >= AARCH64_D0_REGNUM
3478 && pseudo_offset < AARCH64_D0_REGNUM + 32)
3479 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3480 pseudo_offset - AARCH64_D0_REGNUM, buf);
3481
3482 if (pseudo_offset >= AARCH64_S0_REGNUM
3483 && pseudo_offset < AARCH64_S0_REGNUM + 32)
3484 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3485 pseudo_offset - AARCH64_S0_REGNUM, buf);
3486
3487 if (pseudo_offset >= AARCH64_H0_REGNUM
3488 && pseudo_offset < AARCH64_H0_REGNUM + 32)
3489 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3490 pseudo_offset - AARCH64_H0_REGNUM, buf);
3491
3492 if (pseudo_offset >= AARCH64_B0_REGNUM
3493 && pseudo_offset < AARCH64_B0_REGNUM + 32)
3494 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3495 pseudo_offset - AARCH64_B0_REGNUM, buf);
3496
3497 if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
3498 && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
3499 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3500 pseudo_offset - AARCH64_SVE_V0_REGNUM, buf);
3501
3502 gdb_assert_not_reached ("regnum out of bound");
3503 }
3504
3505 /* Callback function for user_reg_add. */
3506
3507 static struct value *
3508 value_of_aarch64_user_reg (const frame_info_ptr &frame, const void *baton)
3509 {
3510 const int *reg_p = (const int *) baton;
3511
3512 return value_of_register (*reg_p, get_next_frame_sentinel_okay (frame));
3513 }
3514
3515 /* Implement the "software_single_step" gdbarch method, needed to
3516 single step through atomic sequences on AArch64. */
3517
3518 static std::vector<CORE_ADDR>
3519 aarch64_software_single_step (struct regcache *regcache)
3520 {
3521 struct gdbarch *gdbarch = regcache->arch ();
3522 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3523 const int insn_size = 4;
3524 const int atomic_sequence_length = 16; /* Instruction sequence length. */
3525 CORE_ADDR pc = regcache_read_pc (regcache);
3526 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
3527 CORE_ADDR loc = pc;
3528 CORE_ADDR closing_insn = 0;
3529
3530 ULONGEST insn_from_memory;
3531 if (!safe_read_memory_unsigned_integer (loc, insn_size,
3532 byte_order_for_code,
3533 &insn_from_memory))
3534 {
3535 /* Assume we don't have an atomic sequence, as we couldn't read the
3536 instruction in this location. */
3537 return {};
3538 }
3539
3540 uint32_t insn = insn_from_memory;
3541 int index;
3542 int insn_count;
3543 int bc_insn_count = 0; /* Conditional branch instruction count. */
3544 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
3545 aarch64_inst inst;
3546
3547 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3548 return {};
3549
3550 /* Look for a Load Exclusive instruction which begins the sequence. */
3551 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
3552 return {};
3553
3554 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
3555 {
3556 loc += insn_size;
3557
3558 if (!safe_read_memory_unsigned_integer (loc, insn_size,
3559 byte_order_for_code,
3560 &insn_from_memory))
3561 {
3562 /* Assume we don't have an atomic sequence, as we couldn't read the
3563 instruction in this location. */
3564 return {};
3565 }
3566
3567 insn = insn_from_memory;
3568 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3569 return {};
3570 /* Check if the instruction is a conditional branch. */
3571 if (inst.opcode->iclass == condbranch)
3572 {
3573 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
3574
3575 if (bc_insn_count >= 1)
3576 return {};
3577
3578 /* It is, so we'll try to set a breakpoint at the destination. */
3579 breaks[1] = loc + inst.operands[0].imm.value;
3580
3581 bc_insn_count++;
3582 last_breakpoint++;
3583 }
3584
3585 /* Look for the Store Exclusive which closes the atomic sequence. */
3586 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
3587 {
3588 closing_insn = loc;
3589 break;
3590 }
3591 }
3592
3593 /* We didn't find a closing Store Exclusive instruction, fall back. */
3594 if (!closing_insn)
3595 return {};
3596
3597 /* Insert breakpoint after the end of the atomic sequence. */
3598 breaks[0] = loc + insn_size;
3599
3600 /* Check for duplicated breakpoints, and also check that the second
3601 breakpoint is not within the atomic sequence. */
3602 if (last_breakpoint
3603 && (breaks[1] == breaks[0]
3604 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
3605 last_breakpoint = 0;
3606
3607 std::vector<CORE_ADDR> next_pcs;
3608
3609 /* Insert the breakpoint at the end of the sequence, and one at the
3610 destination of the conditional branch, if it exists. */
3611 for (index = 0; index <= last_breakpoint; index++)
3612 next_pcs.push_back (breaks[index]);
3613
3614 return next_pcs;
3615 }
3616
3617 struct aarch64_displaced_step_copy_insn_closure
3618 : public displaced_step_copy_insn_closure
3619 {
3620 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
3621 is being displaced stepped. */
3622 bool cond = false;
3623
3624 /* PC adjustment offset after displaced stepping. If 0, then we don't
3625 write the PC back, assuming the PC is already the right address. */
3626 int32_t pc_adjust = 0;
3627
3628 /* True if it's a branch instruction that saves the link register. */
3629 bool linked_branch = false;
3630 };
3631
3632 /* Data when visiting instructions for displaced stepping. */
3633
3634 struct aarch64_displaced_step_data
3635 {
3636 struct aarch64_insn_data base;
3637
3638 /* The address at which the instruction will be executed. */
3639 CORE_ADDR new_addr;
3640 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
3641 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
3642 /* Number of instructions in INSN_BUF. */
3643 unsigned insn_count;
3644 /* Registers when doing displaced stepping. */
3645 struct regcache *regs;
3646
3647 aarch64_displaced_step_copy_insn_closure *dsc;
3648 };
3649
3650 /* Implementation of aarch64_insn_visitor method "b". */
3651
3652 static void
3653 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3654 struct aarch64_insn_data *data)
3655 {
3656 struct aarch64_displaced_step_data *dsd
3657 = (struct aarch64_displaced_step_data *) data;
3658 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
3659
3660 if (can_encode_int32 (new_offset, 28))
3661 {
3662 /* Emit B rather than BL, because executing BL on a new address
3663 will get the wrong address into LR. In order to avoid this,
3664 we emit B, and update LR if the instruction is BL. */
3665 emit_b (dsd->insn_buf, 0, new_offset);
3666 dsd->insn_count++;
3667 }
3668 else
3669 {
3670 /* Write NOP. */
3671 emit_nop (dsd->insn_buf);
3672 dsd->insn_count++;
3673 dsd->dsc->pc_adjust = offset;
3674 }
3675
3676 if (is_bl)
3677 {
3678 /* Update LR. */
3679 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3680 data->insn_addr + 4);
3681 dsd->dsc->linked_branch = true;
3682 bool gcs_is_enabled;
3683 gdbarch_get_shadow_stack_pointer (dsd->regs->arch (), dsd->regs,
3684 gcs_is_enabled);
3685 if (gcs_is_enabled)
3686 aarch64_push_gcs_entry (dsd->regs, data->insn_addr + 4);
3687 }
3688 }
3689
3690 /* Implementation of aarch64_insn_visitor method "b_cond". */
3691
3692 static void
3693 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3694 struct aarch64_insn_data *data)
3695 {
3696 struct aarch64_displaced_step_data *dsd
3697 = (struct aarch64_displaced_step_data *) data;
3698
3699 /* GDB has to fix up the PC after displaced stepping this instruction
3700 differently, depending on whether the condition is true or false.
3701 Instead of checking COND against the condition flags, we emit the
3702 following instructions, and then GDB can tell how to fix up the PC
3703 from the resulting PC value.
3704
3705 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3706 INSN1 ;
3707 TAKEN:
3708 INSN2
3709 */
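/* Concretely: after single-stepping the scratch copy, PC == TO + 8
   means the B.COND was taken (condition true), while PC == TO + 4
   means it fell through (condition false).
   aarch64_displaced_step_fixup below relies on exactly this
   distinction.  */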
3710
3711 emit_bcond (dsd->insn_buf, cond, 8);
3712 dsd->dsc->cond = true;
3713 dsd->dsc->pc_adjust = offset;
3714 dsd->insn_count = 1;
3715 }
3716
3717 /* Compose an aarch64_register operand on the fly. If the register is
3718 known statically, it should be made a global instead of built through
3719 this helper function. */
3720
3721 static struct aarch64_register
3722 aarch64_register (unsigned num, int is64)
3723 {
3724 return (struct aarch64_register) { num, is64 };
3725 }
3726
3727 /* Implementation of aarch64_insn_visitor method "cb". */
3728
3729 static void
3730 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3731 const unsigned rn, int is64,
3732 struct aarch64_insn_data *data)
3733 {
3734 struct aarch64_displaced_step_data *dsd
3735 = (struct aarch64_displaced_step_data *) data;
3736
3737 /* The offset is out of range for a compare and branch
3738 instruction. We can use the following instructions instead:
3739
3740 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3741 INSN1 ;
3742 TAKEN:
3743 INSN2
3744 */
3745 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3746 dsd->insn_count = 1;
3747 dsd->dsc->cond = true;
3748 dsd->dsc->pc_adjust = offset;
3749 }
3750
3751 /* Implementation of aarch64_insn_visitor method "tb". */
3752
3753 static void
3754 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3755 const unsigned rt, unsigned bit,
3756 struct aarch64_insn_data *data)
3757 {
3758 struct aarch64_displaced_step_data *dsd
3759 = (struct aarch64_displaced_step_data *) data;
3760
3761 /* The offset is out of range for a test bit and branch
3762 instruction. We can use the following instructions instead:
3763
3764 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3765 INSN1 ;
3766 TAKEN:
3767 INSN2
3768
3769 */
3770 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3771 dsd->insn_count = 1;
3772 dsd->dsc->cond = true;
3773 dsd->dsc->pc_adjust = offset;
3774 }
3775
3776 /* Implementation of aarch64_insn_visitor method "adr". */
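/* For example (illustrative): when displaced stepping "adrp x3, sym"
   located at FROM, we write (FROM + offset) & ~0xfff straight into x3
   here, execute a NOP in the scratch pad instead, and advance the PC
   by 4 during fixup.  */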
3777
3778 static void
3779 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3780 const int is_adrp, struct aarch64_insn_data *data)
3781 {
3782 struct aarch64_displaced_step_data *dsd
3783 = (struct aarch64_displaced_step_data *) data;
3784 /* We know exactly the address the ADR{P,} instruction will compute.
3785 We can just write it to the destination register. */
3786 CORE_ADDR address = data->insn_addr + offset;
3787
3788 if (is_adrp)
3789 {
3790 /* Clear the lower 12 bits of the offset to get the 4K page. */
3791 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3792 address & ~0xfff);
3793 }
3794 else
3795 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3796 address);
3797
3798 dsd->dsc->pc_adjust = 4;
3799 emit_nop (dsd->insn_buf);
3800 dsd->insn_count = 1;
3801 }
3802
3803 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
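/* For example (illustrative): for "ldr x2, <literal>" we write the
   literal's address (FROM + offset) into x2 and relocate the insn as
   "ldr x2, [x2]", so the load no longer depends on a PC-relative
   offset that would be wrong at the scratch pad.  */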
3804
3805 static void
3806 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3807 const unsigned rt, const int is64,
3808 struct aarch64_insn_data *data)
3809 {
3810 struct aarch64_displaced_step_data *dsd
3811 = (struct aarch64_displaced_step_data *) data;
3812 CORE_ADDR address = data->insn_addr + offset;
3813 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3814
3815 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3816 address);
3817
3818 if (is_sw)
3819 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3820 aarch64_register (rt, 1), zero);
3821 else
3822 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3823 aarch64_register (rt, 1), zero);
3824
3825 dsd->dsc->pc_adjust = 4;
3826 }
3827
3828 /* Implementation of aarch64_insn_visitor method "others". */
3829
3830 static void
3831 aarch64_displaced_step_others (const uint32_t insn,
3832 struct aarch64_insn_data *data)
3833 {
3834 struct aarch64_displaced_step_data *dsd
3835 = (struct aarch64_displaced_step_data *) data;
3836
3837 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3838 if (masked_insn == BLR)
3839 {
3840 /* Emit a BR to the same register and then update LR to the original
3841 address (similar to aarch64_displaced_step_b). */
3842 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3843 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3844 data->insn_addr + 4);
3845 dsd->dsc->linked_branch = true;
3846 bool gcs_is_enabled;
3847 gdbarch_get_shadow_stack_pointer (dsd->regs->arch (), dsd->regs,
3848 gcs_is_enabled);
3849 if (gcs_is_enabled)
3850 aarch64_push_gcs_entry (dsd->regs, data->insn_addr + 4);
3851 }
3852 else
3853 aarch64_emit_insn (dsd->insn_buf, insn);
3854 dsd->insn_count = 1;
3855
3856 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3857 dsd->dsc->pc_adjust = 0;
3858 else
3859 dsd->dsc->pc_adjust = 4;
3860 }
3861
3862 static const struct aarch64_insn_visitor visitor =
3863 {
3864 aarch64_displaced_step_b,
3865 aarch64_displaced_step_b_cond,
3866 aarch64_displaced_step_cb,
3867 aarch64_displaced_step_tb,
3868 aarch64_displaced_step_adr,
3869 aarch64_displaced_step_ldr_literal,
3870 aarch64_displaced_step_others,
3871 };
3872
3873 /* Implement the "displaced_step_copy_insn" gdbarch method. */
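/* For example (illustrative): "bl func" cannot be copied verbatim from
   FROM to the scratch pad TO, since both the PC-relative offset and
   the LR value would be wrong there.  The visitor above instead emits
   a retargeted B (or a NOP plus a PC_ADJUST fixup) and writes FROM + 4
   into LR directly.  */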
3874
3875 displaced_step_copy_insn_closure_up
3876 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3877 CORE_ADDR from, CORE_ADDR to,
3878 struct regcache *regs)
3879 {
3880 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3881 struct aarch64_displaced_step_data dsd;
3882 aarch64_inst inst;
3883 ULONGEST insn_from_memory;
3884
3885 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
3886 &insn_from_memory))
3887 return nullptr;
3888
3889 uint32_t insn = insn_from_memory;
3890
3891 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3892 return NULL;
3893
3894 /* Look for a Load Exclusive instruction which begins the sequence,
3895 or for a MOPS instruction. */
3896 if ((inst.opcode->iclass == ldstexcl && bit (insn, 22))
3897 || AARCH64_CPU_HAS_FEATURE (*inst.opcode->avariant, MOPS))
3898 {
3899 /* We can't displaced step atomic sequences or MOPS instructions. */
3900 return NULL;
3901 }
3902
3903 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3904 (new aarch64_displaced_step_copy_insn_closure);
3905 dsd.base.insn_addr = from;
3906 dsd.new_addr = to;
3907 dsd.regs = regs;
3908 dsd.dsc = dsc.get ();
3909 dsd.insn_count = 0;
3910 aarch64_relocate_instruction (insn, &visitor,
3911 (struct aarch64_insn_data *) &dsd);
3912 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3913
3914 if (dsd.insn_count != 0)
3915 {
3916 int i;
3917
3918 /* Instruction can be relocated to scratch pad. Copy
3919 relocated instruction(s) there. */
3920 for (i = 0; i < dsd.insn_count; i++)
3921 {
3922 displaced_debug_printf ("writing insn %.8x at %s",
3923 dsd.insn_buf[i],
3924 paddress (gdbarch, to + i * 4));
3925
3926 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3927 (ULONGEST) dsd.insn_buf[i]);
3928 }
3929 }
3930 else
3931 {
3932 dsc = NULL;
3933 }
3934
3935 /* This is a workaround for a problem with g++ 4.8. */
3936 return displaced_step_copy_insn_closure_up (dsc.release ());
3937 }
3938
3939 /* Implement the "displaced_step_fixup" gdbarch method. */
3940
3941 void
3942 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3943 struct displaced_step_copy_insn_closure *dsc_,
3944 CORE_ADDR from, CORE_ADDR to,
3945 struct regcache *regs, bool completed_p)
3946 {
3947 aarch64_displaced_step_copy_insn_closure *dsc
3948 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3949 CORE_ADDR pc = regcache_read_pc (regs);
3950
3951 /* If the displaced instruction didn't complete successfully then we need
3952 to restore the program counter, and perhaps the Guarded Control Stack. */
3953 if (!completed_p)
3954 {
3955 bool gcs_is_enabled;
3956 gdbarch_get_shadow_stack_pointer (gdbarch, regs, gcs_is_enabled);
3957 if (dsc->linked_branch && gcs_is_enabled)
3958 aarch64_pop_gcs_entry (regs);
3959
3960 pc = from + (pc - to);
3961 regcache_write_pc (regs, pc);
3962 return;
3963 }
3964
3965 displaced_debug_printf ("PC after stepping: %s (was %s).",
3966 paddress (gdbarch, pc), paddress (gdbarch, to));
3967
3968 if (dsc->cond)
3969 {
3970 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3971 dsc->pc_adjust);
3972
3973 if (pc - to == 8)
3974 {
3975 /* Condition is true. */
3976 }
3977 else if (pc - to == 4)
3978 {
3979 /* Condition is false. */
3980 dsc->pc_adjust = 4;
3981 }
3982 else
3983 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3984
3985 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3986 dsc->pc_adjust);
3987 }
3988
3989 displaced_debug_printf ("%s PC by %d",
3990 dsc->pc_adjust ? "adjusting" : "not adjusting",
3991 dsc->pc_adjust);
3992
3993 if (dsc->pc_adjust != 0)
3994 {
3995 /* Make sure the previous instruction was executed (that is, the PC
3996 has changed). If the PC didn't change, then discard the adjustment
3997 offset. Otherwise we may skip an instruction before its execution
3998 took place. */
3999 if ((pc - to) == 0)
4000 {
4001 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
4002 dsc->pc_adjust = 0;
4003 }
4004
4005 displaced_debug_printf ("fixup: set PC to %s:%d",
4006 paddress (gdbarch, from), dsc->pc_adjust);
4007
4008 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
4009 from + dsc->pc_adjust);
4010 }
4011 }
4012
4013 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
4014
4015 bool
4016 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
4017 {
4018 return true;
4019 }
4020
4021 /* Get the target description matching the given set of FEATURES.
4022 If FEATURES.vq is zero then it is assumed SVE is not supported.
4023 (It is not possible to set VQ to zero on an SVE system.)
4024
4025 FEATURES.mte indicates the presence of the Memory Tagging Extension.
4026
4027 FEATURES.tls indicates the presence of the Thread Local Storage feature. */
4028
4029 const target_desc *
4030 aarch64_read_description (const aarch64_features &features)
4031 {
4032 if (features.vq > AARCH64_MAX_SVE_VQ)
4033 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
4034 AARCH64_MAX_SVE_VQ);
4035
4036 struct target_desc *tdesc = tdesc_aarch64_map[features];
4037
4038 if (tdesc == NULL)
4039 {
4040 tdesc = aarch64_create_target_description (features);
4041 tdesc_aarch64_map[features] = tdesc;
4042 }
4043
4044 return tdesc;
4045 }
4046
4047 /* Return the VQ used when creating the target description TDESC. */
4048
4049 static uint64_t
4050 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
4051 {
4052 const struct tdesc_feature *feature_sve;
4053
4054 if (!tdesc_has_registers (tdesc))
4055 return 0;
4056
4057 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
4058
4059 if (feature_sve == nullptr)
4060 return 0;
4061
4062 uint64_t vl = tdesc_register_bitsize (feature_sve,
4063 aarch64_sve_register_names[0]) / 8;
4064 return sve_vq_from_vl (vl);
4065 }
4066
4067
4068 /* Return the svq (streaming vector quotient) used when creating the target
4069 description TDESC. */
4070
4071 static uint64_t
4072 aarch64_get_tdesc_svq (const struct target_desc *tdesc)
4073 {
4074 const struct tdesc_feature *feature_sme;
4075
4076 if (!tdesc_has_registers (tdesc))
4077 return 0;
4078
4079 feature_sme = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
4080
4081 if (feature_sme == nullptr)
4082 return 0;
4083
4084 size_t svl_squared = tdesc_register_bitsize (feature_sme, "za");
4085
4086 /* We have the total size of the ZA matrix, in bits. Figure out the svl
4087 value. */
4088 size_t svl = std::sqrt (svl_squared / 8);
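/* For example (hypothetical numbers): with svl = 32 bytes, ZA is a
   32 x 32 byte matrix, so the tdesc reports 32 * 32 * 8 = 8192 bits;
   sqrt (8192 / 8) = 32 recovers svl, and sve_vq_from_vl maps it to
   svq = 2.  */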
4089
4090 /* Now extract svq. */
4091 return sve_vq_from_vl (svl);
4092 }
4093
4094 /* Get the AArch64 features present in the given target description. */
4095
4096 aarch64_features
4097 aarch64_features_from_target_desc (const struct target_desc *tdesc)
4098 {
4099 aarch64_features features;
4100
4101 if (tdesc == nullptr)
4102 return features;
4103
4104 features.vq = aarch64_get_tdesc_vq (tdesc);
4105
4106 /* We need to look for a couple of pauth feature name variations. */
4107 features.pauth
4108 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
4109
4110 if (!features.pauth)
4111 features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2")
4112 != nullptr);
4113
4114 features.mte
4115 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
4116
4117 const struct tdesc_feature *tls_feature
4118 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4119
4120 if (tls_feature != nullptr)
4121 {
4122 /* We have TLS registers. Find out how many. */
4123 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
4124 features.tls = 2;
4125 else
4126 features.tls = 1;
4127 }
4128
4129 features.svq = aarch64_get_tdesc_svq (tdesc);
4130
4131 /* Check for the SME2 feature. */
4132 features.sme2 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2")
4133 != nullptr);
4134
4135 /* Check for the GCS feature. */
4136 features.gcs = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs")
4137 != nullptr);
4138
4139 /* Check for the GCS Linux feature. */
4140 features.gcs_linux = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs.linux")
4141 != nullptr);
4142
4143 return features;
4144 }
4145
4146 /* Implement the "cannot_store_register" gdbarch method. */
4147
4148 static int
4149 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
4150 {
4151 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4152
4153 if (!tdep->has_pauth ())
4154 return 0;
4155
4156 /* Pointer authentication registers are read-only. */
4157 return (regnum >= tdep->pauth_reg_base
4158 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count);
4159 }
4160
4161 /* Implement the stack_frame_destroyed_p gdbarch method. */
4162
4163 static int
4164 aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
4165 {
4166 CORE_ADDR func_start, func_end;
4167 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
4168 return 0;
4169
4170 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4171
4172 ULONGEST insn_from_memory;
4173 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
4174 &insn_from_memory))
4175 return 0;
4176
4177 uint32_t insn = insn_from_memory;
4178
4179 aarch64_inst inst;
4180 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
4181 return 0;
4182
4183 return streq (inst.opcode->name, "ret");
4184 }
4185
4186 /* Helper to get the allocation tag from a 64-bit ADDRESS.
4187
4188 Return the allocation tag if successful and nullopt otherwise. */
4189
4190 std::optional<CORE_ADDR>
4191 aarch64_mte_get_atag (CORE_ADDR address)
4192 {
4193 gdb::byte_vector tags;
4194
4195 /* Attempt to fetch the allocation tag. */
4196 if (!target_fetch_memtags (address, 1, tags,
4197 static_cast<int> (memtag_type::allocation)))
4198 return {};
4199
4200 /* Only one tag should've been returned. Make sure we got exactly that. */
4201 if (tags.size () != 1)
4202 error (_("Target returned an unexpected number of tags."));
4203
4204 /* Although our tags are 4 bits in size, they are stored in a
4205 byte. */
4206 return tags[0];
4207 }
4208
4209 /* Implement the memtag_matches_p gdbarch method. */
4210
4211 static bool
4212 aarch64_memtag_matches_p (struct gdbarch *gdbarch,
4213 struct value *address)
4214 {
4215 gdb_assert (address != nullptr);
4216
4217 CORE_ADDR addr = value_as_address (address);
4218
4219 /* Fetch the allocation tag for ADDRESS. */
4220 std::optional<CORE_ADDR> atag
4221 = aarch64_mte_get_atag (aarch64_remove_non_address_bits (gdbarch, addr));
4222
4223 if (!atag.has_value ())
4224 return true;
4225
4226 /* Fetch the logical tag for ADDRESS. */
4227 gdb_byte ltag = aarch64_mte_get_ltag (addr);
4228
4229 /* Are the tags the same? */
4230 return ltag == *atag;
4231 }
4232
4233 /* Implement the set_memtags gdbarch method. */
4234
4235 static bool
4236 aarch64_set_memtags (struct gdbarch *gdbarch, struct value *address,
4237 size_t length, const gdb::byte_vector &tags,
4238 memtag_type tag_type)
4239 {
4240 gdb_assert (!tags.empty ());
4241 gdb_assert (address != nullptr);
4242
4243 CORE_ADDR addr = value_as_address (address);
4244
4245 /* Set the logical tag or the allocation tag. */
4246 if (tag_type == memtag_type::logical)
4247 {
4248 /* When setting logical tags, we don't care about the length, since
4249 we are only setting a single logical tag. */
4250 addr = aarch64_mte_set_ltag (addr, tags[0]);
4251
4252 /* Update the value's content with the tag. */
4253 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4254 gdb_byte *srcbuf = address->contents_raw ().data ();
4255 store_unsigned_integer (srcbuf, sizeof (addr), byte_order, addr);
4256 }
4257 else
4258 {
4259 /* Remove the top byte. */
4260 addr = aarch64_remove_non_address_bits (gdbarch, addr);
4261
4262 /* With G being the number of tag granules and N the number of tags
4263 passed in, we can have the following cases:
4264
4265 1 - G == N: Store all the N tags to memory.
4266
4267 2 - G < N : Warn about having more tags than granules, but write G
4268 tags.
4269
4270 3 - G > N : This is a "fill tags" operation. We should use the tags
4271 as a pattern to fill the granules repeatedly until we have
4272 written G tags to memory.
4273 */
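/* For example (illustrative): LENGTH = 48 with the 16-byte granule
   gives G = 3.  Given tags {0xa, 0xb}, N = 2 < G, so the pattern is
   repeated and the three granules end up tagged 0xa, 0xb, 0xa.  */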
4274
4275 size_t g = aarch64_mte_get_tag_granules (addr, length,
4276 AARCH64_MTE_GRANULE_SIZE);
4277 size_t n = tags.size ();
4278
4279 if (g < n)
4280 warning (_("Got more tags than memory granules. Tags will be "
4281 "truncated."));
4282 else if (g > n)
4283 warning (_("Using tag pattern to fill memory range."));
4284
4285 if (!target_store_memtags (addr, length, tags,
4286 static_cast<int> (memtag_type::allocation)))
4287 return false;
4288 }
4289 return true;
4290 }
4291
4292 /* Implement the get_memtag gdbarch method. */
4293
4294 static struct value *
4295 aarch64_get_memtag (struct gdbarch *gdbarch, struct value *address,
4296 memtag_type tag_type)
4297 {
4298 gdb_assert (address != nullptr);
4299
4300 CORE_ADDR addr = value_as_address (address);
4301 CORE_ADDR tag = 0;
4302
4303 /* Get the logical tag or the allocation tag. */
4304 if (tag_type == memtag_type::logical)
4305 tag = aarch64_mte_get_ltag (addr);
4306 else
4307 {
4308 /* Remove the top byte. */
4309 addr = aarch64_remove_non_address_bits (gdbarch, addr);
4310 std::optional<CORE_ADDR> atag = aarch64_mte_get_atag (addr);
4311
4312 if (!atag.has_value ())
4313 return nullptr;
4314
4315 tag = *atag;
4316 }
4317
4318 /* Convert the tag to a value. */
4319 return value_from_ulongest (builtin_type (gdbarch)->builtin_unsigned_int,
4320 tag);
4321 }
4322
4323 /* Implement the memtag_to_string gdbarch method. */
4324
4325 static std::string
4326 aarch64_memtag_to_string (struct gdbarch *gdbarch, struct value *tag_value)
4327 {
4328 if (tag_value == nullptr)
4329 return "";
4330
4331 CORE_ADDR tag = value_as_address (tag_value);
4332
4333 return string_printf ("0x%s", phex_nz (tag));
4334 }
4335
4336 /* See aarch64-tdep.h. */
4337
4338 CORE_ADDR
4339 aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer)
4340 {
4341 /* By default, we assume TBI and discard the top 8 bits plus the VA range
4342 select bit (55). Below we try to fetch information about pointer
4343 authentication masks in order to make non-address removal more
4344 precise. */
4345 CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
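/* For example (illustrative): under TBI alone, a pointer carrying an
   MTE logical tag in its top byte, say 0x0f00ffffa0001234, is reduced
   to 0x0000ffffa0001234 before being used as an address.  */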
4346
4347 /* Check if we have an inferior first. If not, just use the default
4348 mask.
4349
4350 We use the inferior_ptid here because the pointer authentication masks
4351 should be the same across threads of a process. Since we may not have
4352 access to the current thread (GDB may have momentarily switched to
4353 having no inferior selected), we use the inferior ptid. */
4354 if (inferior_ptid != null_ptid)
4355 {
4356 /* If we do have an inferior, attempt to fetch its thread's thread_info
4357 struct. */
4358 thread_info *thread = current_inferior ()->find_thread (inferior_ptid);
4359
4360 /* If the thread is running, we will not be able to fetch the mask
4361 registers. */
4362 if (thread != nullptr && thread->state != THREAD_RUNNING)
4363 {
4364 /* Otherwise, fetch the register cache and the masks. */
4365 struct regcache *regs
4366 = get_thread_regcache (current_inferior ()->process_target (),
4367 inferior_ptid);
4368
4369 /* Use the gdbarch from the register cache to check for pointer
4370 authentication support, as it matches the features found in
4371 that particular thread. */
4372 aarch64_gdbarch_tdep *tdep
4373 = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ());
4374
4375 /* Is there pointer authentication support? */
4376 if (tdep->has_pauth ())
4377 {
4378 CORE_ADDR cmask, dmask;
4379 int dmask_regnum
4380 = AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base);
4381 int cmask_regnum
4382 = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
4383
4384 /* If we have a kernel address and we have kernel-mode address
4385 mask registers, use those instead. */
4386 if (tdep->pauth_reg_count > 2
4387 && pointer & VA_RANGE_SELECT_BIT_MASK)
4388 {
4389 dmask_regnum
4390 = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base);
4391 cmask_regnum
4392 = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
4393 }
4394
4395 /* We have both a code mask and a data mask. For now they are
4396 the same, but this may change in the future. */
4397 if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID)
4398 dmask = mask;
4399
4400 if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID)
4401 cmask = mask;
4402
4403 mask |= aarch64_mask_from_pac_registers (cmask, dmask);
4404 }
4405 }
4406 }
4407
4408 return aarch64_remove_top_bits (pointer, mask);
4409 }
4410
4411 /* Given NAMES, a vector of strings, initialize it with all the SME
4412 pseudo-register names for the current streaming vector length. */
4413
4414 static void
4415 aarch64_initialize_sme_pseudo_names (struct gdbarch *gdbarch,
4416 std::vector<std::string> &names)
4417 {
4418 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4419
4420 gdb_assert (tdep->has_sme ());
4421 gdb_assert (tdep->sme_tile_slice_pseudo_base > 0);
4422 gdb_assert (tdep->sme_tile_pseudo_base > 0);
4423
4424 for (int i = 0; i < tdep->sme_tile_slice_pseudo_count; i++)
4425 {
4426 int regnum = tdep->sme_tile_slice_pseudo_base + i;
4427 struct za_pseudo_encoding encoding;
4428 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4429 names.push_back (aarch64_za_tile_slice_name (encoding));
4430 }
4431 for (int i = 0; i < AARCH64_ZA_TILES_NUM; i++)
4432 {
4433 int regnum = tdep->sme_tile_pseudo_base + i;
4434 struct za_pseudo_encoding encoding;
4435 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4436 names.push_back (aarch64_za_tile_name (encoding));
4437 }
4438 }
4439
4440 /* Initialize the current architecture based on INFO. If possible,
4441 reuse an architecture from ARCHES, which is a list of
4442 architectures already created during this debugging session.
4443
4444 Called e.g. at program startup, when reading a core file, and when
4445 reading a binary file. */
4446
4447 static struct gdbarch *
4448 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
4449 {
4450 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
4451 const struct tdesc_feature *feature_pauth;
4452 bool valid_p = true;
4453 int i, num_regs = 0, num_pseudo_regs = 0;
4454 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
4455 int first_mte_regnum = -1, first_tls_regnum = -1;
4456 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4457 uint64_t svq = aarch64_get_tdesc_svq (info.target_desc);
4458
4459 if (vq > AARCH64_MAX_SVE_VQ)
4460 internal_error (_("VQ out of bounds: %s (max %d)"),
4461 pulongest (vq), AARCH64_MAX_SVE_VQ);
4462
4463 if (svq > AARCH64_MAX_SVE_VQ)
4464 internal_error (_("Streaming vector quotient (svq) out of bounds: %s"
4465 " (max %d)"),
4466 pulongest (svq), AARCH64_MAX_SVE_VQ);
4467
4468 /* If there is already a candidate, use it. */
4469 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
4470 best_arch != nullptr;
4471 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
4472 {
4473 aarch64_gdbarch_tdep *tdep
4474 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4475 if (tdep && tdep->vq == vq && tdep->sme_svq == svq)
4476 return best_arch->gdbarch;
4477 }
4478
4479 /* Ensure we always have a target descriptor, and that it is for the given
4480 VQ and SVQ values. */
4481 const struct target_desc *tdesc = info.target_desc;
4482 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc)
4483 || svq != aarch64_get_tdesc_svq (tdesc))
4484 {
4485 aarch64_features features;
4486 features.vq = vq;
4487 features.svq = svq;
4488 tdesc = aarch64_read_description (features);
4489 }
4490 gdb_assert (tdesc);
4491
4492 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
4493 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
4494 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
4495 const struct tdesc_feature *feature_mte
4496 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
4497 const struct tdesc_feature *feature_tls
4498 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4499
4500 if (feature_core == nullptr)
4501 return nullptr;
4502
4503 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
4504
4505 /* Validate the description provides the mandatory core R registers
4506 and allocate their numbers. */
4507 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
4508 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
4509 AARCH64_X0_REGNUM + i,
4510 aarch64_r_register_names[i]);
4511
4512 num_regs = AARCH64_X0_REGNUM + i;
4513
4514 /* Add the V registers. */
4515 if (feature_fpu != nullptr)
4516 {
4517 if (feature_sve != nullptr)
4518 error (_("Program contains both fpu and SVE features."));
4519
4520 /* Validate the description provides the mandatory V registers
4521 and allocate their numbers. */
4522 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
4523 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
4524 AARCH64_V0_REGNUM + i,
4525 aarch64_v_register_names[i]);
4526
4527 num_regs = AARCH64_V0_REGNUM + i;
4528 }
4529
4530 /* Add the SVE registers. */
4531 if (feature_sve != nullptr)
4532 {
4533 /* Validate the description provides the mandatory SVE registers
4534 and allocate their numbers. */
4535 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
4536 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
4537 AARCH64_SVE_Z0_REGNUM + i,
4538 aarch64_sve_register_names[i]);
4539
4540 num_regs = AARCH64_SVE_Z0_REGNUM + i;
4541 num_pseudo_regs += 32; /* add the Vn register pseudos. */
4542 }
4543
4544 if (feature_fpu != nullptr || feature_sve != nullptr)
4545 {
4546 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
4547 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
4548 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
4549 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
4550 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
4551 }
4552
4553 int first_sme_regnum = -1;
4554 int first_sme2_regnum = -1;
4555 int first_sme_pseudo_regnum = -1;
4556 const struct tdesc_feature *feature_sme
4557 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
4558 if (feature_sme != nullptr)
4559 {
4560 /* Record the first SME register. */
4561 first_sme_regnum = num_regs;
4562
4563 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4564 num_regs++, "svg");
4565
4566 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4567 num_regs++, "svcr");
4568
4569 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4570 num_regs++, "za");
4571
4572 /* Record the first SME pseudo register. */
4573 first_sme_pseudo_regnum = num_pseudo_regs;
4574
4575 /* Add the ZA tile slice pseudo registers. The number of tile slice
4576 pseudo-registers depends on the svl, and is always a multiple of 5. */
4577 num_pseudo_regs += (svq << 5) * 5;
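/* For example, with svq = 2 (svl = 32 bytes) this adds
   (2 << 5) * 5 = 320 tile slice pseudo-registers: 64 for each of the
   five qualifiers (b, h, s, d and q).  */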
4578
4579 /* Add the ZA tile pseudo registers. */
4580 num_pseudo_regs += AARCH64_ZA_TILES_NUM;
4581
4582 /* Now check for the SME2 feature. SME2 is only available if SME is
4583 available. */
4584 const struct tdesc_feature *feature_sme2
4585 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2");
4586 if (feature_sme2 != nullptr)
4587 {
4588 /* Record the first SME2 register. */
4589 first_sme2_regnum = num_regs;
4590
4591 valid_p &= tdesc_numbered_register (feature_sme2, tdesc_data.get (),
4592 num_regs++, "zt0");
4593 }
4594 }
4595
4596 /* Add the TLS register. */
4597 int tls_register_count = 0;
4598 if (feature_tls != nullptr)
4599 {
4600 first_tls_regnum = num_regs;
4601
4602 /* Look for the TLS registers. tpidr is required, but tpidr2 is
4603 optional. */
4604 valid_p
4605 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4606 first_tls_regnum, "tpidr");
4607
4608 if (valid_p)
4609 {
4610 tls_register_count++;
4611
4612 bool has_tpidr2
4613 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4614 first_tls_regnum + tls_register_count,
4615 "tpidr2");
4616
4617 /* Figure out how many TLS registers we have. */
4618 if (has_tpidr2)
4619 tls_register_count++;
4620
4621 num_regs += tls_register_count;
4622 }
4623 else
4624 {
4625 warning (_("Provided TLS register feature doesn't contain "
4626 "required tpidr register."));
4627 return nullptr;
4628 }
4629 }
4630
4631 /* We have two versions of the pauth target description due to a past bug
4632 where GDB would crash when seeing the first version of the pauth target
4633 description. */
4634 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
4635 if (feature_pauth == nullptr)
4636 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2");
4637
4638 /* Add the pauth registers. */
4639 int pauth_masks = 0;
4640 if (feature_pauth != NULL)
4641 {
4642 first_pauth_regnum = num_regs;
4643 ra_sign_state_offset = num_pseudo_regs;
4644
4645 /* Size of the expected register set with all 4 masks. */
4646 int set_size = ARRAY_SIZE (aarch64_pauth_register_names);
4647
4648 /* QEMU exposes a couple of additional masks for the high half of the
4649 address. We should either have 2 registers or 4 registers. */
4650 if (tdesc_unnumbered_register (feature_pauth,
4651 "pauth_dmask_high") == 0)
4652 {
4653 /* We did not find pauth_dmask_high, assume we only have
4654 2 masks; in that case we are not dealing with QEMU or other emulators. */
4655 set_size -= 2;
4656 }
4657
4658 /* Validate the descriptor provides the mandatory PAUTH registers and
4659 allocate their numbers. */
4660 for (i = 0; i < set_size; i++)
4661 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
4662 first_pauth_regnum + i,
4663 aarch64_pauth_register_names[i]);
4664
4665 num_regs += i;
4666 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
4667 pauth_masks = set_size;
4668 }
4669
4670 /* Add the MTE registers. */
4671 if (feature_mte != NULL)
4672 {
4673 first_mte_regnum = num_regs;
4674 /* Validate the descriptor provides the mandatory MTE registers and
4675 allocate their numbers. */
4676 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
4677 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
4678 first_mte_regnum + i,
4679 aarch64_mte_register_names[i]);
4680
4681 num_regs += i;
4682 }
4683 /* W pseudo-registers. */
4684 int first_w_regnum = num_pseudo_regs;
4685 num_pseudo_regs += 31;
4686
4687 const tdesc_feature *feature_gcs
4688 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs");
4689 int first_gcs_regnum = -1;
4690 /* Add the GCS registers. */
4691 if (feature_gcs != nullptr)
4692 {
4693 first_gcs_regnum = num_regs;
4694 /* Validate the descriptor provides the mandatory GCS registers and
4695 allocate their numbers. */
4696 for (i = 0; i < ARRAY_SIZE (aarch64_gcs_register_names); i++)
4697 valid_p &= tdesc_numbered_register (feature_gcs, tdesc_data.get (),
4698 first_gcs_regnum + i,
4699 aarch64_gcs_register_names[i]);
4700
4701 num_regs += i;
4702 }
4703
4704 if (!valid_p)
4705 return nullptr;
4706
4707 const tdesc_feature *feature_gcs_linux
4708 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.gcs.linux");
4709 int first_gcs_linux_regnum = -1;
4710 /* Add the GCS Linux registers. */
4711 if (feature_gcs_linux != nullptr && feature_gcs == nullptr)
4712 {
4713 /* This feature depends on the GCS feature. */
4714 return nullptr;
4715 }
4716 else if (feature_gcs_linux != nullptr)
4717 {
4718 first_gcs_linux_regnum = num_regs;
4719 /* Validate the descriptor provides the mandatory GCS Linux registers
4720 and allocate their numbers. */
4721 for (i = 0; i < ARRAY_SIZE (aarch64_gcs_linux_register_names); i++)
4722 valid_p &= tdesc_numbered_register (feature_gcs_linux, tdesc_data.get (),
4723 first_gcs_linux_regnum + i,
4724 aarch64_gcs_linux_register_names[i]);
4725
4726 num_regs += i;
4727 }
4728
4729 if (!valid_p)
4730 return nullptr;
4731
4732 /* AArch64 code is always little-endian. */
4733 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
4734
4735 gdbarch *gdbarch
4736 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
4737 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4738
4739 /* This should be low enough for everything. */
4740 tdep->lowest_pc = 0x20;
4741 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
4742 tdep->jb_elt_size = 8;
4743 tdep->vq = vq;
4744 tdep->pauth_reg_base = first_pauth_regnum;
4745 tdep->pauth_reg_count = pauth_masks;
4746 tdep->ra_sign_state_regnum = -1;
4747 tdep->mte_reg_base = first_mte_regnum;
4748 tdep->tls_regnum_base = first_tls_regnum;
4749 tdep->tls_register_count = tls_register_count;
4750 tdep->gcs_reg_base = first_gcs_regnum;
4751 tdep->gcs_linux_reg_base = first_gcs_linux_regnum;
4752
4753 /* Set the SME register set details. The pseudo-registers will be adjusted
4754 later. */
4755 tdep->sme_reg_base = first_sme_regnum;
4756 tdep->sme_svg_regnum = first_sme_regnum;
4757 tdep->sme_svcr_regnum = first_sme_regnum + 1;
4758 tdep->sme_za_regnum = first_sme_regnum + 2;
4759 tdep->sme_svq = svq;
4760
4761 /* Set the SME2 register set details. */
4762 tdep->sme2_zt0_regnum = first_sme2_regnum;
4763
4764 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
4765 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
4766
4767 /* Advance PC across function entry code. */
4768 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
4769
4770 /* The stack grows downward. */
4771 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4772
4773 /* Breakpoint manipulation. */
4774 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
4775 aarch64_breakpoint::kind_from_pc);
4776 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
4777 aarch64_breakpoint::bp_from_kind);
4778 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
4779 set_gdbarch_get_next_pcs (gdbarch, aarch64_software_single_step);
4780
4781 /* Information about registers, etc. */
4782 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
4783 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
4784 set_gdbarch_num_regs (gdbarch, num_regs);
4785
4786 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
4787 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
4788 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
4789 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
4790 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
4791 set_tdesc_pseudo_register_reggroup_p (gdbarch,
4792 aarch64_pseudo_register_reggroup_p);
4793 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
4794
4795 /* Set the allocation tag granule size to 16 bytes. */
4796 set_gdbarch_memtag_granule_size (gdbarch, AARCH64_MTE_GRANULE_SIZE);
4797
4798 /* Register a hook for checking if there is a memory tag match. */
4799 set_gdbarch_memtag_matches_p (gdbarch, aarch64_memtag_matches_p);
4800
4801 /* Register a hook for setting the logical/allocation tags for
4802 a range of addresses. */
4803 set_gdbarch_set_memtags (gdbarch, aarch64_set_memtags);
4804
4805 /* Register a hook for extracting the logical/allocation tag from an
4806 address. */
4807 set_gdbarch_get_memtag (gdbarch, aarch64_get_memtag);
4808
4809 /* Register a hook for converting a memory tag to a string. */
4810 set_gdbarch_memtag_to_string (gdbarch, aarch64_memtag_to_string);
4811
4812 /* ABI */
4813 set_gdbarch_short_bit (gdbarch, 16);
4814 set_gdbarch_int_bit (gdbarch, 32);
4815 set_gdbarch_float_bit (gdbarch, 32);
4816 set_gdbarch_double_bit (gdbarch, 64);
4817 set_gdbarch_long_double_bit (gdbarch, 128);
4818 set_gdbarch_long_bit (gdbarch, 64);
4819 set_gdbarch_long_long_bit (gdbarch, 64);
4820 set_gdbarch_ptr_bit (gdbarch, 64);
4821 set_gdbarch_char_signed (gdbarch, 0);
4822 set_gdbarch_wchar_signed (gdbarch, 0);
4823 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
4824 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
4825 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
4826 set_gdbarch_type_align (gdbarch, aarch64_type_align);
4827
4828 /* Detect whether PC is at a point where the stack has been destroyed. */
4829 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
4830
4831 /* Internal <-> external register number maps. */
4832 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
4833
4834 /* Returning results. */
4835 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);
4836
4837 /* Disassembly. */
4838 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
4839
4840 /* Virtual tables. */
4841 set_gdbarch_vbit_in_delta (gdbarch, 1);
4842
4843 /* Hook in the ABI-specific overrides, if they have been registered. */
4844 info.target_desc = tdesc;
4845 info.tdesc_data = tdesc_data.get ();
4846 gdbarch_init_osabi (info, gdbarch);
4847
4848 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
4849 /* Register DWARF CFA vendor handler. */
4850 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
4851 aarch64_execute_dwarf_cfa_vendor_op);
4852
4853 /* Permanent/Program breakpoint handling. */
4854 set_gdbarch_program_breakpoint_here_p (gdbarch,
4855 aarch64_program_breakpoint_here_p);
4856
4857 /* Add some default predicates. */
4858 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
4859 dwarf2_append_unwinders (gdbarch);
4860 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
4861
4862 frame_base_set_default (gdbarch, &aarch64_normal_base);
4863
4864 /* Now that we have tuned the configuration, set a few final things,
4865 based on what the OS ABI has told us. */
4866
4867 if (tdep->jb_pc >= 0)
4868 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
4869
4870 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
4871
4872 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
4873
4874 if (tdep->has_gcs ())
4875 set_gdbarch_shadow_stack_push (gdbarch, aarch64_shadow_stack_push);
4876
4877 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
4878
4879 /* Fetch the updated number of registers after we're done adding all
4880 entries from features we don't explicitly care about. This is the case
4881 for bare metal debugging stubs that include a lot of system registers. */
4882 num_regs = gdbarch_num_regs (gdbarch);
4883
4884 /* With the number of real registers updated, set up the pseudo-registers and
4885 record their numbers. */
4886
4887 /* Set up the W pseudo-register numbers. */
4888 tdep->w_pseudo_base = first_w_regnum + num_regs;
4889 tdep->w_pseudo_count = 31;
4890
4891 /* Pointer authentication pseudo-registers. */
4892 if (tdep->has_pauth ())
4893 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
4894
4895 /* Architecture hook to remove bits of a pointer that are not part of the
4896 address, like memory tags (MTE) and pointer authentication signatures.
4897 Configure address adjustment for watchpoints, breakpoints and memory
4898 transfer. */
4899 set_gdbarch_remove_non_address_bits_watchpoint
4900 (gdbarch, aarch64_remove_non_address_bits);
4901 set_gdbarch_remove_non_address_bits_breakpoint
4902 (gdbarch, aarch64_remove_non_address_bits);
4903 set_gdbarch_remove_non_address_bits_memory
4904 (gdbarch, aarch64_remove_non_address_bits);
4905
4906 /* SME pseudo-registers. */
4907 if (tdep->has_sme ())
4908 {
4909 tdep->sme_pseudo_base = num_regs + first_sme_pseudo_regnum;
4910 tdep->sme_tile_slice_pseudo_base = tdep->sme_pseudo_base;
4911 tdep->sme_tile_slice_pseudo_count = (svq * 32) * 5;
4912 tdep->sme_tile_pseudo_base
4913 = tdep->sme_pseudo_base + tdep->sme_tile_slice_pseudo_count;
4914 tdep->sme_pseudo_count
4915 = tdep->sme_tile_slice_pseudo_count + AARCH64_ZA_TILES_NUM;
4916
4917 /* The SME ZA pseudo-registers are a set of 160 to 2560 pseudo-registers
4918 depending on the value of svl.
4919
4920 The tile pseudo-registers are organized around their qualifiers
4921 (b, h, s, d and q). Their numbers are distributed as follows:
4922
4923 b 0
4924 h 1~2
4925 s 3~6
4926 d 7~14
4927 q 15~30
4928
4929 The naming of the tile pseudo-registers follows the pattern za<t><q>,
4930 where:
4931
4932 <t> is the tile number, with the following possible values based on
4933 the qualifiers:
4934
4935 Qualifier - Allocated indexes
4936
4937 b - 0
4938 h - 0~1
4939 s - 0~3
4940 d - 0~7
4941 q - 0~15
4942
4943 <q> is the qualifier: b, h, s, d and q.
4944
4945 The tile slice pseudo-registers are organized around their
4946 qualifiers as well (b, h, s, d and q), but also around their
4947 direction (h - horizontal and v - vertical).
4948
4949 Even-numbered tile slice pseudo-registers are horizontally-oriented
4950 and odd-numbered tile slice pseudo-registers are vertically-oriented.
4951
4952 Their numbers are distributed as follows:
4953
4954 Qualifier - Allocated indexes
4955
4956 b tile slices - 0~511
4957 h tile slices - 512~1023
4958 s tile slices - 1024~1535
4959 d tile slices - 1536~2047
4960 q tile slices - 2048~2559
4961
4962 The naming of the tile slice pseudo-registers follows the pattern
4963 za<t><d><q><s>, where:
4964
4965 <t> is the tile number as described for the tile pseudo-registers.
4966 <d> is the direction of the tile slice (h or v)
4967 <q> is the qualifier of the tile slice (b, h, s, d or q)
4968 <s> is the slice number, defined as follows:
4969
4970 Qualifier - Allocated indexes
4971
4972 b - 0~15
4973 h - 0~7
4974 s - 0~3
4975 d - 0~1
4976 q - 0
4977
4978 We have helper functions to translate to/from register index from/to
4979 the set of fields that make the pseudo-register names. */
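/* For example (illustrative): "za1vd1" names the vertical d-qualifier
   slice 1 of tile 1, while "za0b" names the single b-qualifier
   tile.  */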
4980
4981 /* Build the array of pseudo-register names available for this
4982 particular gdbarch configuration. */
4983 aarch64_initialize_sme_pseudo_names (gdbarch, tdep->sme_pseudo_names);
4984 }
4985
4986 /* Add standard register aliases. */
4987 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
4988 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
4989 value_of_aarch64_user_reg,
4990 &aarch64_register_aliases[i].regnum);
4991
4992 register_aarch64_ravenscar_ops (gdbarch);
4993
4994 return gdbarch;
4995 }
4996
4997 static void
4998 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
4999 {
5000 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
5001
5002 if (tdep == NULL)
5003 return;
5004
5005 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
5006 paddress (gdbarch, tdep->lowest_pc));
5007
5008 /* SME fields. */
5009 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_q = %s\n"),
5010 host_address_to_string (tdep->sme_tile_type_q));
5011 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_d = %s\n"),
5012 host_address_to_string (tdep->sme_tile_type_d));
5013 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_s = %s\n"),
5014 host_address_to_string (tdep->sme_tile_type_s));
5015 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_h = %s\n"),
5016 host_address_to_string (tdep->sme_tile_type_h));
5017 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_b = %s\n"),
5018 host_address_to_string (tdep->sme_tile_type_b));
5019 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_q = %s\n"),
5020 host_address_to_string (tdep->sme_tile_slice_type_q));
5021 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_d = %s\n"),
5022 host_address_to_string (tdep->sme_tile_slice_type_d));
5023 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_s = %s\n"),
5024 host_address_to_string (tdep->sme_tile_slice_type_s));
5025 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_h = %s\n"),
5026 host_address_to_string (tdep->sme_tile_slice_type_h));
5027 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_b = %s\n"),
5028 host_address_to_string (tdep->sme_tile_slice_type_b));
5029 gdb_printf (file, _("aarch64_dump_tdep: sme_reg_base = %s\n"),
5030 pulongest (tdep->sme_reg_base));
5031 gdb_printf (file, _("aarch64_dump_tdep: sme_svg_regnum = %s\n"),
5032 pulongest (tdep->sme_svg_regnum));
5033 gdb_printf (file, _("aarch64_dump_tdep: sme_svcr_regnum = %s\n"),
5034 pulongest (tdep->sme_svcr_regnum));
5035 gdb_printf (file, _("aarch64_dump_tdep: sme_za_regnum = %s\n"),
5036 pulongest (tdep->sme_za_regnum));
5037 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_base = %s\n"),
5038 pulongest (tdep->sme_pseudo_base));
5039 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_count = %s\n"),
5040 pulongest (tdep->sme_pseudo_count));
5041 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_base = %s\n"),
5042 pulongest (tdep->sme_tile_slice_pseudo_base));
5043 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_count = %s\n"),
5044 pulongest (tdep->sme_tile_slice_pseudo_count));
5045 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_pseudo_base = %s\n"),
5046 pulongest (tdep->sme_tile_pseudo_base));
5047 gdb_printf (file, _("aarch64_dump_tdep: sme_svq = %s\n"),
5048 pulongest (tdep->sme_svq));
5049
5050 gdb_printf (file, _("aarch64_dump_tdep: gcs_reg_base = %d\n"),
5051 tdep->gcs_reg_base);
5052 gdb_printf (file, _("aarch64_dump_tdep: gcs_linux_reg_base = %d\n"),
5053 tdep->gcs_linux_reg_base);
5054 }
5055
5056 #if GDB_SELF_TEST
5057 namespace selftests
5058 {
5059 static void aarch64_process_record_test (void);
5060 }
5061 #endif
5062
5063 INIT_GDB_FILE (aarch64_tdep)
5064 {
5065 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
5066 aarch64_dump_tdep);
5067
5068 /* Debug this file's internals. */
5069 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
5070 Set AArch64 debugging."), _("\
5071 Show AArch64 debugging."), _("\
5072 When on, AArch64 specific debugging is enabled."),
5073 NULL,
5074 show_aarch64_debug,
5075 &setdebuglist, &showdebuglist);
5076
5077 #if GDB_SELF_TEST
5078 selftests::register_test ("aarch64-analyze-prologue",
5079 selftests::aarch64_analyze_prologue_test);
5080 selftests::register_test ("aarch64-process-record",
5081 selftests::aarch64_process_record_test);
5082 #endif
5083 }
5084
5085 /* AArch64 process record-replay related structures, defines etc. */
5086
5087 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
5088 do \
5089 { \
5090 unsigned int reg_len = LENGTH; \
5091 if (reg_len) \
5092 { \
5093 REGS = XNEWVEC (uint32_t, reg_len); \
5094 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
5095 } \
5096 } \
5097 while (0)
5098
5099 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
5100 do \
5101 { \
5102 unsigned int mem_len = LENGTH; \
5103 if (mem_len) \
5104 { \
5105 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
5106 memcpy (MEMS, &RECORD_BUF[0], \
5107 sizeof (struct aarch64_mem_r) * LENGTH); \
5108 } \
5109 } \
5110 while (0)
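/* Illustrative use (mirroring the record handlers below): once a
   handler has filled RECORD_BUF with the numbers of the registers an
   insn modifies,

     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   copies them into a freshly allocated array owned by the decode
   record.  */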
5111
5112 /* AArch64 record/replay structures and enumerations. */
5113
5114 struct aarch64_mem_r
5115 {
5116 uint64_t len; /* Record length. */
5117 uint64_t addr; /* Memory address. */
5118 };
5119
5120 enum aarch64_record_result
5121 {
5122 AARCH64_RECORD_SUCCESS,
5123 AARCH64_RECORD_UNSUPPORTED,
5124 AARCH64_RECORD_UNKNOWN
5125 };
5126
5127 struct aarch64_insn_decode_record
5128 {
5129 struct gdbarch *gdbarch;
5130 struct regcache *regcache;
5131 CORE_ADDR this_addr; /* Address of insn to be recorded. */
5132 uint32_t aarch64_insn; /* Insn to be recorded. */
5133 uint32_t mem_rec_count; /* Count of memory records. */
5134 uint32_t reg_rec_count; /* Count of register records. */
5135 uint32_t *aarch64_regs; /* Registers to be recorded. */
5136 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
5137 };
5138
5139 /* Record handler for data processing - register instructions. */
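/* For example (illustrative): "adds x0, x1, x2" encodes as 0xab020020;
   bit 28 is clear, bits 24-27 are 0x0b (add/subtract) and bit 29 (the
   S flag) is set, so both x0 and the CPSR are recorded.  */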
5140
5141 static unsigned int
5142 aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
5143 {
5144 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
5145 uint32_t record_buf[4];
5146
5147 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5148 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5149 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
5150
5151 if (!bit (aarch64_insn_r->aarch64_insn, 28))
5152 {
5153 uint8_t setflags;
5154
5155 /* Logical (shifted register). */
5156 if (insn_bits24_27 == 0x0a)
5157 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
5158 /* Add/subtract. */
5159 else if (insn_bits24_27 == 0x0b)
5160 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
5161 else
5162 return AARCH64_RECORD_UNKNOWN;
5163
5164 record_buf[0] = reg_rd;
5165 aarch64_insn_r->reg_rec_count = 1;
5166 if (setflags)
5167 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
5168 }
5169 else
5170 {
5171 if (insn_bits24_27 == 0x0b)
5172 {
5173 /* Data-processing (3 source). */
5174 record_buf[0] = reg_rd;
5175 aarch64_insn_r->reg_rec_count = 1;
5176 }
5177 else if (insn_bits24_27 == 0x0a)
5178 {
5179 if (insn_bits21_23 == 0x00)
5180 {
5181 /* Add/subtract (with carry). */
5182 record_buf[0] = reg_rd;
5183 aarch64_insn_r->reg_rec_count = 1;
5184 if (bit (aarch64_insn_r->aarch64_insn, 29))
5185 {
5186 record_buf[1] = AARCH64_CPSR_REGNUM;
5187 aarch64_insn_r->reg_rec_count = 2;
5188 }
5189 }
5190 else if (insn_bits21_23 == 0x02)
5191 {
5192 /* Conditional compare (register) and conditional compare
5193 (immediate) instructions. */
5194 record_buf[0] = AARCH64_CPSR_REGNUM;
5195 aarch64_insn_r->reg_rec_count = 1;
5196 }
5197 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
5198 {
5199 /* Conditional select. */
5200 /* Data-processing (2 source). */
5201 /* Data-processing (1 source). */
5202 record_buf[0] = reg_rd;
5203 aarch64_insn_r->reg_rec_count = 1;
5204 }
5205 else
5206 return AARCH64_RECORD_UNKNOWN;
5207 }
5208 }
5209
5210 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5211 record_buf);
5212 return AARCH64_RECORD_SUCCESS;
5213 }
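
/* Worked example (hand-checked; for illustration only): 0xab020020 is
   "adds x0, x1, x2".  Bit 28 is clear and bits 24-27 are 0x0b, so it
   falls into the add/subtract (shifted register) case above; bit 29
   (the S flag) is set, so both x0 and the CPSR are recorded.  */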
5214
5215 /* Record handler for data processing - immediate instructions. */
5216
5217 static unsigned int
5218 aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
5219 {
5220 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
5221 uint32_t record_buf[4];
5222
5223 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5224 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
5225 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5226
5227 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
5228 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
5229 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
5230 {
5231 record_buf[0] = reg_rd;
5232 aarch64_insn_r->reg_rec_count = 1;
5233 }
5234 else if (insn_bits24_27 == 0x01)
5235 {
5236 /* Add/Subtract (immediate). */
5237 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
5238 record_buf[0] = reg_rd;
5239 aarch64_insn_r->reg_rec_count = 1;
5240 if (setflags)
5241 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
5242 }
5243 else if (insn_bits24_27 == 0x02 && !insn_bit23)
5244 {
5245 /* Logical (immediate). */
5246 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
5247 record_buf[0] = reg_rd;
5248 aarch64_insn_r->reg_rec_count = 1;
5249 if (setflags)
5250 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
5251 }
5252 else
5253 return AARCH64_RECORD_UNKNOWN;
5254
5255 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5256 record_buf);
5257 return AARCH64_RECORD_SUCCESS;
5258 }
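
/* Worked example (hand-checked; for illustration only): 0xf1000420 is
   "subs x0, x1, #1".  Bits 24-27 are 0x01, selecting the add/subtract
   (immediate) case above, and bit 29 is set, so the handler records x0
   and the CPSR.  */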
5259
5260 /* Record handler for branch, exception generation and system instructions. */
5261
5262 static unsigned int
5263 aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
5264 {
5265
5266 aarch64_gdbarch_tdep *tdep
5267 = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
5268 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
5269 uint32_t record_buf[4];
5270
5271 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5272 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
5273 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5274
5275 if (insn_bits28_31 == 0x0d)
5276 {
5277 /* Exception generation instructions. */
5278 if (insn_bits24_27 == 0x04)
5279 {
5280 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
5281 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
5282 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
5283 {
5284 ULONGEST svc_number;
5285
5286 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
5287 &svc_number);
5288 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
5289 svc_number);
5290 }
5291 else
5292 return AARCH64_RECORD_UNSUPPORTED;
5293 }
5294 /* System instructions. */
5295 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
5296 {
5297 uint32_t reg_rt, reg_crn;
5298
5299 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5300 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5301
5302 /* Record rt for sysl and mrs instructions. */
5303 if (bit (aarch64_insn_r->aarch64_insn, 21))
5304 {
5305 record_buf[0] = reg_rt;
5306 aarch64_insn_r->reg_rec_count = 1;
5307 }
5308 /* Record cpsr for hint and msr (immediate) instructions. */
5309 else if (reg_crn == 0x02 || reg_crn == 0x04)
5310 {
5311 record_buf[0] = AARCH64_CPSR_REGNUM;
5312 aarch64_insn_r->reg_rec_count = 1;
5313 }
5314 }
5315 /* Unconditional branch (register). */
5316 else if ((insn_bits24_27 & 0x0e) == 0x06)
5317 {
5318 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5319 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
5320 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
5321 }
5322 else
5323 return AARCH64_RECORD_UNKNOWN;
5324 }
5325 /* Unconditional branch (immediate). */
5326 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
5327 {
5328 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5329 if (bit (aarch64_insn_r->aarch64_insn, 31))
5330 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
5331 }
5332 else
5333 /* Compare & branch (immediate), Test & branch (immediate) and
5334 Conditional branch (immediate). */
5335 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5336
5337 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5338 record_buf);
5339 return AARCH64_RECORD_SUCCESS;
5340 }
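
/* Worked example (hand-checked; for illustration only): 0x94000001 is
   "bl .+4".  Bits 28-31 are 0x9 and bits 24-27 are 0x4, matching the
   unconditional branch (immediate) case; bit 31 is set (BL rather than
   B), so both the PC and the LR are recorded.  */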
5341
5342 /* Record handler for advanced SIMD load and store instructions. */
5343
5344 static unsigned int
5345 aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
5346 {
5347 CORE_ADDR address;
5348 uint64_t addr_offset = 0;
5349 uint32_t record_buf[65]; /* Up to 64 register records for a load multiple (e.g. LD4 with 16 elements), plus one for base writeback. */
5350 std::vector<uint64_t> record_buf_mem;
5351 uint32_t reg_rn, reg_rt;
5352 uint32_t reg_index = 0;
5353 uint8_t opcode_bits, size_bits;
5354
5355 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5356 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5357 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5358 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5359 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
5360
5361 if (record_debug)
5362 debug_printf ("Process record: Advanced SIMD load/store\n");
5363
5364 /* Load/store single structure. */
5365 if (bit (aarch64_insn_r->aarch64_insn, 24))
5366 {
5367 uint8_t sindex, scale, selem, esize, replicate = 0;
5368 scale = opcode_bits >> 2;
5369 selem = ((opcode_bits & 0x02)
5370 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
5371 switch (scale)
5372 {
5373 case 1:
5374 if (size_bits & 0x01)
5375 return AARCH64_RECORD_UNKNOWN;
5376 break;
5377 case 2:
5378 if ((size_bits >> 1) & 0x01)
5379 return AARCH64_RECORD_UNKNOWN;
5380 if (size_bits & 0x01)
5381 {
5382 if (!((opcode_bits >> 1) & 0x01))
5383 scale = 3;
5384 else
5385 return AARCH64_RECORD_UNKNOWN;
5386 }
5387 break;
5388 case 3:
5389 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
5390 {
5391 scale = size_bits;
5392 replicate = 1;
5393 break;
5394 }
5395 else
5396 return AARCH64_RECORD_UNKNOWN;
5397 default:
5398 break;
5399 }
5400 esize = 8 << scale;
5401 if (replicate)
5402 for (sindex = 0; sindex < selem; sindex++)
5403 {
5404 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
5405 reg_rt = (reg_rt + 1) % 32;
5406 }
5407 else
5408 {
5409 for (sindex = 0; sindex < selem; sindex++)
5410 {
5411 if (bit (aarch64_insn_r->aarch64_insn, 22))
5412 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
5413 else
5414 {
5415 record_buf_mem.push_back (esize / 8);
5416 record_buf_mem.push_back (address + addr_offset);
5417 }
5418 addr_offset = addr_offset + (esize / 8);
5419 reg_rt = (reg_rt + 1) % 32;
5420 }
5421 }
5422 }
5423 /* Load/store multiple structure. */
5424 else
5425 {
5426 uint8_t selem, esize, rpt, elements;
5427 uint8_t eindex, rindex;
5428
5429 esize = 8 << size_bits;
5430 if (bit (aarch64_insn_r->aarch64_insn, 30))
5431 elements = 128 / esize;
5432 else
5433 elements = 64 / esize;
5434
5435 switch (opcode_bits)
5436 {
5437 /* LD/ST4 (4 Registers). */
5438 case 0:
5439 rpt = 1;
5440 selem = 4;
5441 break;
5442 /* LD/ST1 (4 Registers). */
5443 case 2:
5444 rpt = 4;
5445 selem = 1;
5446 break;
5447 /* LD/ST3 (3 Registers). */
5448 case 4:
5449 rpt = 1;
5450 selem = 3;
5451 break;
5452 /* LD/ST1 (3 Registers). */
5453 case 6:
5454 rpt = 3;
5455 selem = 1;
5456 break;
5457 /* LD/ST1 (1 Register). */
5458 case 7:
5459 rpt = 1;
5460 selem = 1;
5461 break;
5462 /* LD/ST2 (2 Registers). */
5463 case 8:
5464 rpt = 1;
5465 selem = 2;
5466 break;
5467 /* LD/ST1 (2 Registers). */
5468 case 10:
5469 rpt = 2;
5470 selem = 1;
5471 break;
5472 default:
5473 return AARCH64_RECORD_UNSUPPORTED;
5474 break;
5475 }
5476 for (rindex = 0; rindex < rpt; rindex++)
5477 for (eindex = 0; eindex < elements; eindex++)
5478 {
5479 uint8_t reg_tt, sindex;
5480 reg_tt = (reg_rt + rindex) % 32;
5481 for (sindex = 0; sindex < selem; sindex++)
5482 {
5483 if (bit (aarch64_insn_r->aarch64_insn, 22))
5484 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
5485 else
5486 {
5487 record_buf_mem.push_back (esize / 8);
5488 record_buf_mem.push_back (address + addr_offset);
5489 }
5490 addr_offset = addr_offset + (esize / 8);
5491 reg_tt = (reg_tt + 1) % 32;
5492 }
5493 }
5494 }
5495
5496 if (bit (aarch64_insn_r->aarch64_insn, 23))
5497 record_buf[reg_index++] = reg_rn;
5498
5499 aarch64_insn_r->reg_rec_count = reg_index;
5500 aarch64_insn_r->mem_rec_count = record_buf_mem.size () / 2;
5501 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
5502 record_buf_mem.data ());
5503 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5504 record_buf);
5505 return AARCH64_RECORD_SUCCESS;
5506 }
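
/* Worked example (hand-checked; for illustration only): 0x0c400000 is
   "ld4 {v0.8b-v3.8b}, [x0]".  Bit 24 is clear, so it is a load/store
   multiple structure with opcode_bits 0 (rpt = 1, selem = 4), esize 8
   and, with bit 30 clear, 8 elements; the loops above therefore push
   32 register records covering v0-v3.  Bit 23 is clear, so x0 is not
   recorded for writeback.  */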
5507
5508 /* Record handler for Memory Copy and Memory Set instructions. */
5509
5510 static unsigned int
5511 aarch64_record_memcopy_memset (aarch64_insn_decode_record *aarch64_insn_r)
5512 {
5513 if (record_debug)
5514 debug_printf ("Process record: memory copy and memory set\n");
5515
5516 uint8_t op1 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5517 uint8_t op2 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5518 uint32_t reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5519 uint32_t reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5520 uint32_t record_buf[3];
5521 uint64_t record_buf_mem[4];
5522
5523 if (op1 == 3 && op2 > 11)
5524 /* Unallocated instructions. */
5525 return AARCH64_RECORD_UNKNOWN;
5526
5527 /* Set instructions have two registers and one memory region to be
5528 recorded. */
5529 record_buf[0] = reg_rd;
5530 record_buf[1] = reg_rn;
5531 aarch64_insn_r->reg_rec_count = 2;
5532
5533 ULONGEST dest_addr;
5534 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rd, &dest_addr);
5535
5536 LONGEST length;
5537 regcache_raw_read_signed (aarch64_insn_r->regcache, reg_rn, &length);
5538
5539 /* Depending on which of the permitted algorithm options the processor
5540 implements, the length in Rn may be stored with an inverted sign. */
5541 if (length < 0)
5542 length *= -1;
5543
5544 record_buf_mem[0] = length;
5545 record_buf_mem[1] = dest_addr;
5546 aarch64_insn_r->mem_rec_count = 1;
5547
5548 if (op1 != 3)
5549 {
5550 /* Copy instructions have an additional register and an additional
5551 memory region to be recorded. */
5552 uint32_t reg_rs = bits (aarch64_insn_r->aarch64_insn, 16, 20);
5553
5554 record_buf[2] = reg_rs;
5555 aarch64_insn_r->reg_rec_count++;
5556
5557 ULONGEST source_addr;
5558 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rs,
5559 &source_addr);
5560
5561 record_buf_mem[2] = length;
5562 record_buf_mem[3] = source_addr;
5563 aarch64_insn_r->mem_rec_count++;
5564 }
5565
5566 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
5567 record_buf_mem);
5568 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5569 record_buf);
5570 return AARCH64_RECORD_SUCCESS;
5571 }
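
/* For illustration: for a copy such as cpyfp the handler above records
   Rd (destination pointer), Rn (length) and Rs (source pointer) plus
   two memory regions, while a set such as setp (op1 == 3) records only
   Rd, Rn and the destination region.  The pointer and length registers
   must be recorded because the copy/set family updates them as the
   operation makes progress.  */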
5572
5573 /* Record handler for load and store instructions. */
5574
5575 static unsigned int
5576 aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
5577 {
5578 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
5579 uint8_t insn_bit23, insn_bit21;
5580 uint8_t opc, size_bits, ld_flag, vector_flag;
5581 uint32_t reg_rn, reg_rt, reg_rt2;
5582 uint64_t datasize, offset;
5583 uint32_t record_buf[8];
5584 uint64_t record_buf_mem[8];
5585 CORE_ADDR address;
5586
5587 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5588 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5589 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
5590 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
5591 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
5592 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
5593 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
5594 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5595 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5596 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
5597 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
5598
5599 /* Load/store exclusive. */
5600 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
5601 {
5602 if (record_debug)
5603 debug_printf ("Process record: load/store exclusive\n");
5604
5605 if (ld_flag)
5606 {
5607 record_buf[0] = reg_rt;
5608 aarch64_insn_r->reg_rec_count = 1;
5609 if (insn_bit21)
5610 {
5611 record_buf[1] = reg_rt2;
5612 aarch64_insn_r->reg_rec_count = 2;
5613 }
5614 }
5615 else
5616 {
5617 if (insn_bit21)
5618 datasize = (8 << size_bits) * 2;
5619 else
5620 datasize = (8 << size_bits);
5621 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5622 &address);
5623 record_buf_mem[0] = datasize / 8;
5624 record_buf_mem[1] = address;
5625 aarch64_insn_r->mem_rec_count = 1;
5626 if (!insn_bit23)
5627 {
5628 /* Save register rs. */
5629 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
5630 aarch64_insn_r->reg_rec_count = 1;
5631 }
5632 }
5633 }
5634 /* Load register (literal) instructions decoding. */
5635 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
5636 {
5637 if (record_debug)
5638 debug_printf ("Process record: load register (literal)\n");
5639 if (vector_flag)
5640 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5641 else
5642 record_buf[0] = reg_rt;
5643 aarch64_insn_r->reg_rec_count = 1;
5644 }
5645 /* All types of load/store pair instructions decoding. */
5646 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
5647 {
5648 if (record_debug)
5649 debug_printf ("Process record: load/store pair\n");
5650
5651 if (ld_flag)
5652 {
5653 if (vector_flag)
5654 {
5655 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5656 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
5657 }
5658 else
5659 {
5660 record_buf[0] = reg_rt;
5661 record_buf[1] = reg_rt2;
5662 }
5663 aarch64_insn_r->reg_rec_count = 2;
5664 }
5665 else
5666 {
5667 uint16_t imm7_off;
5668 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
5669 if (!vector_flag)
5670 size_bits = size_bits >> 1;
5671 datasize = 8 << (2 + size_bits);
5672 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
5673 offset = offset << (2 + size_bits);
5674 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5675 &address);
5676 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
5677 {
5678 if (imm7_off & 0x40)
5679 address = address - offset;
5680 else
5681 address = address + offset;
5682 }
5683
5684 record_buf_mem[0] = datasize / 8;
5685 record_buf_mem[1] = address;
5686 record_buf_mem[2] = datasize / 8;
5687 record_buf_mem[3] = address + (datasize / 8);
5688 aarch64_insn_r->mem_rec_count = 2;
5689 }
5690 if (bit (aarch64_insn_r->aarch64_insn, 23))
5691 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
5692 }
5693 /* Load/store register (unsigned immediate) instructions. */
5694 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
5695 {
5696 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5697 if (!(opc >> 1))
5698 {
5699 if (opc & 0x01)
5700 ld_flag = 0x01;
5701 else
5702 ld_flag = 0x0;
5703 }
5704 else
5705 {
5706 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
5707 {
5708 /* PRFM (immediate) */
5709 return AARCH64_RECORD_SUCCESS;
5710 }
5711 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
5712 {
5713 /* LDRSW (immediate) */
5714 ld_flag = 0x1;
5715 }
5716 else
5717 {
5718 if (opc & 0x01)
5719 ld_flag = 0x01;
5720 else
5721 ld_flag = 0x0;
5722 }
5723 }
5724
5725 if (record_debug)
5726 {
5727 debug_printf ("Process record: load/store (unsigned immediate):"
5728 " size %x V %d opc %x\n", size_bits, vector_flag,
5729 opc);
5730 }
5731
5732 if (!ld_flag)
5733 {
5734 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
5735 datasize = 8 << size_bits;
5736 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5737 &address);
5738 offset = offset << size_bits;
5739 address = address + offset;
5740
5741 record_buf_mem[0] = datasize >> 3;
5742 record_buf_mem[1] = address;
5743 aarch64_insn_r->mem_rec_count = 1;
5744 }
5745 else
5746 {
5747 if (vector_flag)
5748 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5749 else
5750 record_buf[0] = reg_rt;
5751 aarch64_insn_r->reg_rec_count = 1;
5752 }
5753 }
5754 /* Load/store register (register offset) instructions. */
5755 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
5756 && insn_bits10_11 == 0x02 && insn_bit21)
5757 {
5758 if (record_debug)
5759 debug_printf ("Process record: load/store (register offset)\n");
5760 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5761 if (!(opc >> 1))
5762 if (opc & 0x01)
5763 ld_flag = 0x01;
5764 else
5765 ld_flag = 0x0;
5766 else
5767 if (size_bits != 0x03)
5768 ld_flag = 0x01;
5769 else
5770 return AARCH64_RECORD_UNKNOWN;
5771
5772 if (!ld_flag)
5773 {
5774 ULONGEST reg_rm_val;
5775
5776 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
5777 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
5778 if (bit (aarch64_insn_r->aarch64_insn, 12))
5779 offset = reg_rm_val << size_bits;
5780 else
5781 offset = reg_rm_val;
5782 datasize = 8 << size_bits;
5783 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5784 &address);
5785 address = address + offset;
5786 record_buf_mem[0] = datasize >> 3;
5787 record_buf_mem[1] = address;
5788 aarch64_insn_r->mem_rec_count = 1;
5789 }
5790 else
5791 {
5792 if (vector_flag)
5793 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5794 else
5795 record_buf[0] = reg_rt;
5796 aarch64_insn_r->reg_rec_count = 1;
5797 }
5798 }
5799 /* Load/store register (immediate and unprivileged) instructions. */
5800 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
5801 && !insn_bit21)
5802 {
5803 if (record_debug)
5804 {
5805 debug_printf ("Process record: load/store "
5806 "(immediate and unprivileged)\n");
5807 }
5808 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5809 if (!(opc >> 1))
5810 if (opc & 0x01)
5811 ld_flag = 0x01;
5812 else
5813 ld_flag = 0x0;
5814 else
5815 if (size_bits != 0x03)
5816 ld_flag = 0x01;
5817 else
5818 return AARCH64_RECORD_UNKNOWN;
5819
5820 if (!ld_flag)
5821 {
5822 uint16_t imm9_off;
5823 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
5824 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
5825 datasize = 8 << size_bits;
5826 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5827 &address);
5828 if (insn_bits10_11 != 0x01)
5829 {
5830 if (imm9_off & 0x0100)
5831 address = address - offset;
5832 else
5833 address = address + offset;
5834 }
5835 record_buf_mem[0] = datasize >> 3;
5836 record_buf_mem[1] = address;
5837 aarch64_insn_r->mem_rec_count = 1;
5838 }
5839 else
5840 {
5841 if (vector_flag)
5842 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5843 else
5844 record_buf[0] = reg_rt;
5845 aarch64_insn_r->reg_rec_count = 1;
5846 }
5847 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
5848 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
5849 }
5850 /* Memory Copy and Memory Set instructions. */
5851 else if ((insn_bits24_27 & 1) == 1 && insn_bits28_29 == 1
5852 && insn_bits10_11 == 1 && !insn_bit21)
5853 return aarch64_record_memcopy_memset (aarch64_insn_r);
5854 /* Advanced SIMD load/store instructions. */
5855 else
5856 return aarch64_record_asimd_load_store (aarch64_insn_r);
5857
5858 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
5859 record_buf_mem);
5860 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5861 record_buf);
5862 return AARCH64_RECORD_SUCCESS;
5863 }
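
/* Worked example (hand-checked; for illustration only): 0xa9bf53f3 is
   "stp x19, x20, [sp, #-16]!".  It lands in the load/store pair case:
   imm7 (bits 15-21) is 0x7e, i.e. -2, giving offset = 2 << (2 + 1)
   = 16, so the two 8-byte memory records start at sp - 16; bit 23
   (writeback) additionally records sp itself.  */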
5864
5865 /* Record handler for data processing SIMD and floating point instructions. */
5866
5867 static unsigned int
5868 aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
5869 {
5870 uint8_t insn_bit21, opcode, rmode, reg_rd;
5871 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
5872 uint8_t insn_bits11_14;
5873 uint32_t record_buf[2];
5874
5875 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5876 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
5877 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5878 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5879 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
5880 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
5881 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
5882 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5883 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
5884
5885 if (record_debug)
5886 debug_printf ("Process record: data processing SIMD/FP: ");
5887
5888 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
5889 {
5890 /* Floating point - fixed point conversion instructions. */
5891 if (!insn_bit21)
5892 {
5893 if (record_debug)
5894 debug_printf ("FP - fixed point conversion");
5895
5896 if ((opcode >> 1) == 0x0 && rmode == 0x03)
5897 record_buf[0] = reg_rd;
5898 else
5899 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5900 }
5901 /* Floating point - conditional compare instructions. */
5902 else if (insn_bits10_11 == 0x01)
5903 {
5904 if (record_debug)
5905 debug_printf ("FP - conditional compare");
5906
5907 record_buf[0] = AARCH64_CPSR_REGNUM;
5908 }
5909 /* Floating point - data processing (2-source) and
5910 conditional select instructions. */
5911 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
5912 {
5913 if (record_debug)
5914 debug_printf ("FP - DP (2-source)");
5915
5916 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5917 }
5918 else if (insn_bits10_11 == 0x00)
5919 {
5920 /* Floating point - immediate instructions. */
5921 if ((insn_bits12_15 & 0x01) == 0x01
5922 || (insn_bits12_15 & 0x07) == 0x04)
5923 {
5924 if (record_debug)
5925 debug_printf ("FP - immediate");
5926 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5927 }
5928 /* Floating point - compare instructions. */
5929 else if ((insn_bits12_15 & 0x03) == 0x02)
5930 {
5931 if (record_debug)
5932 debug_printf ("FP - compare");
5933 record_buf[0] = AARCH64_CPSR_REGNUM;
5934 }
5935 /* Floating point - integer conversions instructions. */
5936 else if (insn_bits12_15 == 0x00)
5937 {
5938 /* Convert float to integer instruction. */
5939 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
5940 {
5941 if (record_debug)
5942 debug_printf ("float to int conversion");
5943
5944 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5945 }
5946 /* Convert integer to float instruction. */
5947 else if ((opcode >> 1) == 0x01 && !rmode)
5948 {
5949 if (record_debug)
5950 debug_printf ("int to float conversion");
5951
5952 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5953 }
5954 /* Move float to integer instruction. */
5955 else if ((opcode >> 1) == 0x03)
5956 {
5957 if (record_debug)
5958 debug_printf ("move float to int");
5959
5960 if (!(opcode & 0x01))
5961 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5962 else
5963 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5964 }
5965 else
5966 return AARCH64_RECORD_UNKNOWN;
5967 }
5968 else
5969 return AARCH64_RECORD_UNKNOWN;
5970 }
5971 else
5972 return AARCH64_RECORD_UNKNOWN;
5973 }
5974 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
5975 {
5976 if (record_debug)
5977 debug_printf ("SIMD copy");
5978
5979 /* Advanced SIMD copy instructions. */
5980 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
5981 && !bit (aarch64_insn_r->aarch64_insn, 15)
5982 && bit (aarch64_insn_r->aarch64_insn, 10))
5983 {
5984 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
5985 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5986 else
5987 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5988 }
5989 else
5990 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5991 }
5992 /* All remaining floating point or advanced SIMD instructions. */
5993 else
5994 {
5995 if (record_debug)
5996 debug_printf ("all remain");
5997
5998 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5999 }
6000
6001 if (record_debug)
6002 debug_printf ("\n");
6003
6004 /* Record the V/X register. */
6005 aarch64_insn_r->reg_rec_count++;
6006
6007 /* Some of these instructions may set bits in the FPSR, so record it
6008 too. */
6009 record_buf[1] = AARCH64_FPSR_REGNUM;
6010 aarch64_insn_r->reg_rec_count++;
6011
6012 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
6013 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
6014 record_buf);
6015 return AARCH64_RECORD_SUCCESS;
6016 }
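
/* Worked example (hand-checked; for illustration only): 0x9e620000 is
   "scvtf d0, x0".  Bits 24-27 are 0x0e with bit 21 set and bits 10-15
   all clear, so it reaches the integer conversion cases; opcode >> 1
   is 0x01 with rmode 0x00 (integer to float), so v0 and the FPSR are
   recorded.  */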
6017
6018 /* Decodes the instruction type and invokes the matching record handler. */
6019
6020 static unsigned int
6021 aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
6022 {
6023 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
6024
6025 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
6026 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
6027 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
6028 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
6029
6030 /* Data processing - immediate instructions. */
6031 if (!ins_bit26 && !ins_bit27 && ins_bit28)
6032 return aarch64_record_data_proc_imm (aarch64_insn_r);
6033
6034 /* Branch, exception generation and system instructions. */
6035 if (ins_bit26 && !ins_bit27 && ins_bit28)
6036 return aarch64_record_branch_except_sys (aarch64_insn_r);
6037
6038 /* Load and store instructions. */
6039 if (!ins_bit25 && ins_bit27)
6040 return aarch64_record_load_store (aarch64_insn_r);
6041
6042 /* Data processing - register instructions. */
6043 if (ins_bit25 && !ins_bit26 && ins_bit27)
6044 return aarch64_record_data_proc_reg (aarch64_insn_r);
6045
6046 /* Data processing - SIMD and floating point instructions. */
6047 if (ins_bit25 && ins_bit26 && ins_bit27)
6048 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
6049
6050 return AARCH64_RECORD_UNSUPPORTED;
6051 }
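
/* For illustration, how a few common encodings dispatch (hand-checked;
   treat as examples rather than reference data):

     0x91000421  add x1, x1, #1            -> data processing (immediate)
     0x94000001  bl .+4                    -> branch/exception/system
     0xa9bf53f3  stp x19, x20, [sp, #-16]! -> load and store
     0x9e620000  scvtf d0, x0              -> SIMD and floating point  */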
6052
6053 /* Cleans up local record registers and memory allocations. */
6054
6055 static void
6056 deallocate_reg_mem (aarch64_insn_decode_record *record)
6057 {
6058 xfree (record->aarch64_regs);
6059 xfree (record->aarch64_mems);
6060 }
6061
6062 #if GDB_SELF_TEST
6063 namespace selftests {
6064
6065 static void
6066 aarch64_process_record_test (void)
6067 {
6068 struct gdbarch_info info;
6069 uint32_t ret;
6070
6071 info.bfd_arch_info = bfd_scan_arch ("aarch64");
6072
6073 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
6074 SELF_CHECK (gdbarch != NULL);
6075
6076 aarch64_insn_decode_record aarch64_record;
6077
6078 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
6079 aarch64_record.regcache = NULL;
6080 aarch64_record.this_addr = 0;
6081 aarch64_record.gdbarch = gdbarch;
6082
6083 /* 20 00 80 f9 prfm pldl1keep, [x1] */
6084 aarch64_record.aarch64_insn = 0xf9800020;
6085 ret = aarch64_record_decode_insn_handler (&aarch64_record);
6086 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
6087 SELF_CHECK (aarch64_record.reg_rec_count == 0);
6088 SELF_CHECK (aarch64_record.mem_rec_count == 0);
6089
6090 deallocate_reg_mem (&aarch64_record);
6091 }
6092
6093 } /* namespace selftests */
6094 #endif /* GDB_SELF_TEST */
6095
6096 /* Parse the current instruction and record, in record_arch_list, the
6097 values of the registers and memory that the instruction will change.
6098 Return -1 if something is wrong. */
6099
6100 int
6101 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
6102 CORE_ADDR insn_addr)
6103 {
6104 uint32_t rec_no = 0;
6105 const uint8_t insn_size = 4;
6106 int ret = 0;
6107 gdb_byte buf[insn_size];
6108 aarch64_insn_decode_record aarch64_record;
6109
6110 memset (&buf[0], 0, insn_size);
6111 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
6112 if (target_read_memory (insn_addr, &buf[0], insn_size) != 0)
    return -1;
6113 aarch64_record.aarch64_insn
6114 = (uint32_t) extract_unsigned_integer (&buf[0],
6115 insn_size,
6116 gdbarch_byte_order (gdbarch));
6117 aarch64_record.regcache = regcache;
6118 aarch64_record.this_addr = insn_addr;
6119 aarch64_record.gdbarch = gdbarch;
6120
6121 ret = aarch64_record_decode_insn_handler (&aarch64_record);
6122 if (ret == AARCH64_RECORD_UNSUPPORTED)
6123 {
6124 gdb_printf (gdb_stderr,
6125 _("Process record does not support instruction "
6126 "0x%0x at address %s.\n"),
6127 aarch64_record.aarch64_insn,
6128 paddress (gdbarch, insn_addr));
6129 ret = -1;
6130 }
6131
6132 if (0 == ret)
6133 {
6134 /* Record registers. */
6135 record_full_arch_list_add_reg (aarch64_record.regcache,
6136 AARCH64_PC_REGNUM);
6137 /* Always record register CPSR. */
6138 record_full_arch_list_add_reg (aarch64_record.regcache,
6139 AARCH64_CPSR_REGNUM);
6140 if (aarch64_record.aarch64_regs)
6141 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
6142 if (record_full_arch_list_add_reg (aarch64_record.regcache,
6143 aarch64_record.aarch64_regs[rec_no]))
6144 ret = -1;
6145
6146 /* Record memories. */
6147 if (aarch64_record.aarch64_mems)
6148 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
6149 if (record_full_arch_list_add_mem
6150 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
6151 aarch64_record.aarch64_mems[rec_no].len))
6152 ret = -1;
6153
6154 if (record_full_arch_list_add_end ())
6155 ret = -1;
6156 }
6157
6158 deallocate_reg_mem (&aarch64_record);
6159 return ret;
6160 }