/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4
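
/* For example, struct { float x, y, z, w; } is a Homogeneous
   Floating-Point Aggregate of four floats and still qualifies for
   SIMD&FP register passing, while adding a fifth float member would
   exceed this limit and fall back to the general passing rules.  */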

/* All possible aarch64 target descriptors.  */
static target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/][2 /* mte */];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
			 struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
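
/* For example (illustrative values only), on a Linux target with a
   48-bit virtual address space the kernel typically reports a code
   mask of 0x007f000000000000, so a signed LR value such as
   0x005f0000004005a4 unmasks to the real return address:

     0x005f0000004005a4 & ~0x007f000000000000 == 0x00000000004005a4

   The actual masks always come from the target's pauth_cmask and
   pauth_dmask registers.  */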

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
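
/* For example, a typical (compiler-dependent) prologue such as

     stp x29, x30, [sp, #-32]!
     mov x29, sp
     str x19, [sp, #16]

   results in framereg == AARCH64_FP_REGNUM and framesize == 32, with
   x29, x30 and x19 recorded at offsets -32, -24 and -16 from the
   previous SP.  */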

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice an
     SP-to-FP move or if we are using the SP as the base register for
     storing data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff  /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->pauth_ra_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->pauth_ra_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrarily large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->pauth_ra_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about a stub frame
   for *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
static frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
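
/* Note: compilers that sign the return address typically emit
   .cfi_negate_ra_state (DW_CFA_AARCH64_negate_ra_state) right after
   paciasp and again after autiasp, so the expression installed above
   flips between DW_OP_lit1 (mangled) and DW_OP_lit0 (not mangled) as
   the unwinder crosses those points.  */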

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
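
/* A BRK instruction encodes as 0xd4200000 | (imm16 << 5), so the mask
   above accepts any immediate; e.g. "brk #0x0" is 0xd4200000 and
   "brk #0x1" is 0xd4200020.  */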

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
					     gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple choices
	 of such instructions with different immediate values.  Different OS'
	 may use a different variation, but they have the same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
	return 16;
      else
	return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
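
/* For example, a GCC-style vector such as
   int __attribute__((vector_size (32))) has a natural alignment of 32
   bytes, but the rule above caps it at 16, while an 8-byte vector
   keeps its natural 8-byte alignment.  */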

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
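
/* A few illustrative cases:

     struct hfa { float a; float b[2]; };   - count 3, fundamental float
     struct mix { float f; double d; };     - rejected, mixed base types
     float _Complex fc;                     - count 2, fundamental float

   The first and last qualify for SIMD&FP register passing; the middle
   one falls back to the general rules.  */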

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};
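
/* For example, for a call f (int, double, struct { float x, y; }) the
   int is passed in x0 (ngrn 0 -> 1), the double in v0 (nsrn 0 -> 1),
   and the two-float HFA in v1/v2 (nsrn -> 3); nsaa stays 0 because
   nothing spills to the stack.  (Illustrative; see the AAPCS64.)  */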

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
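
/* For example, on a big-endian target a 12-byte struct passed in
   x0/x1 leaves its final 4 bytes in the most significant bytes of x1:
   the shift above moves the partial chunk up by (8 - 4) * 8 = 32
   bits, preserving the memory image of the argument.  */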

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
			info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
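
/* For example, passing a 12-byte struct (natural alignment 4, so
   ALIGN becomes 8) when nsaa == 0 pushes a 12-byte item followed by a
   4-byte padding item, leaving nsaa == 16 and the next slot 8-byte
   aligned.  */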

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and that there are
   enough spare V registers.  A return value of false is an error state,
   as the value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&arg_type->field (i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

1850 /* If arg can be passed in V registers as per the AAPCS64, then do so
1851 if there are enough spare registers. */
1852 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1853 &fundamental_type))
1854 {
1855 if (info.nsrn + elements <= 8)
1856 {
1857 /* We know that we have sufficient registers available, therefore
1858 this will never need to fall back to the stack. */
1859 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1860 arg))
1861 gdb_assert_not_reached ("Failed to push args");
1862 }
1863 else
1864 {
1865 info.nsrn = 8;
1866 pass_on_stack (&info, arg_type, arg);
1867 }
1868 continue;
1869 }
1870
1871 switch (arg_type->code ())
1872 {
1873 case TYPE_CODE_INT:
1874 case TYPE_CODE_BOOL:
1875 case TYPE_CODE_CHAR:
1876 case TYPE_CODE_RANGE:
1877 case TYPE_CODE_ENUM:
1878 if (len < 4)
1879 {
1880 /* Promote to 32 bit integer. */
1881 if (arg_type->is_unsigned ())
1882 arg_type = builtin_type (gdbarch)->builtin_uint32;
1883 else
1884 arg_type = builtin_type (gdbarch)->builtin_int32;
1885 arg = value_cast (arg_type, arg);
1886 }
1887 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1888 break;
1889
1890 case TYPE_CODE_STRUCT:
1891 case TYPE_CODE_ARRAY:
1892 case TYPE_CODE_UNION:
1893 if (len > 16)
1894 {
1895 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1896 invisible reference. */
1897
1898 /* Allocate aligned storage. */
1899 sp = align_down (sp - len, 16);
1900
1901 /* Write the real data onto the stack. */
1902 write_memory (sp, value_contents (arg), len);
1903
1904 /* Construct the indirection. */
1905 arg_type = lookup_pointer_type (arg_type);
1906 arg = value_from_pointer (arg_type, sp);
1907 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1908 }
1909 else
1910 /* PCS C.15 / C.18: the aggregate is passed as multiple values. */
1911 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1912 break;
1913
1914 default:
1915 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1916 break;
1917 }
1918 }
1919
1920 /* Make sure the stack retains 16-byte alignment. */
1921 if (info.nsaa & 15)
1922 sp -= 16 - (info.nsaa & 15);
1923
1924 while (!info.si.empty ())
1925 {
1926 const stack_item_t &si = info.si.back ();
1927
1928 sp -= si.len;
1929 if (si.data != NULL)
1930 write_memory (sp, si.data, si.len);
1931 info.si.pop_back ();
1932 }
1933
1934 /* Finally, update the SP register. */
1935 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1936
1937 return sp;
1938 }
1939
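/* A hedged illustration of the marshalling above (the register
   assignments assume this is the entire argument list):

     struct big { char bytes[24]; };   -- larger than 16 bytes
     void callee (short n, double d, struct big b);

   N is smaller than 4 bytes, so it is promoted to a 32-bit integer and
   passed in X0; D is a VFP candidate and goes in V0; B exceeds 16
   bytes, so a copy is written to 16-byte-aligned stack storage and its
   address is passed in X1 (PCS B.7).  Any arguments that overflow the
   registers land in the NSAA stack area, which is drained from info.si
   above and re-aligned to 16 bytes before SP is written.  */
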
1940 /* Implement the "frame_align" gdbarch method. */
1941
1942 static CORE_ADDR
1943 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1944 {
1945 /* Align the stack to sixteen bytes. */
1946 return sp & ~(CORE_ADDR) 15;
1947 }
1948
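/* For example, aarch64_frame_align (gdbarch, 0x7ffffffffff9) yields
   0x7ffffffffff0: the low four bits are cleared, which can only keep
   SP in place or move it downwards, as required for a downward-growing
   stack.  */
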
1949 /* Return the type for an AdvSISD Q register. */
1950
1951 static struct type *
1952 aarch64_vnq_type (struct gdbarch *gdbarch)
1953 {
1954 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1955
1956 if (tdep->vnq_type == NULL)
1957 {
1958 struct type *t;
1959 struct type *elem;
1960
1961 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1962 TYPE_CODE_UNION);
1963
1964 elem = builtin_type (gdbarch)->builtin_uint128;
1965 append_composite_type_field (t, "u", elem);
1966
1967 elem = builtin_type (gdbarch)->builtin_int128;
1968 append_composite_type_field (t, "s", elem);
1969
1970 tdep->vnq_type = t;
1971 }
1972
1973 return tdep->vnq_type;
1974 }
1975
1976 /* Return the type for an AdvSISD D register. */
1977
1978 static struct type *
1979 aarch64_vnd_type (struct gdbarch *gdbarch)
1980 {
1981 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1982
1983 if (tdep->vnd_type == NULL)
1984 {
1985 struct type *t;
1986 struct type *elem;
1987
1988 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1989 TYPE_CODE_UNION);
1990
1991 elem = builtin_type (gdbarch)->builtin_double;
1992 append_composite_type_field (t, "f", elem);
1993
1994 elem = builtin_type (gdbarch)->builtin_uint64;
1995 append_composite_type_field (t, "u", elem);
1996
1997 elem = builtin_type (gdbarch)->builtin_int64;
1998 append_composite_type_field (t, "s", elem);
1999
2000 tdep->vnd_type = t;
2001 }
2002
2003 return tdep->vnd_type;
2004 }
2005
2006 /* Return the type for an AdvSISD S register. */
2007
2008 static struct type *
2009 aarch64_vns_type (struct gdbarch *gdbarch)
2010 {
2011 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2012
2013 if (tdep->vns_type == NULL)
2014 {
2015 struct type *t;
2016 struct type *elem;
2017
2018 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2019 TYPE_CODE_UNION);
2020
2021 elem = builtin_type (gdbarch)->builtin_float;
2022 append_composite_type_field (t, "f", elem);
2023
2024 elem = builtin_type (gdbarch)->builtin_uint32;
2025 append_composite_type_field (t, "u", elem);
2026
2027 elem = builtin_type (gdbarch)->builtin_int32;
2028 append_composite_type_field (t, "s", elem);
2029
2030 tdep->vns_type = t;
2031 }
2032
2033 return tdep->vns_type;
2034 }
2035
2036 /* Return the type for an AdvSISD H register. */
2037
2038 static struct type *
2039 aarch64_vnh_type (struct gdbarch *gdbarch)
2040 {
2041 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2042
2043 if (tdep->vnh_type == NULL)
2044 {
2045 struct type *t;
2046 struct type *elem;
2047
2048 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2049 TYPE_CODE_UNION);
2050
2051 elem = builtin_type (gdbarch)->builtin_bfloat16;
2052 append_composite_type_field (t, "bf", elem);
2053
2054 elem = builtin_type (gdbarch)->builtin_half;
2055 append_composite_type_field (t, "f", elem);
2056
2057 elem = builtin_type (gdbarch)->builtin_uint16;
2058 append_composite_type_field (t, "u", elem);
2059
2060 elem = builtin_type (gdbarch)->builtin_int16;
2061 append_composite_type_field (t, "s", elem);
2062
2063 tdep->vnh_type = t;
2064 }
2065
2066 return tdep->vnh_type;
2067 }
2068
2069 /* Return the type for an AdvSISD B register. */
2070
2071 static struct type *
2072 aarch64_vnb_type (struct gdbarch *gdbarch)
2073 {
2074 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2075
2076 if (tdep->vnb_type == NULL)
2077 {
2078 struct type *t;
2079 struct type *elem;
2080
2081 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2082 TYPE_CODE_UNION);
2083
2084 elem = builtin_type (gdbarch)->builtin_uint8;
2085 append_composite_type_field (t, "u", elem);
2086
2087 elem = builtin_type (gdbarch)->builtin_int8;
2088 append_composite_type_field (t, "s", elem);
2089
2090 tdep->vnb_type = t;
2091 }
2092
2093 return tdep->vnb_type;
2094 }
2095
2096 /* Return the type for an AdvSISD V register. */
2097
2098 static struct type *
2099 aarch64_vnv_type (struct gdbarch *gdbarch)
2100 {
2101 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2102
2103 if (tdep->vnv_type == NULL)
2104 {
2105 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2106 slice of the non-pseudo vector registers. However, NEON V registers
2107 are always vector registers, and need constructing as such. */
2108 const struct builtin_type *bt = builtin_type (gdbarch);
2109
2110 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2111 TYPE_CODE_UNION);
2112
2113 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2114 TYPE_CODE_UNION);
2115 append_composite_type_field (sub, "f",
2116 init_vector_type (bt->builtin_double, 2));
2117 append_composite_type_field (sub, "u",
2118 init_vector_type (bt->builtin_uint64, 2));
2119 append_composite_type_field (sub, "s",
2120 init_vector_type (bt->builtin_int64, 2));
2121 append_composite_type_field (t, "d", sub);
2122
2123 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2124 TYPE_CODE_UNION);
2125 append_composite_type_field (sub, "f",
2126 init_vector_type (bt->builtin_float, 4));
2127 append_composite_type_field (sub, "u",
2128 init_vector_type (bt->builtin_uint32, 4));
2129 append_composite_type_field (sub, "s",
2130 init_vector_type (bt->builtin_int32, 4));
2131 append_composite_type_field (t, "s", sub);
2132
2133 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2134 TYPE_CODE_UNION);
2135 append_composite_type_field (sub, "bf",
2136 init_vector_type (bt->builtin_bfloat16, 8));
2137 append_composite_type_field (sub, "f",
2138 init_vector_type (bt->builtin_half, 8));
2139 append_composite_type_field (sub, "u",
2140 init_vector_type (bt->builtin_uint16, 8));
2141 append_composite_type_field (sub, "s",
2142 init_vector_type (bt->builtin_int16, 8));
2143 append_composite_type_field (t, "h", sub);
2144
2145 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2146 TYPE_CODE_UNION);
2147 append_composite_type_field (sub, "u",
2148 init_vector_type (bt->builtin_uint8, 16));
2149 append_composite_type_field (sub, "s",
2150 init_vector_type (bt->builtin_int8, 16));
2151 append_composite_type_field (t, "b", sub);
2152
2153 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2154 TYPE_CODE_UNION);
2155 append_composite_type_field (sub, "u",
2156 init_vector_type (bt->builtin_uint128, 1));
2157 append_composite_type_field (sub, "s",
2158 init_vector_type (bt->builtin_int128, 1));
2159 append_composite_type_field (t, "q", sub);
2160
2161 tdep->vnv_type = t;
2162 }
2163
2164 return tdep->vnv_type;
2165 }
2166
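/* A usage sketch (the session output is illustrative): the union types
   above let a user view one V register through any lane arrangement
   without casting, e.g.

     (gdb) print $v0.d.f   -- two doubles
     (gdb) print $v0.s.u   -- four uint32 lanes
     (gdb) print $v0.b.s   -- sixteen int8 lanes

   Each field merely selects a differently-typed view of the same
   128-bit payload.  */
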
2167 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2168
2169 static int
2170 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2171 {
2172 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2173
2174 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2175 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2176
2177 if (reg == AARCH64_DWARF_SP)
2178 return AARCH64_SP_REGNUM;
2179
2180 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2181 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2182
2183 if (reg == AARCH64_DWARF_SVE_VG)
2184 return AARCH64_SVE_VG_REGNUM;
2185
2186 if (reg == AARCH64_DWARF_SVE_FFR)
2187 return AARCH64_SVE_FFR_REGNUM;
2188
2189 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2190 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2191
2192 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2193 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2194
2195 if (tdep->has_pauth ())
2196 {
2197 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2198 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2199
2200 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2201 return tdep->pauth_ra_state_regnum;
2202 }
2203
2204 return -1;
2205 }
2206
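/* Worked examples of the mapping above, using the AArch64 DWARF ABI
   numbering (X0 = 0, SP = 31, V0 = 64): DWARF register 0 maps to
   AARCH64_X0_REGNUM, DWARF 31 to AARCH64_SP_REGNUM, and DWARF 64 + n
   to v<n>.  Any DWARF number not handled above yields -1, which
   callers treat as "no such register".  */
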
2207 /* Implement the "print_insn" gdbarch method. */
2208
2209 static int
2210 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2211 {
2212 info->symbols = NULL;
2213 return default_print_insn (memaddr, info);
2214 }
2215
2216 /* AArch64 BRK software debug mode instruction.
2217 Note that AArch64 code is always little-endian.
2218 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2219 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2220
2221 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2222
2223 /* Extract from an array REGS containing the (raw) register state a
2224 function return value of type TYPE, and copy that, in virtual
2225 format, into VALBUF. */
2226
2227 static void
2228 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2229 gdb_byte *valbuf)
2230 {
2231 struct gdbarch *gdbarch = regs->arch ();
2232 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2233 int elements;
2234 struct type *fundamental_type;
2235
2236 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2237 &fundamental_type))
2238 {
2239 int len = TYPE_LENGTH (fundamental_type);
2240
2241 for (int i = 0; i < elements; i++)
2242 {
2243 int regno = AARCH64_V0_REGNUM + i;
2244 /* Enough space for a full vector register. */
2245 gdb_byte buf[register_size (gdbarch, regno)];
2246 gdb_assert (len <= sizeof (buf));
2247
2248 aarch64_debug_printf
2249 ("read HFA or HVA return value element %d from %s",
2250 i + 1, gdbarch_register_name (gdbarch, regno));
2251
2252 regs->cooked_read (regno, buf);
2253
2254 memcpy (valbuf, buf, len);
2255 valbuf += len;
2256 }
2257 }
2258 else if (type->code () == TYPE_CODE_INT
2259 || type->code () == TYPE_CODE_CHAR
2260 || type->code () == TYPE_CODE_BOOL
2261 || type->code () == TYPE_CODE_PTR
2262 || TYPE_IS_REFERENCE (type)
2263 || type->code () == TYPE_CODE_ENUM)
2264 {
2265 /* If the type is a plain integer, then the access is
2266 straightforward. Otherwise we have to play around a bit
2267 more. */
2268 int len = TYPE_LENGTH (type);
2269 int regno = AARCH64_X0_REGNUM;
2270 ULONGEST tmp;
2271
2272 while (len > 0)
2273 {
2274 /* By using store_unsigned_integer we avoid having to do
2275 anything special for small big-endian values. */
2276 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2277 store_unsigned_integer (valbuf,
2278 (len > X_REGISTER_SIZE
2279 ? X_REGISTER_SIZE : len), byte_order, tmp);
2280 len -= X_REGISTER_SIZE;
2281 valbuf += X_REGISTER_SIZE;
2282 }
2283 }
2284 else
2285 {
2286 /* For a structure or union the behaviour is as if the value had
2287 been stored to word-aligned memory and then loaded into
2288 registers with 64-bit load instruction(s). */
2289 int len = TYPE_LENGTH (type);
2290 int regno = AARCH64_X0_REGNUM;
2291 bfd_byte buf[X_REGISTER_SIZE];
2292
2293 while (len > 0)
2294 {
2295 regs->cooked_read (regno++, buf);
2296 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2297 len -= X_REGISTER_SIZE;
2298 valbuf += X_REGISTER_SIZE;
2299 }
2300 }
2301 }
2302
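/* An illustrative case for the extraction above: a 12-byte

     struct s { int32_t a[3]; };

   is not a VFP candidate and does not exceed 16 bytes, so it comes
   back in X0/X1.  The aggregate loop reads X0 and copies all 8 bytes,
   then reads X1 and copies only the remaining 4 bytes into VALBUF.  */
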
2303
2304 /* Will a function return an aggregate type in memory or in a
2305 register? Return 0 if an aggregate type can be returned in a
2306 register, 1 if it must be returned in memory. */
2307
2308 static int
2309 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2310 {
2311 type = check_typedef (type);
2312 int elements;
2313 struct type *fundamental_type;
2314
2315 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2316 &fundamental_type))
2317 {
2318 /* v0-v7 are used to return values and one register is allocated
2319 for one member. However, HFA or HVA has at most four members. */
2320 return 0;
2321 }
2322
2323 if (TYPE_LENGTH (type) > 16)
2324 {
2325 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2326 invisible reference. */
2327
2328 return 1;
2329 }
2330
2331 return 0;
2332 }
2333
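/* Two hedged examples of the rule above:

     struct hfa4 { float a, b, c, d; };  -- HFA, returned in S0-S3
     struct big  { int32_t v[5]; };      -- 20 bytes, returned in
                                            memory via X8

   The first is a VFP candidate (at most four members of a single
   floating-point type), so it is returned in registers despite being
   16 bytes; the second fails the candidate test and exceeds 16 bytes,
   so this function returns 1.  */
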
2334 /* Write into appropriate registers a function return value of type
2335 TYPE, given in virtual format. */
2336
2337 static void
2338 aarch64_store_return_value (struct type *type, struct regcache *regs,
2339 const gdb_byte *valbuf)
2340 {
2341 struct gdbarch *gdbarch = regs->arch ();
2342 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2343 int elements;
2344 struct type *fundamental_type;
2345
2346 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2347 &fundamental_type))
2348 {
2349 int len = TYPE_LENGTH (fundamental_type);
2350
2351 for (int i = 0; i < elements; i++)
2352 {
2353 int regno = AARCH64_V0_REGNUM + i;
2354 /* Enough space for a full vector register. */
2355 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2356 gdb_assert (len <= sizeof (tmpbuf));
2357
2358 aarch64_debug_printf
2359 ("write HFA or HVA return value element %d to %s",
2360 i + 1, gdbarch_register_name (gdbarch, regno));
2361
2362 memcpy (tmpbuf, valbuf,
2363 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2364 regs->cooked_write (regno, tmpbuf);
2365 valbuf += len;
2366 }
2367 }
2368 else if (type->code () == TYPE_CODE_INT
2369 || type->code () == TYPE_CODE_CHAR
2370 || type->code () == TYPE_CODE_BOOL
2371 || type->code () == TYPE_CODE_PTR
2372 || TYPE_IS_REFERENCE (type)
2373 || type->code () == TYPE_CODE_ENUM)
2374 {
2375 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2376 {
2377 /* Values of one word or less are zero/sign-extended and
2378 returned in X0. */
2379 bfd_byte tmpbuf[X_REGISTER_SIZE];
2380 LONGEST val = unpack_long (type, valbuf);
2381
2382 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2383 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2384 }
2385 else
2386 {
2387 /* Integral values greater than one word are stored in
2388 consecutive registers starting with X0. This will always
2389 be a multiple of the register size. */
2390 int len = TYPE_LENGTH (type);
2391 int regno = AARCH64_X0_REGNUM;
2392
2393 while (len > 0)
2394 {
2395 regs->cooked_write (regno++, valbuf);
2396 len -= X_REGISTER_SIZE;
2397 valbuf += X_REGISTER_SIZE;
2398 }
2399 }
2400 }
2401 else
2402 {
2403 /* For a structure or union the behaviour is as if the value had
2404 been stored to word-aligned memory and then loaded into
2405 registers with 64-bit load instruction(s). */
2406 int len = TYPE_LENGTH (type);
2407 int regno = AARCH64_X0_REGNUM;
2408 bfd_byte tmpbuf[X_REGISTER_SIZE];
2409
2410 while (len > 0)
2411 {
2412 memcpy (tmpbuf, valbuf,
2413 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2414 regs->cooked_write (regno++, tmpbuf);
2415 len -= X_REGISTER_SIZE;
2416 valbuf += X_REGISTER_SIZE;
2417 }
2418 }
2419 }
2420
2421 /* Implement the "return_value" gdbarch method. */
2422
2423 static enum return_value_convention
2424 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2425 struct type *valtype, struct regcache *regcache,
2426 gdb_byte *readbuf, const gdb_byte *writebuf)
2427 {
2428
2429 if (valtype->code () == TYPE_CODE_STRUCT
2430 || valtype->code () == TYPE_CODE_UNION
2431 || valtype->code () == TYPE_CODE_ARRAY)
2432 {
2433 if (aarch64_return_in_memory (gdbarch, valtype))
2434 {
2435 aarch64_debug_printf ("return value in memory");
2436 return RETURN_VALUE_STRUCT_CONVENTION;
2437 }
2438 }
2439
2440 if (writebuf)
2441 aarch64_store_return_value (valtype, regcache, writebuf);
2442
2443 if (readbuf)
2444 aarch64_extract_return_value (valtype, regcache, readbuf);
2445
2446 aarch64_debug_printf ("return value in registers");
2447
2448 return RETURN_VALUE_REGISTER_CONVENTION;
2449 }
2450
2451 /* Implement the "get_longjmp_target" gdbarch method. */
2452
2453 static int
2454 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2455 {
2456 CORE_ADDR jb_addr;
2457 gdb_byte buf[X_REGISTER_SIZE];
2458 struct gdbarch *gdbarch = get_frame_arch (frame);
2459 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2460 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2461
2462 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2463
2464 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2465 X_REGISTER_SIZE))
2466 return 0;
2467
2468 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2469 return 1;
2470 }
2471
2472 /* Implement the "gen_return_address" gdbarch method. */
2473
2474 static void
2475 aarch64_gen_return_address (struct gdbarch *gdbarch,
2476 struct agent_expr *ax, struct axs_value *value,
2477 CORE_ADDR scope)
2478 {
2479 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2480 value->kind = axs_lvalue_register;
2481 value->u.reg = AARCH64_LR_REGNUM;
2482 }
2483 \f
2484
2485 /* Return the pseudo register name corresponding to register REGNUM. */
2486
2487 static const char *
2488 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2489 {
2490 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2491
2492 static const char *const q_name[] =
2493 {
2494 "q0", "q1", "q2", "q3",
2495 "q4", "q5", "q6", "q7",
2496 "q8", "q9", "q10", "q11",
2497 "q12", "q13", "q14", "q15",
2498 "q16", "q17", "q18", "q19",
2499 "q20", "q21", "q22", "q23",
2500 "q24", "q25", "q26", "q27",
2501 "q28", "q29", "q30", "q31",
2502 };
2503
2504 static const char *const d_name[] =
2505 {
2506 "d0", "d1", "d2", "d3",
2507 "d4", "d5", "d6", "d7",
2508 "d8", "d9", "d10", "d11",
2509 "d12", "d13", "d14", "d15",
2510 "d16", "d17", "d18", "d19",
2511 "d20", "d21", "d22", "d23",
2512 "d24", "d25", "d26", "d27",
2513 "d28", "d29", "d30", "d31",
2514 };
2515
2516 static const char *const s_name[] =
2517 {
2518 "s0", "s1", "s2", "s3",
2519 "s4", "s5", "s6", "s7",
2520 "s8", "s9", "s10", "s11",
2521 "s12", "s13", "s14", "s15",
2522 "s16", "s17", "s18", "s19",
2523 "s20", "s21", "s22", "s23",
2524 "s24", "s25", "s26", "s27",
2525 "s28", "s29", "s30", "s31",
2526 };
2527
2528 static const char *const h_name[] =
2529 {
2530 "h0", "h1", "h2", "h3",
2531 "h4", "h5", "h6", "h7",
2532 "h8", "h9", "h10", "h11",
2533 "h12", "h13", "h14", "h15",
2534 "h16", "h17", "h18", "h19",
2535 "h20", "h21", "h22", "h23",
2536 "h24", "h25", "h26", "h27",
2537 "h28", "h29", "h30", "h31",
2538 };
2539
2540 static const char *const b_name[] =
2541 {
2542 "b0", "b1", "b2", "b3",
2543 "b4", "b5", "b6", "b7",
2544 "b8", "b9", "b10", "b11",
2545 "b12", "b13", "b14", "b15",
2546 "b16", "b17", "b18", "b19",
2547 "b20", "b21", "b22", "b23",
2548 "b24", "b25", "b26", "b27",
2549 "b28", "b29", "b30", "b31",
2550 };
2551
2552 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2553
2554 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2555 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2556
2557 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2558 return d_name[p_regnum - AARCH64_D0_REGNUM];
2559
2560 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2561 return s_name[p_regnum - AARCH64_S0_REGNUM];
2562
2563 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2564 return h_name[p_regnum - AARCH64_H0_REGNUM];
2565
2566 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2567 return b_name[p_regnum - AARCH64_B0_REGNUM];
2568
2569 if (tdep->has_sve ())
2570 {
2571 static const char *const sve_v_name[] =
2572 {
2573 "v0", "v1", "v2", "v3",
2574 "v4", "v5", "v6", "v7",
2575 "v8", "v9", "v10", "v11",
2576 "v12", "v13", "v14", "v15",
2577 "v16", "v17", "v18", "v19",
2578 "v20", "v21", "v22", "v23",
2579 "v24", "v25", "v26", "v27",
2580 "v28", "v29", "v30", "v31",
2581 };
2582
2583 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2584 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2585 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2586 }
2587
2588 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2589 prevents it from being read by methods such as
2590 mi_cmd_trace_frame_collected. */
2591 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2592 return "";
2593
2594 internal_error (__FILE__, __LINE__,
2595 _("aarch64_pseudo_register_name: bad register number %d"),
2596 p_regnum);
2597 }
2598
2599 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2600
2601 static struct type *
2602 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2603 {
2604 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2605
2606 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2607
2608 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2609 return aarch64_vnq_type (gdbarch);
2610
2611 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2612 return aarch64_vnd_type (gdbarch);
2613
2614 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2615 return aarch64_vns_type (gdbarch);
2616
2617 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2618 return aarch64_vnh_type (gdbarch);
2619
2620 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2621 return aarch64_vnb_type (gdbarch);
2622
2623 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2624 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2625 return aarch64_vnv_type (gdbarch);
2626
2627 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2628 return builtin_type (gdbarch)->builtin_uint64;
2629
2630 internal_error (__FILE__, __LINE__,
2631 _("aarch64_pseudo_register_type: bad register number %d"),
2632 p_regnum);
2633 }
2634
2635 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2636
2637 static int
2638 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2639 struct reggroup *group)
2640 {
2641 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2642
2643 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2644
2645 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2646 return group == all_reggroup || group == vector_reggroup;
2647 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2648 return (group == all_reggroup || group == vector_reggroup
2649 || group == float_reggroup);
2650 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2651 return (group == all_reggroup || group == vector_reggroup
2652 || group == float_reggroup);
2653 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2654 return group == all_reggroup || group == vector_reggroup;
2655 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2656 return group == all_reggroup || group == vector_reggroup;
2657 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2658 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2659 return group == all_reggroup || group == vector_reggroup;
2660 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2661 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2662 return 0;
2663
2664 return group == all_reggroup;
2665 }
2666
2667 /* Helper for aarch64_pseudo_read_value. */
2668
2669 static struct value *
2670 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2671 readable_regcache *regcache, int regnum_offset,
2672 int regsize, struct value *result_value)
2673 {
2674 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2675
2676 /* Enough space for a full vector register. */
2677 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2678 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2679
2680 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2681 mark_value_bytes_unavailable (result_value, 0,
2682 TYPE_LENGTH (value_type (result_value)));
2683 else
2684 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2685
2686 return result_value;
2687 }
2688
2689 /* Implement the "pseudo_register_read_value" gdbarch method. */
2690
2691 static struct value *
2692 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2693 int regnum)
2694 {
2695 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2696 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2697
2698 VALUE_LVAL (result_value) = lval_register;
2699 VALUE_REGNUM (result_value) = regnum;
2700
2701 regnum -= gdbarch_num_regs (gdbarch);
2702
2703 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2704 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2705 regnum - AARCH64_Q0_REGNUM,
2706 Q_REGISTER_SIZE, result_value);
2707
2708 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2709 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2710 regnum - AARCH64_D0_REGNUM,
2711 D_REGISTER_SIZE, result_value);
2712
2713 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2714 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2715 regnum - AARCH64_S0_REGNUM,
2716 S_REGISTER_SIZE, result_value);
2717
2718 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2719 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2720 regnum - AARCH64_H0_REGNUM,
2721 H_REGISTER_SIZE, result_value);
2722
2723 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2724 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2725 regnum - AARCH64_B0_REGNUM,
2726 B_REGISTER_SIZE, result_value);
2727
2728 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2729 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2730 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2731 regnum - AARCH64_SVE_V0_REGNUM,
2732 V_REGISTER_SIZE, result_value);
2733
2734 gdb_assert_not_reached ("regnum out of bounds");
2735 }
2736
2737 /* Helper for aarch64_pseudo_write. */
2738
2739 static void
2740 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2741 int regnum_offset, int regsize, const gdb_byte *buf)
2742 {
2743 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2744
2745 /* Enough space for a full vector register. */
2746 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2747 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2748
2749 /* Ensure the register buffer is zero. We want gdb writes of the
2750 various 'scalar' pseudo registers to behave like architectural
2751 writes: register-width bytes are written and the remainder is set
2752 to zero. */
2753 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2754
2755 memcpy (reg_buf, buf, regsize);
2756 regcache->raw_write (v_regnum, reg_buf);
2757 }
2758
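/* For instance, writing the S0 pseudo register (regsize 4) fills the
   low 4 bytes of V0 and clears everything above, mirroring how an
   architectural write to a scalar S register zeroes the rest of the
   vector register.  A sketch of the effect, assuming V0 previously
   held all 0xff bytes:

     write s0 = 0x11223344
     v0 becomes 44 33 22 11 00 00 ... 00   (little-endian bytes)  */
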
2759 /* Implement the "pseudo_register_write" gdbarch method. */
2760
2761 static void
2762 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2763 int regnum, const gdb_byte *buf)
2764 {
2765 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2766 regnum -= gdbarch_num_regs (gdbarch);
2767
2768 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2769 return aarch64_pseudo_write_1 (gdbarch, regcache,
2770 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2771 buf);
2772
2773 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2774 return aarch64_pseudo_write_1 (gdbarch, regcache,
2775 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2776 buf);
2777
2778 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2779 return aarch64_pseudo_write_1 (gdbarch, regcache,
2780 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2781 buf);
2782
2783 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2784 return aarch64_pseudo_write_1 (gdbarch, regcache,
2785 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2786 buf);
2787
2788 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2789 return aarch64_pseudo_write_1 (gdbarch, regcache,
2790 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2791 buf);
2792
2793 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2794 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2795 return aarch64_pseudo_write_1 (gdbarch, regcache,
2796 regnum - AARCH64_SVE_V0_REGNUM,
2797 V_REGISTER_SIZE, buf);
2798
2799 gdb_assert_not_reached ("regnum out of bounds");
2800 }
2801
2802 /* Callback function for user_reg_add. */
2803
2804 static struct value *
2805 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2806 {
2807 const int *reg_p = (const int *) baton;
2808
2809 return value_of_register (*reg_p, frame);
2810 }
2811 \f
2812
2813 /* Implement the "software_single_step" gdbarch method, needed to
2814 single step through atomic sequences on AArch64. */
2815
2816 static std::vector<CORE_ADDR>
2817 aarch64_software_single_step (struct regcache *regcache)
2818 {
2819 struct gdbarch *gdbarch = regcache->arch ();
2820 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2821 const int insn_size = 4;
2822 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2823 CORE_ADDR pc = regcache_read_pc (regcache);
2824 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2825 CORE_ADDR loc = pc;
2826 CORE_ADDR closing_insn = 0;
2827 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2828 byte_order_for_code);
2829 int index;
2830 int insn_count;
2831 int bc_insn_count = 0; /* Conditional branch instruction count. */
2832 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2833 aarch64_inst inst;
2834
2835 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2836 return {};
2837
2838 /* Look for a Load Exclusive instruction which begins the sequence. */
2839 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2840 return {};
2841
2842 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2843 {
2844 loc += insn_size;
2845 insn = read_memory_unsigned_integer (loc, insn_size,
2846 byte_order_for_code);
2847
2848 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2849 return {};
2850 /* Check if the instruction is a conditional branch. */
2851 if (inst.opcode->iclass == condbranch)
2852 {
2853 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2854
2855 if (bc_insn_count >= 1)
2856 return {};
2857
2858 /* It is, so we'll try to set a breakpoint at the destination. */
2859 breaks[1] = loc + inst.operands[0].imm.value;
2860
2861 bc_insn_count++;
2862 last_breakpoint++;
2863 }
2864
2865 /* Look for the Store Exclusive which closes the atomic sequence. */
2866 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2867 {
2868 closing_insn = loc;
2869 break;
2870 }
2871 }
2872
2873 /* We didn't find a closing Store Exclusive instruction, fall back. */
2874 if (!closing_insn)
2875 return {};
2876
2877 /* Insert breakpoint after the end of the atomic sequence. */
2878 breaks[0] = loc + insn_size;
2879
2880 /* Check for duplicated breakpoints, and also check that the second
2881 breakpoint is not within the atomic sequence. */
2882 if (last_breakpoint
2883 && (breaks[1] == breaks[0]
2884 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2885 last_breakpoint = 0;
2886
2887 std::vector<CORE_ADDR> next_pcs;
2888
2889 /* Insert the breakpoint at the end of the sequence, and one at the
2890 destination of the conditional branch, if it exists. */
2891 for (index = 0; index <= last_breakpoint; index++)
2892 next_pcs.push_back (breaks[index]);
2893
2894 return next_pcs;
2895 }
2896
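/* A sketch of the kind of sequence handled above (illustrative
   assembly, not taken from a real program):

     retry:
       ldaxr  w1, [x0]      ; Load Exclusive opens the sequence
       add    w1, w1, #1
       stlxr  w2, w1, [x0]  ; Store Exclusive closes it
       cbnz   w2, retry
       ...                  ; breaks[0] is placed here

   Single-stepping each instruction would clear the exclusive monitor
   and the Store Exclusive could never succeed, so GDB instead places a
   breakpoint just past the sequence and continues over the whole
   thing.  A B.<cond> inside the sequence additionally gets a
   breakpoint at its destination (breaks[1]).  */
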
2897 struct aarch64_displaced_step_copy_insn_closure
2898 : public displaced_step_copy_insn_closure
2899 {
2900 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2901 is being displaced stepped. */
2902 bool cond = false;
2903
2904 /* PC adjustment offset after displaced stepping. If 0, then we don't
2905 write the PC back, assuming the PC is already the right address. */
2906 int32_t pc_adjust = 0;
2907 };
2908
2909 /* Data when visiting instructions for displaced stepping. */
2910
2911 struct aarch64_displaced_step_data
2912 {
2913 struct aarch64_insn_data base;
2914
2915 /* The address at which the instruction will be executed. */
2916 CORE_ADDR new_addr;
2917 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2918 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2919 /* Number of instructions in INSN_BUF. */
2920 unsigned insn_count;
2921 /* Registers when doing displaced stepping. */
2922 struct regcache *regs;
2923
2924 aarch64_displaced_step_copy_insn_closure *dsc;
2925 };
2926
2927 /* Implementation of aarch64_insn_visitor method "b". */
2928
2929 static void
2930 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2931 struct aarch64_insn_data *data)
2932 {
2933 struct aarch64_displaced_step_data *dsd
2934 = (struct aarch64_displaced_step_data *) data;
2935 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2936
2937 if (can_encode_int32 (new_offset, 28))
2938 {
2939 /* Emit B rather than BL, because executing BL on a new address
2940 will get the wrong address into LR. In order to avoid this,
2941 we emit B, and update LR if the instruction is BL. */
2942 emit_b (dsd->insn_buf, 0, new_offset);
2943 dsd->insn_count++;
2944 }
2945 else
2946 {
2947 /* Write NOP. */
2948 emit_nop (dsd->insn_buf);
2949 dsd->insn_count++;
2950 dsd->dsc->pc_adjust = offset;
2951 }
2952
2953 if (is_bl)
2954 {
2955 /* Update LR. */
2956 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2957 data->insn_addr + 4);
2958 }
2959 }
2960
2961 /* Implementation of aarch64_insn_visitor method "b_cond". */
2962
2963 static void
2964 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2965 struct aarch64_insn_data *data)
2966 {
2967 struct aarch64_displaced_step_data *dsd
2968 = (struct aarch64_displaced_step_data *) data;
2969
2970 /* GDB has to fix up the PC after displaced stepping this instruction
2971 differently according to whether the condition is true or false.
2972 Instead of checking COND against the condition flags, we can use
2973 the following instructions, and GDB can tell how to fix up the PC
2974 according to the PC value.
2975
2976 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2977 INSN1 ;
2978 TAKEN:
2979 INSN2
2980 */
2981
2982 emit_bcond (dsd->insn_buf, cond, 8);
2983 dsd->dsc->cond = true;
2984 dsd->dsc->pc_adjust = offset;
2985 dsd->insn_count = 1;
2986 }
2987
2988 /* Build an aarch64_register operand on the fly. If we know the register
2989 statically, we should make it a global as above instead of using this
2990 helper function. */
2991
2992 static struct aarch64_register
2993 aarch64_register (unsigned num, int is64)
2994 {
2995 return (struct aarch64_register) { num, is64 };
2996 }
2997
2998 /* Implementation of aarch64_insn_visitor method "cb". */
2999
3000 static void
3001 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3002 const unsigned rn, int is64,
3003 struct aarch64_insn_data *data)
3004 {
3005 struct aarch64_displaced_step_data *dsd
3006 = (struct aarch64_displaced_step_data *) data;
3007
3008 /* The offset is out of range for a compare and branch
3009 instruction. We can use the following instructions instead:
3010
3011 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3012 INSN1 ;
3013 TAKEN:
3014 INSN2
3015 */
3016 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3017 dsd->insn_count = 1;
3018 dsd->dsc->cond = true;
3019 dsd->dsc->pc_adjust = offset;
3020 }
3021
3022 /* Implementation of aarch64_insn_visitor method "tb". */
3023
3024 static void
3025 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3026 const unsigned rt, unsigned bit,
3027 struct aarch64_insn_data *data)
3028 {
3029 struct aarch64_displaced_step_data *dsd
3030 = (struct aarch64_displaced_step_data *) data;
3031
3032 /* The offset is out of range for a test bit and branch
3033 instruction. We can use the following instructions instead:
3034
3035 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3036 INSN1 ;
3037 TAKEN:
3038 INSN2
3039
3040 */
3041 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3042 dsd->insn_count = 1;
3043 dsd->dsc->cond = true;
3044 dsd->dsc->pc_adjust = offset;
3045 }
3046
3047 /* Implementation of aarch64_insn_visitor method "adr". */
3048
3049 static void
3050 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3051 const int is_adrp, struct aarch64_insn_data *data)
3052 {
3053 struct aarch64_displaced_step_data *dsd
3054 = (struct aarch64_displaced_step_data *) data;
3055 /* We know exactly the address the ADR{P,} instruction will compute.
3056 We can just write it to the destination register. */
3057 CORE_ADDR address = data->insn_addr + offset;
3058
3059 if (is_adrp)
3060 {
3061 /* Clear the lower 12 bits of the address to get the 4K page. */
3062 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3063 address & ~0xfff);
3064 }
3065 else
3066 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3067 address);
3068
3069 dsd->dsc->pc_adjust = 4;
3070 emit_nop (dsd->insn_buf);
3071 dsd->insn_count = 1;
3072 }
3073
3074 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3075
3076 static void
3077 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3078 const unsigned rt, const int is64,
3079 struct aarch64_insn_data *data)
3080 {
3081 struct aarch64_displaced_step_data *dsd
3082 = (struct aarch64_displaced_step_data *) data;
3083 CORE_ADDR address = data->insn_addr + offset;
3084 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3085
3086 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3087 address);
3088
3089 if (is_sw)
3090 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3091 aarch64_register (rt, 1), zero);
3092 else
3093 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3094 aarch64_register (rt, 1), zero);
3095
3096 dsd->dsc->pc_adjust = 4;
3097 }
3098
3099 /* Implementation of aarch64_insn_visitor method "others". */
3100
3101 static void
3102 aarch64_displaced_step_others (const uint32_t insn,
3103 struct aarch64_insn_data *data)
3104 {
3105 struct aarch64_displaced_step_data *dsd
3106 = (struct aarch64_displaced_step_data *) data;
3107
3108 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3109 if (masked_insn == BLR)
3110 {
3111 /* Emit a BR to the same register and then update LR to the original
3112 address (similar to aarch64_displaced_step_b). */
3113 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3114 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3115 data->insn_addr + 4);
3116 }
3117 else
3118 aarch64_emit_insn (dsd->insn_buf, insn);
3119 dsd->insn_count = 1;
3120
3121 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3122 dsd->dsc->pc_adjust = 0;
3123 else
3124 dsd->dsc->pc_adjust = 4;
3125 }
3126
3127 static const struct aarch64_insn_visitor visitor =
3128 {
3129 aarch64_displaced_step_b,
3130 aarch64_displaced_step_b_cond,
3131 aarch64_displaced_step_cb,
3132 aarch64_displaced_step_tb,
3133 aarch64_displaced_step_adr,
3134 aarch64_displaced_step_ldr_literal,
3135 aarch64_displaced_step_others,
3136 };
3137
3138 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3139
3140 displaced_step_copy_insn_closure_up
3141 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3142 CORE_ADDR from, CORE_ADDR to,
3143 struct regcache *regs)
3144 {
3145 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3146 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3147 struct aarch64_displaced_step_data dsd;
3148 aarch64_inst inst;
3149
3150 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3151 return NULL;
3152
3153 /* Look for a Load Exclusive instruction which begins the sequence. */
3154 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3155 {
3156 /* We can't displaced-step atomic sequences. */
3157 return NULL;
3158 }
3159
3160 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3161 (new aarch64_displaced_step_copy_insn_closure);
3162 dsd.base.insn_addr = from;
3163 dsd.new_addr = to;
3164 dsd.regs = regs;
3165 dsd.dsc = dsc.get ();
3166 dsd.insn_count = 0;
3167 aarch64_relocate_instruction (insn, &visitor,
3168 (struct aarch64_insn_data *) &dsd);
3169 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3170
3171 if (dsd.insn_count != 0)
3172 {
3173 int i;
3174
3175 /* The instruction can be relocated to the scratch pad. Copy the
3176 relocated instruction(s) there. */
3177 for (i = 0; i < dsd.insn_count; i++)
3178 {
3179 displaced_debug_printf ("writing insn %.8x at %s",
3180 dsd.insn_buf[i],
3181 paddress (gdbarch, to + i * 4));
3182
3183 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3184 (ULONGEST) dsd.insn_buf[i]);
3185 }
3186 }
3187 else
3188 {
3189 dsc = NULL;
3190 }
3191
3192 /* This is a workaround for a problem with g++ 4.8. */
3193 return displaced_step_copy_insn_closure_up (dsc.release ());
3194 }
3195
3196 /* Implement the "displaced_step_fixup" gdbarch method. */
3197
3198 void
3199 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3200 struct displaced_step_copy_insn_closure *dsc_,
3201 CORE_ADDR from, CORE_ADDR to,
3202 struct regcache *regs)
3203 {
3204 aarch64_displaced_step_copy_insn_closure *dsc
3205 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3206
3207 ULONGEST pc;
3208
3209 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3210
3211 displaced_debug_printf ("PC after stepping: %s (was %s).",
3212 paddress (gdbarch, pc), paddress (gdbarch, to));
3213
3214 if (dsc->cond)
3215 {
3216 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3217 dsc->pc_adjust);
3218
3219 if (pc - to == 8)
3220 {
3221 /* Condition is true. */
3222 }
3223 else if (pc - to == 4)
3224 {
3225 /* Condition is false. */
3226 dsc->pc_adjust = 4;
3227 }
3228 else
3229 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3230
3231 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3232 dsc->pc_adjust);
3233 }
3234
3235 displaced_debug_printf ("%s PC by %d",
3236 dsc->pc_adjust ? "adjusting" : "not adjusting",
3237 dsc->pc_adjust);
3238
3239 if (dsc->pc_adjust != 0)
3240 {
3241 /* Make sure the previous instruction was executed (that is, the PC
3242 has changed). If the PC didn't change, then discard the adjustment
3243 offset; otherwise we would skip an instruction before it has
3244 actually been executed. */
3245 if ((pc - to) == 0)
3246 {
3247 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3248 dsc->pc_adjust = 0;
3249 }
3250
3251 displaced_debug_printf ("fixup: set PC to %s:%d",
3252 paddress (gdbarch, from), dsc->pc_adjust);
3253
3254 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3255 from + dsc->pc_adjust);
3256 }
3257 }
3258
3259 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3260
3261 bool
3262 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3263 {
3264 return true;
3265 }
3266
3267 /* Get the correct target description for the given VQ value.
3268 If VQ is zero then it is assumed SVE is not supported.
3269 (It is not possible to set VQ to zero on an SVE system).
3270
3271 MTE_P indicates the presence of the Memory Tagging Extension feature. */
3272
3273 const target_desc *
3274 aarch64_read_description (uint64_t vq, bool pauth_p, bool mte_p)
3275 {
3276 if (vq > AARCH64_MAX_SVE_VQ)
3277 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3278 AARCH64_MAX_SVE_VQ);
3279
3280 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p][mte_p];
3281
3282 if (tdesc == NULL)
3283 {
3284 tdesc = aarch64_create_target_description (vq, pauth_p, mte_p);
3285 tdesc_aarch64_list[vq][pauth_p][mte_p] = tdesc;
3286 }
3287
3288 return tdesc;
3289 }
3290
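/* A usage sketch: aarch64_read_description (2, false, false) returns
   (and caches in tdesc_aarch64_list) a description for an SVE target
   with VQ = 2, i.e. 32-byte Z registers and no pauth or MTE features;
   a repeated call with the same arguments returns the cached
   object.  */
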
3291 /* Return the VQ used when creating the target description TDESC. */
3292
3293 static uint64_t
3294 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3295 {
3296 const struct tdesc_feature *feature_sve;
3297
3298 if (!tdesc_has_registers (tdesc))
3299 return 0;
3300
3301 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3302
3303 if (feature_sve == nullptr)
3304 return 0;
3305
3306 uint64_t vl = tdesc_register_bitsize (feature_sve,
3307 aarch64_sve_register_names[0]) / 8;
3308 return sve_vq_from_vl (vl);
3309 }
3310
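/* For example, if the tdesc's SVE feature reports z0 as 256 bits wide,
   the vector length VL is 256 / 8 = 32 bytes and sve_vq_from_vl yields
   VQ = 32 / 16 = 2, since VQ counts 128-bit quadwords.  */
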
3311 /* Add all the expected register sets into GDBARCH. */
3312
3313 static void
3314 aarch64_add_reggroups (struct gdbarch *gdbarch)
3315 {
3316 reggroup_add (gdbarch, general_reggroup);
3317 reggroup_add (gdbarch, float_reggroup);
3318 reggroup_add (gdbarch, system_reggroup);
3319 reggroup_add (gdbarch, vector_reggroup);
3320 reggroup_add (gdbarch, all_reggroup);
3321 reggroup_add (gdbarch, save_reggroup);
3322 reggroup_add (gdbarch, restore_reggroup);
3323 }
3324
3325 /* Implement the "cannot_store_register" gdbarch method. */
3326
3327 static int
3328 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3329 {
3330 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3331
3332 if (!tdep->has_pauth ())
3333 return 0;
3334
3335 /* Pointer authentication registers are read-only. */
3336 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3337 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3338 }
3339
3340 /* Initialize the current architecture based on INFO. If possible,
3341 re-use an architecture from ARCHES, which is a list of
3342 architectures already created during this debugging session.
3343
3344 Called e.g. at program startup, when reading a core file, and when
3345 reading a binary file. */
3346
3347 static struct gdbarch *
3348 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3349 {
3350 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3351 const struct tdesc_feature *feature_pauth;
3352 bool valid_p = true;
3353 int i, num_regs = 0, num_pseudo_regs = 0;
3354 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3355 int first_mte_regnum = -1;
3356
3357 /* Use the vector length passed via the target info. Here -1 is used for no
3358 SVE, and 0 is unset. If unset, use the vector length from the existing
3359 tdesc. */
3360 uint64_t vq = 0;
3361 if (info.id == (int *) -1)
3362 vq = 0;
3363 else if (info.id != 0)
3364 vq = (uint64_t) info.id;
3365 else
3366 vq = aarch64_get_tdesc_vq (info.target_desc);
3367
3368 if (vq > AARCH64_MAX_SVE_VQ)
3369 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3370 pulongest (vq), AARCH64_MAX_SVE_VQ);
3371
3372 /* If there is already a candidate, use it. */
3373 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3374 best_arch != nullptr;
3375 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3376 {
3377 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3378 if (tdep && tdep->vq == vq)
3379 return best_arch->gdbarch;
3380 }
3381
3382 /* Ensure we always have a target descriptor, and that it is for the given VQ
3383 value. */
3384 const struct target_desc *tdesc = info.target_desc;
3385 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3386 tdesc = aarch64_read_description (vq, false, false);
3387 gdb_assert (tdesc);
3388
3389 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3390 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3391 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3392 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3393 const struct tdesc_feature *feature_mte
3394 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
3395
3396 if (feature_core == nullptr)
3397 return nullptr;
3398
3399 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3400
3401 /* Validate the description provides the mandatory core R registers
3402 and allocate their numbers. */
3403 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3404 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3405 AARCH64_X0_REGNUM + i,
3406 aarch64_r_register_names[i]);
3407
3408 num_regs = AARCH64_X0_REGNUM + i;
3409
3410 /* Add the V registers. */
3411 if (feature_fpu != nullptr)
3412 {
3413 if (feature_sve != nullptr)
3414 error (_("Program contains both fpu and SVE features."));
3415
3416 /* Validate the description provides the mandatory V registers
3417 and allocate their numbers. */
3418 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3419 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3420 AARCH64_V0_REGNUM + i,
3421 aarch64_v_register_names[i]);
3422
3423 num_regs = AARCH64_V0_REGNUM + i;
3424 }
3425
3426 /* Add the SVE registers. */
3427 if (feature_sve != nullptr)
3428 {
3429 /* Validate the description provides the mandatory SVE registers
3430 and allocate their numbers. */
3431 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3432 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3433 AARCH64_SVE_Z0_REGNUM + i,
3434 aarch64_sve_register_names[i]);
3435
3436 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3437 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3438 }
3439
3440 if (feature_fpu != nullptr || feature_sve != nullptr)
3441 {
3442 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3443 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3444 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3445 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3446 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3447 }
3448
3449 /* Add the pauth registers. */
3450 if (feature_pauth != NULL)
3451 {
3452 first_pauth_regnum = num_regs;
3453 pauth_ra_state_offset = num_pseudo_regs;
3454 /* Validate the descriptor provides the mandatory PAUTH registers and
3455 allocate their numbers. */
3456 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3457 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3458 first_pauth_regnum + i,
3459 aarch64_pauth_register_names[i]);
3460
3461 num_regs += i;
3462 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3463 }
3464
3465 /* Add the MTE registers. */
3466 if (feature_mte != NULL)
3467 {
3468 first_mte_regnum = num_regs;
3469 /* Validate the descriptor provides the mandatory MTE registers and
3470 allocate their numbers. */
3471 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3472 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3473 first_mte_regnum + i,
3474 aarch64_mte_register_names[i]);
3475
3476 num_regs += i;
3477 }
3478
3479 if (!valid_p)
3480 return nullptr;
3481
3482 /* AArch64 code is always little-endian. */
3483 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3484
3485 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3486 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3487
3488 /* This should be low enough for everything. */
3489 tdep->lowest_pc = 0x20;
3490 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3491 tdep->jb_elt_size = 8;
3492 tdep->vq = vq;
3493 tdep->pauth_reg_base = first_pauth_regnum;
3494 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3495 : pauth_ra_state_offset + num_regs;
3496 tdep->mte_reg_base = first_mte_regnum;
3497
3498 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3499 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3500
3501 /* Advance PC across function entry code. */
3502 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3503
3504 /* The stack grows downward. */
3505 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3506
3507 /* Breakpoint manipulation. */
3508 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3509 aarch64_breakpoint::kind_from_pc);
3510 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3511 aarch64_breakpoint::bp_from_kind);
3512 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3513 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3514
3515 /* Information about registers, etc. */
3516 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3517 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3518 set_gdbarch_num_regs (gdbarch, num_regs);
3519
3520 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3521 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3522 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3523 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3524 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3525 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3526 aarch64_pseudo_register_reggroup_p);
3527 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3528
3529 /* ABI */
3530 set_gdbarch_short_bit (gdbarch, 16);
3531 set_gdbarch_int_bit (gdbarch, 32);
3532 set_gdbarch_float_bit (gdbarch, 32);
3533 set_gdbarch_double_bit (gdbarch, 64);
3534 set_gdbarch_long_double_bit (gdbarch, 128);
3535 set_gdbarch_long_bit (gdbarch, 64);
3536 set_gdbarch_long_long_bit (gdbarch, 64);
3537 set_gdbarch_ptr_bit (gdbarch, 64);
3538 set_gdbarch_char_signed (gdbarch, 0);
3539 set_gdbarch_wchar_signed (gdbarch, 0);
3540 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3541 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3542 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3543 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3544
3545 /* Internal <-> external register number maps. */
3546 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3547
3548 /* Returning results. */
3549 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3550
3551 /* Disassembly. */
3552 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3553
3554 /* Virtual tables. */
3555 set_gdbarch_vbit_in_delta (gdbarch, 1);
3556
3557 /* Register architecture. */
3558 aarch64_add_reggroups (gdbarch);
3559
3560 /* Hook in the ABI-specific overrides, if they have been registered. */
3561 info.target_desc = tdesc;
3562 info.tdesc_data = tdesc_data.get ();
3563 gdbarch_init_osabi (info, gdbarch);
3564
3565 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3566 /* Register DWARF CFA vendor handler. */
3567 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3568 aarch64_execute_dwarf_cfa_vendor_op);
3569
3570 /* Permanent/Program breakpoint handling. */
3571 set_gdbarch_program_breakpoint_here_p (gdbarch,
3572 aarch64_program_breakpoint_here_p);
3573
3574 /* Add some default predicates. */
3575 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3576 dwarf2_append_unwinders (gdbarch);
3577 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3578
3579 frame_base_set_default (gdbarch, &aarch64_normal_base);
3580
3581 /* Now that we have tuned the configuration, set a few final things
3582 based on what the OS ABI has told us. */
3583
3584 if (tdep->jb_pc >= 0)
3585 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3586
3587 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3588
3589 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3590
3591 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3592
3593 /* Add standard register aliases. */
3594 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3595 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3596 value_of_aarch64_user_reg,
3597 &aarch64_register_aliases[i].regnum);
3598
3599 register_aarch64_ravenscar_ops (gdbarch);
3600
3601 return gdbarch;
3602 }
3603
3604 static void
3605 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3606 {
3607 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3608
3609 if (tdep == NULL)
3610 return;
3611
3612 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
3613 paddress (gdbarch, tdep->lowest_pc));
3614 }
3615
3616 #if GDB_SELF_TEST
3617 namespace selftests
3618 {
3619 static void aarch64_process_record_test (void);
3620 }
3621 #endif
3622
3623 void _initialize_aarch64_tdep ();
3624 void
3625 _initialize_aarch64_tdep ()
3626 {
3627 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3628 aarch64_dump_tdep);
3629
3630 /* Debug this file's internals. */
3631 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3632 Set AArch64 debugging."), _("\
3633 Show AArch64 debugging."), _("\
3634 When on, AArch64 specific debugging is enabled."),
3635 NULL,
3636 show_aarch64_debug,
3637 &setdebuglist, &showdebuglist);
3638
3639 #if GDB_SELF_TEST
3640 selftests::register_test ("aarch64-analyze-prologue",
3641 selftests::aarch64_analyze_prologue_test);
3642 selftests::register_test ("aarch64-process-record",
3643 selftests::aarch64_process_record_test);
3644 #endif
3645 }
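
/* Editor's usage note: the command registered above is driven from the
   GDB prompt, for example:

     (gdb) set debug aarch64 on
     (gdb) show debug aarch64

   When enabled, aarch64_debug gates this file's general debug output;
   the record handlers below are gated separately by record_debug
   ("set debug record").  */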
3646
3647 /* AArch64 process record-replay related structures, defines, etc. */
3648
3649 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3650 do \
3651 { \
3652 unsigned int reg_len = LENGTH; \
3653 if (reg_len) \
3654 { \
3655 REGS = XNEWVEC (uint32_t, reg_len); \
3656 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3657 } \
3658 } \
3659 while (0)
3660
3661 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3662 do \
3663 { \
3664 unsigned int mem_len = LENGTH; \
3665 if (mem_len) \
3666 { \
3667 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3668 memcpy (&MEMS[0], &RECORD_BUF[0], \
3669 sizeof (struct aarch64_mem_r) * LENGTH); \
3670 } \
3671 } \
3672 while (0)
3673
3674 /* AArch64 record/replay structures and enumerations. */
3675
3676 struct aarch64_mem_r
3677 {
3678 uint64_t len; /* Record length. */
3679 uint64_t addr; /* Memory address. */
3680 };
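
/* Editor's note: MEM_ALLOC above depends on this layout.  Record
   handlers fill a flat uint64_t buffer as {len0, addr0, len1, addr1,
   ...}, and the memcpy in MEM_ALLOC reinterprets each pair as one
   aarch64_mem_r, so the member order here (len first, then addr) must
   match the order in which the handlers push the two values.  */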
3681
3682 enum aarch64_record_result
3683 {
3684 AARCH64_RECORD_SUCCESS,
3685 AARCH64_RECORD_UNSUPPORTED,
3686 AARCH64_RECORD_UNKNOWN
3687 };
3688
3689 typedef struct insn_decode_record_t
3690 {
3691 struct gdbarch *gdbarch;
3692 struct regcache *regcache;
3693 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3694 uint32_t aarch64_insn; /* Insn to be recorded. */
3695 uint32_t mem_rec_count; /* Count of memory records. */
3696 uint32_t reg_rec_count; /* Count of register records. */
3697 uint32_t *aarch64_regs; /* Registers to be recorded. */
3698 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3699 } insn_decode_record;
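
/* Editor's sketch (hypothetical handler, kept out of the build): the
   usage pattern shared by the record handlers below.  A handler
   collects the numbers of the registers an instruction writes into a
   local buffer, sets reg_rec_count, and lets REG_ALLOC copy the buffer
   into heap storage that deallocate_reg_mem frees afterwards.  */
#if 0
static unsigned int
example_record_handler (insn_decode_record *r)
{
  uint32_t record_buf[2];

  record_buf[0] = 0;			/* Say the insn writes x0...  */
  record_buf[1] = AARCH64_CPSR_REGNUM;	/* ...and the flags.  */
  r->reg_rec_count = 2;
  REG_ALLOC (r->aarch64_regs, r->reg_rec_count, record_buf);
  return AARCH64_RECORD_SUCCESS;
}
#endif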
3700
3701 /* Record handler for data processing - register instructions. */
3702
3703 static unsigned int
3704 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3705 {
3706 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3707 uint32_t record_buf[4];
3708
3709 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3710 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3711 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3712
3713 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3714 {
3715 uint8_t setflags;
3716
3717 /* Logical (shifted register). */
3718 if (insn_bits24_27 == 0x0a)
3719 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3720 /* Add/subtract. */
3721 else if (insn_bits24_27 == 0x0b)
3722 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3723 else
3724 return AARCH64_RECORD_UNKNOWN;
3725
3726 record_buf[0] = reg_rd;
3727 aarch64_insn_r->reg_rec_count = 1;
3728 if (setflags)
3729 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3730 }
3731 else
3732 {
3733 if (insn_bits24_27 == 0x0b)
3734 {
3735 /* Data-processing (3 source). */
3736 record_buf[0] = reg_rd;
3737 aarch64_insn_r->reg_rec_count = 1;
3738 }
3739 else if (insn_bits24_27 == 0x0a)
3740 {
3741 if (insn_bits21_23 == 0x00)
3742 {
3743 /* Add/subtract (with carry). */
3744 record_buf[0] = reg_rd;
3745 aarch64_insn_r->reg_rec_count = 1;
3746 if (bit (aarch64_insn_r->aarch64_insn, 29))
3747 {
3748 record_buf[1] = AARCH64_CPSR_REGNUM;
3749 aarch64_insn_r->reg_rec_count = 2;
3750 }
3751 }
3752 else if (insn_bits21_23 == 0x02)
3753 {
3754 /* Conditional compare (register) and conditional compare
3755 (immediate) instructions. */
3756 record_buf[0] = AARCH64_CPSR_REGNUM;
3757 aarch64_insn_r->reg_rec_count = 1;
3758 }
3759 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3760 {
3761 /* Conditional select. */
3762 /* Data-processing (2 source). */
3763 /* Data-processing (1 source). */
3764 record_buf[0] = reg_rd;
3765 aarch64_insn_r->reg_rec_count = 1;
3766 }
3767 else
3768 return AARCH64_RECORD_UNKNOWN;
3769 }
3770 }
3771
3772 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3773 record_buf);
3774 return AARCH64_RECORD_SUCCESS;
3775 }
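
/* Editor's worked example for the decoder above (hand-checked, kept
   out of the build): ADDS x0, x1, x2 encodes as 0xab020020.  Bit 28
   is clear and bits 24-27 are 0x0b, so the add/subtract path applies;
   bit 29 (S) is set, so both the destination x0 and the CPSR are
   recorded.  */
#if 0
static void
editor_example_adds (void)
{
  uint32_t insn = 0xab020020;	/* ADDS x0, x1, x2.  */
  gdb_assert (bit (insn, 28) == 0);
  gdb_assert (bits (insn, 24, 27) == 0x0b);	/* Add/subtract.  */
  gdb_assert (bit (insn, 29) == 1);		/* Sets flags.  */
}
#endif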
3776
3777 /* Record handler for data processing - immediate instructions. */
3778
3779 static unsigned int
3780 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3781 {
3782 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3783 uint32_t record_buf[4];
3784
3785 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3786 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3787 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3788
3789 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3790 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3791 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3792 {
3793 record_buf[0] = reg_rd;
3794 aarch64_insn_r->reg_rec_count = 1;
3795 }
3796 else if (insn_bits24_27 == 0x01)
3797 {
3798 /* Add/Subtract (immediate). */
3799 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3800 record_buf[0] = reg_rd;
3801 aarch64_insn_r->reg_rec_count = 1;
3802 if (setflags)
3803 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3804 }
3805 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3806 {
3807 /* Logical (immediate). */
3808 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3809 record_buf[0] = reg_rd;
3810 aarch64_insn_r->reg_rec_count = 1;
3811 if (setflags)
3812 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3813 }
3814 else
3815 return AARCH64_RECORD_UNKNOWN;
3816
3817 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3818 record_buf);
3819 return AARCH64_RECORD_SUCCESS;
3820 }
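
/* Editor's worked example for the decoder above: MOVZ x0, #0x1
   encodes as 0xd2800020.  Bits 24-27 are 0x02 and bit 23 is set, so
   the move wide (immediate) path applies: only the destination x0 is
   recorded and the flags are left alone.  */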
3821
3822 /* Record handler for branch, exception generation and system instructions. */
3823
3824 static unsigned int
3825 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3826 {
3827 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3828 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3829 uint32_t record_buf[4];
3830
3831 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3832 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3833 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3834
3835 if (insn_bits28_31 == 0x0d)
3836 {
3837 /* Exception generation instructions. */
3838 if (insn_bits24_27 == 0x04)
3839 {
3840 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3841 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3842 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3843 {
3844 ULONGEST svc_number;
3845
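/* Editor's clarification: in the AArch64 Linux ABI the supervisor
   call number is passed in x8, hence the hard-coded register
   number below.  */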
3846 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3847 &svc_number);
3848 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3849 svc_number);
3850 }
3851 else
3852 return AARCH64_RECORD_UNSUPPORTED;
3853 }
3854 /* System instructions. */
3855 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3856 {
3857 uint32_t reg_rt, reg_crn;
3858
3859 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3860 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3861
3862 /* Record Rt for the SYSL and MRS instructions. */
3863 if (bit (aarch64_insn_r->aarch64_insn, 21))
3864 {
3865 record_buf[0] = reg_rt;
3866 aarch64_insn_r->reg_rec_count = 1;
3867 }
3868 /* Record the CPSR for HINT and MSR (immediate) instructions. */
3869 else if (reg_crn == 0x02 || reg_crn == 0x04)
3870 {
3871 record_buf[0] = AARCH64_CPSR_REGNUM;
3872 aarch64_insn_r->reg_rec_count = 1;
3873 }
3874 }
3875 /* Unconditional branch (register). */
3876 else if ((insn_bits24_27 & 0x0e) == 0x06)
3877 {
3878 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3879 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3880 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3881 }
3882 else
3883 return AARCH64_RECORD_UNKNOWN;
3884 }
3885 /* Unconditional branch (immediate). */
3886 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3887 {
3888 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3889 if (bit (aarch64_insn_r->aarch64_insn, 31))
3890 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3891 }
3892 else
3893 /* Compare & branch (immediate), Test & branch (immediate) and
3894 Conditional branch (immediate). */
3895 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3896
3897 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3898 record_buf);
3899 return AARCH64_RECORD_SUCCESS;
3900 }
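
/* Editor's worked example for the decoder above: BL #0 encodes as
   0x94000000.  (bits 28-31 & 0x07) == 0x01 and (bits 24-27 & 0x0c)
   == 0x04 select the unconditional branch (immediate) path; the PC is
   always recorded there, and because bit 31 (the link bit) is set the
   LR is recorded too.  A plain B takes the same path with bit 31
   clear, recording only the PC.  */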
3901
3902 /* Record handler for advanced SIMD load and store instructions. */
3903
3904 static unsigned int
3905 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3906 {
3907 CORE_ADDR address;
3908 uint64_t addr_offset = 0;
3909 uint32_t record_buf[24];
3910 uint64_t record_buf_mem[24];
3911 uint32_t reg_rn, reg_rt;
3912 uint32_t reg_index = 0, mem_index = 0;
3913 uint8_t opcode_bits, size_bits;
3914
3915 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3916 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3917 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3918 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3919 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3920
3921 if (record_debug)
3922 debug_printf ("Process record: Advanced SIMD load/store\n");
3923
3924 /* Load/store single structure. */
3925 if (bit (aarch64_insn_r->aarch64_insn, 24))
3926 {
3927 uint8_t sindex, scale, selem, esize, replicate = 0;
3928 scale = opcode_bits >> 2;
3929 selem = ((opcode_bits & 0x02)
3930 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3931 switch (scale)
3932 {
3933 case 1:
3934 if (size_bits & 0x01)
3935 return AARCH64_RECORD_UNKNOWN;
3936 break;
3937 case 2:
3938 if ((size_bits >> 1) & 0x01)
3939 return AARCH64_RECORD_UNKNOWN;
3940 if (size_bits & 0x01)
3941 {
3942 if (!((opcode_bits >> 1) & 0x01))
3943 scale = 3;
3944 else
3945 return AARCH64_RECORD_UNKNOWN;
3946 }
3947 break;
3948 case 3:
3949 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3950 {
3951 scale = size_bits;
3952 replicate = 1;
3953 break;
3954 }
3955 else
3956 return AARCH64_RECORD_UNKNOWN;
3957 default:
3958 break;
3959 }
3960 esize = 8 << scale;
3961 if (replicate)
3962 for (sindex = 0; sindex < selem; sindex++)
3963 {
3964 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3965 reg_rt = (reg_rt + 1) % 32;
3966 }
3967 else
3968 {
3969 for (sindex = 0; sindex < selem; sindex++)
3970 {
3971 if (bit (aarch64_insn_r->aarch64_insn, 22))
3972 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3973 else
3974 {
3975 record_buf_mem[mem_index++] = esize / 8;
3976 record_buf_mem[mem_index++] = address + addr_offset;
3977 }
3978 addr_offset = addr_offset + (esize / 8);
3979 reg_rt = (reg_rt + 1) % 32;
3980 }
3981 }
3982 }
3983 /* Load/store multiple structure. */
3984 else
3985 {
3986 uint8_t selem, esize, rpt, elements;
3987 uint8_t eindex, rindex;
3988
3989 esize = 8 << size_bits;
3990 if (bit (aarch64_insn_r->aarch64_insn, 30))
3991 elements = 128 / esize;
3992 else
3993 elements = 64 / esize;
3994
3995 switch (opcode_bits)
3996 {
3997 /* LD/ST4 (4 Registers). */
3998 case 0:
3999 rpt = 1;
4000 selem = 4;
4001 break;
4002 /* LD/ST1 (4 Registers). */
4003 case 2:
4004 rpt = 4;
4005 selem = 1;
4006 break;
4007 /* LD/ST3 (3 Registers). */
4008 case 4:
4009 rpt = 1;
4010 selem = 3;
4011 break;
4012 /* LD/ST1 (3 Registers). */
4013 case 6:
4014 rpt = 3;
4015 selem = 1;
4016 break;
4017 /* LD/ST1 (1 Register). */
4018 case 7:
4019 rpt = 1;
4020 selem = 1;
4021 break;
4022 /* LD/ST2 (2 Registers). */
4023 case 8:
4024 rpt = 1;
4025 selem = 2;
4026 break;
4027 /* LD/ST1 (2 Registers). */
4028 case 10:
4029 rpt = 2;
4030 selem = 1;
4031 break;
4032 default:
4033 return AARCH64_RECORD_UNSUPPORTED;
4035 }
4036 for (rindex = 0; rindex < rpt; rindex++)
4037 for (eindex = 0; eindex < elements; eindex++)
4038 {
4039 uint8_t reg_tt, sindex;
4040 reg_tt = (reg_rt + rindex) % 32;
4041 for (sindex = 0; sindex < selem; sindex++)
4042 {
4043 if (bit (aarch64_insn_r->aarch64_insn, 22))
4044 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4045 else
4046 {
4047 record_buf_mem[mem_index++] = esize / 8;
4048 record_buf_mem[mem_index++] = address + addr_offset;
4049 }
4050 addr_offset = addr_offset + (esize / 8);
4051 reg_tt = (reg_tt + 1) % 32;
4052 }
4053 }
4054 }
4055
4056 if (bit (aarch64_insn_r->aarch64_insn, 23))
4057 record_buf[reg_index++] = reg_rn;
4058
4059 aarch64_insn_r->reg_rec_count = reg_index;
4060 aarch64_insn_r->mem_rec_count = mem_index / 2;
4061 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4062 record_buf_mem);
4063 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4064 record_buf);
4065 return AARCH64_RECORD_SUCCESS;
4066 }
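
/* Editor's worked example for the multiple-structure path above:
   LD4 {v0.4s-v3.4s}, [x0] has opcode_bits == 0, giving rpt = 1 and
   selem = 4; with the Q bit (30) set and size_bits == 0x02, esize =
   32 and elements = 128 / 32 = 4.  The nested loops therefore record
   v0-v3 once per element, and x0 is recorded as well when the
   post-indexed writeback form (bit 23) is used.  */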
4067
4068 /* Record handler for load and store instructions. */
4069
4070 static unsigned int
4071 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4072 {
4073 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4074 uint8_t insn_bit23, insn_bit21;
4075 uint8_t opc, size_bits, ld_flag, vector_flag;
4076 uint32_t reg_rn, reg_rt, reg_rt2;
4077 uint64_t datasize, offset;
4078 uint32_t record_buf[8];
4079 uint64_t record_buf_mem[8];
4080 CORE_ADDR address;
4081
4082 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4083 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4084 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4085 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4086 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4087 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4088 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4089 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4090 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4091 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4092 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4093
4094 /* Load/store exclusive. */
4095 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4096 {
4097 if (record_debug)
4098 debug_printf ("Process record: load/store exclusive\n");
4099
4100 if (ld_flag)
4101 {
4102 record_buf[0] = reg_rt;
4103 aarch64_insn_r->reg_rec_count = 1;
4104 if (insn_bit21)
4105 {
4106 record_buf[1] = reg_rt2;
4107 aarch64_insn_r->reg_rec_count = 2;
4108 }
4109 }
4110 else
4111 {
4112 if (insn_bit21)
4113 datasize = (8 << size_bits) * 2;
4114 else
4115 datasize = (8 << size_bits);
4116 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4117 &address);
4118 record_buf_mem[0] = datasize / 8;
4119 record_buf_mem[1] = address;
4120 aarch64_insn_r->mem_rec_count = 1;
4121 if (!insn_bit23)
4122 {
4123 /* Save register rs. */
4124 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4125 aarch64_insn_r->reg_rec_count = 1;
4126 }
4127 }
4128 }
4129 /* Decode load register (literal) instructions. */
4130 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4131 {
4132 if (record_debug)
4133 debug_printf ("Process record: load register (literal)\n");
4134 if (vector_flag)
4135 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4136 else
4137 record_buf[0] = reg_rt;
4138 aarch64_insn_r->reg_rec_count = 1;
4139 }
4140 /* Decode all types of load/store pair instructions. */
4141 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4142 {
4143 if (record_debug)
4144 debug_printf ("Process record: load/store pair\n");
4145
4146 if (ld_flag)
4147 {
4148 if (vector_flag)
4149 {
4150 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4151 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4152 }
4153 else
4154 {
4155 record_buf[0] = reg_rt;
4156 record_buf[1] = reg_rt2;
4157 }
4158 aarch64_insn_r->reg_rec_count = 2;
4159 }
4160 else
4161 {
4162 uint16_t imm7_off;
4163 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4164 if (!vector_flag)
4165 size_bits = size_bits >> 1;
4166 datasize = 8 << (2 + size_bits);
4167 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4168 offset = offset << (2 + size_bits);
4169 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4170 &address);
4171 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4172 {
4173 if (imm7_off & 0x40)
4174 address = address - offset;
4175 else
4176 address = address + offset;
4177 }
4178
4179 record_buf_mem[0] = datasize / 8;
4180 record_buf_mem[1] = address;
4181 record_buf_mem[2] = datasize / 8;
4182 record_buf_mem[3] = address + (datasize / 8);
4183 aarch64_insn_r->mem_rec_count = 2;
4184 }
4185 if (bit (aarch64_insn_r->aarch64_insn, 23))
4186 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4187 }
4188 /* Load/store register (unsigned immediate) instructions. */
4189 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4190 {
4191 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4192 if (!(opc >> 1))
4193 {
4194 if (opc & 0x01)
4195 ld_flag = 0x01;
4196 else
4197 ld_flag = 0x0;
4198 }
4199 else
4200 {
4201 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4202 {
4203 /* PRFM (immediate). */
4204 return AARCH64_RECORD_SUCCESS;
4205 }
4206 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4207 {
4208 /* LDRSW (immediate). */
4209 ld_flag = 0x1;
4210 }
4211 else
4212 {
4213 if (opc & 0x01)
4214 ld_flag = 0x01;
4215 else
4216 ld_flag = 0x0;
4217 }
4218 }
4219
4220 if (record_debug)
4221 {
4222 debug_printf ("Process record: load/store (unsigned immediate):"
4223 " size %x V %d opc %x\n", size_bits, vector_flag,
4224 opc);
4225 }
4226
4227 if (!ld_flag)
4228 {
4229 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4230 datasize = 8 << size_bits;
4231 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4232 &address);
4233 offset = offset << size_bits;
4234 address = address + offset;
4235
4236 record_buf_mem[0] = datasize >> 3;
4237 record_buf_mem[1] = address;
4238 aarch64_insn_r->mem_rec_count = 1;
4239 }
4240 else
4241 {
4242 if (vector_flag)
4243 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4244 else
4245 record_buf[0] = reg_rt;
4246 aarch64_insn_r->reg_rec_count = 1;
4247 }
4248 }
4249 /* Load/store register (register offset) instructions. */
4250 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4251 && insn_bits10_11 == 0x02 && insn_bit21)
4252 {
4253 if (record_debug)
4254 debug_printf ("Process record: load/store (register offset)\n");
4255 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4256 if (!(opc >> 1))
4257 if (opc & 0x01)
4258 ld_flag = 0x01;
4259 else
4260 ld_flag = 0x0;
4261 else
4262 if (size_bits != 0x03)
4263 ld_flag = 0x01;
4264 else
4265 return AARCH64_RECORD_UNKNOWN;
4266
4267 if (!ld_flag)
4268 {
4269 ULONGEST reg_rm_val;
4270
4271 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4272 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4273 if (bit (aarch64_insn_r->aarch64_insn, 12))
4274 offset = reg_rm_val << size_bits;
4275 else
4276 offset = reg_rm_val;
4277 datasize = 8 << size_bits;
4278 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4279 &address);
4280 address = address + offset;
4281 record_buf_mem[0] = datasize >> 3;
4282 record_buf_mem[1] = address;
4283 aarch64_insn_r->mem_rec_count = 1;
4284 }
4285 else
4286 {
4287 if (vector_flag)
4288 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4289 else
4290 record_buf[0] = reg_rt;
4291 aarch64_insn_r->reg_rec_count = 1;
4292 }
4293 }
4294 /* Load/store register (immediate and unprivileged) instructions. */
4295 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4296 && !insn_bit21)
4297 {
4298 if (record_debug)
4299 {
4300 debug_printf ("Process record: load/store "
4301 "(immediate and unprivileged)\n");
4302 }
4303 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4304 if (!(opc >> 1))
4305 if (opc & 0x01)
4306 ld_flag = 0x01;
4307 else
4308 ld_flag = 0x0;
4309 else
4310 if (size_bits != 0x03)
4311 ld_flag = 0x01;
4312 else
4313 return AARCH64_RECORD_UNKNOWN;
4314
4315 if (!ld_flag)
4316 {
4317 uint16_t imm9_off;
4318 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4319 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4320 datasize = 8 << size_bits;
4321 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4322 &address);
4323 if (insn_bits10_11 != 0x01)
4324 {
4325 if (imm9_off & 0x0100)
4326 address = address - offset;
4327 else
4328 address = address + offset;
4329 }
4330 record_buf_mem[0] = datasize >> 3;
4331 record_buf_mem[1] = address;
4332 aarch64_insn_r->mem_rec_count = 1;
4333 }
4334 else
4335 {
4336 if (vector_flag)
4337 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4338 else
4339 record_buf[0] = reg_rt;
4340 aarch64_insn_r->reg_rec_count = 1;
4341 }
4342 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4343 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4344 }
4345 /* Advanced SIMD load/store instructions. */
4346 else
4347 return aarch64_record_asimd_load_store (aarch64_insn_r);
4348
4349 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4350 record_buf_mem);
4351 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4352 record_buf);
4353 return AARCH64_RECORD_SUCCESS;
4354 }
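
/* Editor's worked example for the imm7 handling in the load/store
   pair path above (hand-checked, kept out of the build):
   STP x0, x1, [sp, #-16]! encodes imm7 as 0x7e (-2).  Bit 6 of imm7
   is set, so the magnitude is (~0x7e & 0x007f) + 1 == 2, shifted left
   by 2 + size_bits (== 3 for an X-register pair) to give 16, which is
   then subtracted from the base address read from reg_rn.  */
#if 0
static void
editor_example_stp_offset (void)
{
  uint16_t imm7_off = 0x7e;	/* imm7 of STP x0, x1, [sp, #-16]!.  */
  uint64_t offset = (uint64_t) ((~imm7_off & 0x007f) + 1) << 3;
  gdb_assert (offset == 16);
}
#endif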
4355
4356 /* Record handler for data processing SIMD and floating point instructions. */
4357
4358 static unsigned int
4359 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4360 {
4361 uint8_t insn_bit21, opcode, rmode, reg_rd;
4362 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4363 uint8_t insn_bits11_14;
4364 uint32_t record_buf[2];
4365
4366 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4367 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4368 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4369 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4370 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4371 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4372 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4373 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4374 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4375
4376 if (record_debug)
4377 debug_printf ("Process record: data processing SIMD/FP: ");
4378
4379 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4380 {
4381 /* Floating point - fixed point conversion instructions. */
4382 if (!insn_bit21)
4383 {
4384 if (record_debug)
4385 debug_printf ("FP - fixed point conversion");
4386
4387 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4388 record_buf[0] = reg_rd;
4389 else
4390 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4391 }
4392 /* Floating point - conditional compare instructions. */
4393 else if (insn_bits10_11 == 0x01)
4394 {
4395 if (record_debug)
4396 debug_printf ("FP - conditional compare");
4397
4398 record_buf[0] = AARCH64_CPSR_REGNUM;
4399 }
4400 /* Floating point - data processing (2-source) and
4401 conditional select instructions. */
4402 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4403 {
4404 if (record_debug)
4405 debug_printf ("FP - DP (2-source)");
4406
4407 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4408 }
4409 else if (insn_bits10_11 == 0x00)
4410 {
4411 /* Floating point - immediate instructions. */
4412 if ((insn_bits12_15 & 0x01) == 0x01
4413 || (insn_bits12_15 & 0x07) == 0x04)
4414 {
4415 if (record_debug)
4416 debug_printf ("FP - immediate");
4417 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4418 }
4419 /* Floating point - compare instructions. */
4420 else if ((insn_bits12_15 & 0x03) == 0x02)
4421 {
4422 if (record_debug)
4423 debug_printf ("FP - compare");
4424 record_buf[0] = AARCH64_CPSR_REGNUM;
4425 }
4426 /* Floating point - integer conversions instructions. */
4427 else if (insn_bits12_15 == 0x00)
4428 {
4429 /* Convert float to integer instruction. */
4430 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4431 {
4432 if (record_debug)
4433 debug_printf ("float to int conversion");
4434
4435 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4436 }
4437 /* Convert integer to float instruction. */
4438 else if ((opcode >> 1) == 0x01 && !rmode)
4439 {
4440 if (record_debug)
4441 debug_printf ("int to float conversion");
4442
4443 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4444 }
4445 /* Move float to integer instruction. */
4446 else if ((opcode >> 1) == 0x03)
4447 {
4448 if (record_debug)
4449 debug_printf ("move float to int");
4450
4451 if (!(opcode & 0x01))
4452 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4453 else
4454 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4455 }
4456 else
4457 return AARCH64_RECORD_UNKNOWN;
4458 }
4459 else
4460 return AARCH64_RECORD_UNKNOWN;
4461 }
4462 else
4463 return AARCH64_RECORD_UNKNOWN;
4464 }
4465 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4466 {
4467 if (record_debug)
4468 debug_printf ("SIMD copy");
4469
4470 /* Advanced SIMD copy instructions. */
4471 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4472 && !bit (aarch64_insn_r->aarch64_insn, 15)
4473 && bit (aarch64_insn_r->aarch64_insn, 10))
4474 {
4475 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4476 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4477 else
4478 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4479 }
4480 else
4481 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4482 }
4483 /* All remaining floating point or advanced SIMD instructions. */
4484 else
4485 {
4486 if (record_debug)
4487 debug_printf ("all remain");
4488
4489 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4490 }
4491
4492 if (record_debug)
4493 debug_printf ("\n");
4494
4495 /* Record the V/X register. */
4496 aarch64_insn_r->reg_rec_count++;
4497
4498 /* Some of these instructions may set bits in the FPSR, so record it
4499 too. */
4500 record_buf[1] = AARCH64_FPSR_REGNUM;
4501 aarch64_insn_r->reg_rec_count++;
4502
4503 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4504 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4505 record_buf);
4506 return AARCH64_RECORD_SUCCESS;
4507 }
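
/* Editor's note: every successful path above leaves exactly two
   entries in record_buf (the destination V or X register, then FPSR),
   which is what the gdb_assert before REG_ALLOC checks.  For
   FADD d0, d1, d2, for instance, the "FP - DP (2-source)" branch
   records v0, and FPSR is appended unconditionally because such
   instructions may set its cumulative exception flags.  */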
4508
4509 /* Decode the type of an instruction and invoke its record handler. */
4510
4511 static unsigned int
4512 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4513 {
4514 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4515
4516 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4517 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4518 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4519 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4520
4521 /* Data processing - immediate instructions. */
4522 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4523 return aarch64_record_data_proc_imm (aarch64_insn_r);
4524
4525 /* Branch, exception generation and system instructions. */
4526 if (ins_bit26 && !ins_bit27 && ins_bit28)
4527 return aarch64_record_branch_except_sys (aarch64_insn_r);
4528
4529 /* Load and store instructions. */
4530 if (!ins_bit25 && ins_bit27)
4531 return aarch64_record_load_store (aarch64_insn_r);
4532
4533 /* Data processing - register instructions. */
4534 if (ins_bit25 && !ins_bit26 && ins_bit27)
4535 return aarch64_record_data_proc_reg (aarch64_insn_r);
4536
4537 /* Data processing - SIMD and floating point instructions. */
4538 if (ins_bit25 && ins_bit26 && ins_bit27)
4539 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4540
4541 return AARCH64_RECORD_UNSUPPORTED;
4542 }
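
/* Editor's summary: the tests above implement the Armv8-A top-level
   encoding table over insn bits 25-28 (written bit 28 first, x for
   don't-care):

     100x  data processing (immediate)
     101x  branch, exception generation and system
     x1x0  loads and stores
     x101  data processing (register)
     x111  data processing (SIMD and floating point)

   Anything else, including the unallocated top-level blocks, is
   reported as AARCH64_RECORD_UNSUPPORTED.  */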
4543
4544 /* Free the register and memory record buffers allocated by the record
4545 handlers. */
4545
4546 static void
4547 deallocate_reg_mem (insn_decode_record *record)
4548 {
4549 xfree (record->aarch64_regs);
4550 xfree (record->aarch64_mems);
4551 }
4552
4553 #if GDB_SELF_TEST
4554 namespace selftests {
4555
4556 static void
4557 aarch64_process_record_test (void)
4558 {
4559 struct gdbarch_info info;
4560 uint32_t ret;
4561
4562 gdbarch_info_init (&info);
4563 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4564
4565 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4566 SELF_CHECK (gdbarch != NULL);
4567
4568 insn_decode_record aarch64_record;
4569
4570 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4571 aarch64_record.regcache = NULL;
4572 aarch64_record.this_addr = 0;
4573 aarch64_record.gdbarch = gdbarch;
4574
4575 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4576 aarch64_record.aarch64_insn = 0xf9800020;
4577 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4578 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4579 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4580 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4581
4582 deallocate_reg_mem (&aarch64_record);
4583 }
4584
4585 } // namespace selftests
4586 #endif /* GDB_SELF_TEST */
4587
4588 /* Parse the current instruction and record the values of the registers
4589 and memory that it will change to record_arch_list.  Return -1 if
4590 something goes wrong. */
4591
4592 int
4593 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4594 CORE_ADDR insn_addr)
4595 {
4596 uint32_t rec_no = 0;
4597 uint8_t insn_size = 4;
4598 uint32_t ret = 0;
4599 gdb_byte buf[insn_size];
4600 insn_decode_record aarch64_record;
4601
4602 memset (&buf[0], 0, insn_size);
4603 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4604 target_read_memory (insn_addr, &buf[0], insn_size);
4605 aarch64_record.aarch64_insn
4606 = (uint32_t) extract_unsigned_integer (&buf[0],
4607 insn_size,
4608 gdbarch_byte_order (gdbarch));
4609 aarch64_record.regcache = regcache;
4610 aarch64_record.this_addr = insn_addr;
4611 aarch64_record.gdbarch = gdbarch;
4612
4613 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4614 if (ret == AARCH64_RECORD_UNSUPPORTED)
4615 {
4616 printf_unfiltered (_("Process record does not support instruction "
4617 "0x%0x at address %s.\n"),
4618 aarch64_record.aarch64_insn,
4619 paddress (gdbarch, insn_addr));
4620 ret = -1;
4621 }
4622
4623 if (ret == 0)
4624 {
4625 /* Record registers. */
4626 record_full_arch_list_add_reg (aarch64_record.regcache,
4627 AARCH64_PC_REGNUM);
4628 /* Always record register CPSR. */
4629 record_full_arch_list_add_reg (aarch64_record.regcache,
4630 AARCH64_CPSR_REGNUM);
4631 if (aarch64_record.aarch64_regs)
4632 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4633 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4634 aarch64_record.aarch64_regs[rec_no]))
4635 ret = -1;
4636
4637 /* Record memories. */
4638 if (aarch64_record.aarch64_mems)
4639 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4640 if (record_full_arch_list_add_mem
4641 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4642 aarch64_record.aarch64_mems[rec_no].len))
4643 ret = -1;
4644
4645 if (record_full_arch_list_add_end ())
4646 ret = -1;
4647 }
4648
4649 deallocate_reg_mem (&aarch64_record);
4650 return ret;
4651 }