]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/aarch64-tdep.c
dea16e729c0324dd66c70266b0b3575b85e4b775
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "gdbcmd.h"
25 #include "gdbcore.h"
26 #include "dis-asm.h"
27 #include "regcache.h"
28 #include "reggroups.h"
29 #include "value.h"
30 #include "arch-utils.h"
31 #include "osabi.h"
32 #include "frame-unwind.h"
33 #include "frame-base.h"
34 #include "trad-frame.h"
35 #include "objfiles.h"
36 #include "dwarf2.h"
37 #include "dwarf2/frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "ax-gdb.h"
43 #include "gdbsupport/selftest.h"
44
45 #include "aarch64-tdep.h"
46 #include "aarch64-ravenscar-thread.h"
47
48 #include "record.h"
49 #include "record-full.h"
50 #include "arch/aarch64-insn.h"
51 #include "gdbarch.h"
52
53 #include "opcode/aarch64.h"
54 #include <algorithm>
55
56 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
57 four members. */
58 #define HA_MAX_NUM_FLDS 4
59
60 /* All possible aarch64 target descriptors. */
61 static target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/][2 /* mte */];
62
63 /* The standard register names, and all the valid aliases for them. */
64 static const struct
65 {
66 const char *const name;
67 int regnum;
68 } aarch64_register_aliases[] =
69 {
70 /* 64-bit register names. */
71 {"fp", AARCH64_FP_REGNUM},
72 {"lr", AARCH64_LR_REGNUM},
73 {"sp", AARCH64_SP_REGNUM},
74
75 /* 32-bit register names. */
76 {"w0", AARCH64_X0_REGNUM + 0},
77 {"w1", AARCH64_X0_REGNUM + 1},
78 {"w2", AARCH64_X0_REGNUM + 2},
79 {"w3", AARCH64_X0_REGNUM + 3},
80 {"w4", AARCH64_X0_REGNUM + 4},
81 {"w5", AARCH64_X0_REGNUM + 5},
82 {"w6", AARCH64_X0_REGNUM + 6},
83 {"w7", AARCH64_X0_REGNUM + 7},
84 {"w8", AARCH64_X0_REGNUM + 8},
85 {"w9", AARCH64_X0_REGNUM + 9},
86 {"w10", AARCH64_X0_REGNUM + 10},
87 {"w11", AARCH64_X0_REGNUM + 11},
88 {"w12", AARCH64_X0_REGNUM + 12},
89 {"w13", AARCH64_X0_REGNUM + 13},
90 {"w14", AARCH64_X0_REGNUM + 14},
91 {"w15", AARCH64_X0_REGNUM + 15},
92 {"w16", AARCH64_X0_REGNUM + 16},
93 {"w17", AARCH64_X0_REGNUM + 17},
94 {"w18", AARCH64_X0_REGNUM + 18},
95 {"w19", AARCH64_X0_REGNUM + 19},
96 {"w20", AARCH64_X0_REGNUM + 20},
97 {"w21", AARCH64_X0_REGNUM + 21},
98 {"w22", AARCH64_X0_REGNUM + 22},
99 {"w23", AARCH64_X0_REGNUM + 23},
100 {"w24", AARCH64_X0_REGNUM + 24},
101 {"w25", AARCH64_X0_REGNUM + 25},
102 {"w26", AARCH64_X0_REGNUM + 26},
103 {"w27", AARCH64_X0_REGNUM + 27},
104 {"w28", AARCH64_X0_REGNUM + 28},
105 {"w29", AARCH64_X0_REGNUM + 29},
106 {"w30", AARCH64_X0_REGNUM + 30},
107
108 /* specials */
109 {"ip0", AARCH64_X0_REGNUM + 16},
110 {"ip1", AARCH64_X0_REGNUM + 17}
111 };
112
113 /* The required core 'R' registers. */
114 static const char *const aarch64_r_register_names[] =
115 {
116 /* These registers must appear in consecutive RAW register number
117 order and they must begin with AARCH64_X0_REGNUM! */
118 "x0", "x1", "x2", "x3",
119 "x4", "x5", "x6", "x7",
120 "x8", "x9", "x10", "x11",
121 "x12", "x13", "x14", "x15",
122 "x16", "x17", "x18", "x19",
123 "x20", "x21", "x22", "x23",
124 "x24", "x25", "x26", "x27",
125 "x28", "x29", "x30", "sp",
126 "pc", "cpsr"
127 };
128
129 /* The FP/SIMD 'V' registers. */
130 static const char *const aarch64_v_register_names[] =
131 {
132 /* These registers must appear in consecutive RAW register number
133 order and they must begin with AARCH64_V0_REGNUM! */
134 "v0", "v1", "v2", "v3",
135 "v4", "v5", "v6", "v7",
136 "v8", "v9", "v10", "v11",
137 "v12", "v13", "v14", "v15",
138 "v16", "v17", "v18", "v19",
139 "v20", "v21", "v22", "v23",
140 "v24", "v25", "v26", "v27",
141 "v28", "v29", "v30", "v31",
142 "fpsr",
143 "fpcr"
144 };
145
146 /* The SVE 'Z' and 'P' registers. */
147 static const char *const aarch64_sve_register_names[] =
148 {
149 /* These registers must appear in consecutive RAW register number
150 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
151 "z0", "z1", "z2", "z3",
152 "z4", "z5", "z6", "z7",
153 "z8", "z9", "z10", "z11",
154 "z12", "z13", "z14", "z15",
155 "z16", "z17", "z18", "z19",
156 "z20", "z21", "z22", "z23",
157 "z24", "z25", "z26", "z27",
158 "z28", "z29", "z30", "z31",
159 "fpsr", "fpcr",
160 "p0", "p1", "p2", "p3",
161 "p4", "p5", "p6", "p7",
162 "p8", "p9", "p10", "p11",
163 "p12", "p13", "p14", "p15",
164 "ffr", "vg"
165 };
166
167 static const char *const aarch64_pauth_register_names[] =
168 {
169 /* Authentication mask for data pointer. */
170 "pauth_dmask",
171 /* Authentication mask for code pointer. */
172 "pauth_cmask"
173 };
174
175 static const char *const aarch64_mte_register_names[] =
176 {
177 /* Tag Control Register. */
178 "tag_ctl"
179 };
180
181 /* AArch64 prologue cache structure. */
182 struct aarch64_prologue_cache
183 {
184 /* The program counter at the start of the function. It is used to
185 identify this frame as a prologue frame. */
186 CORE_ADDR func;
187
188 /* The program counter at the time this frame was created; i.e. where
189 this function was called from. It is used to identify this frame as a
190 stub frame. */
191 CORE_ADDR prev_pc;
192
193 /* The stack pointer at the time this frame was created; i.e. the
194 caller's stack pointer when this function was called. It is used
195 to identify this frame. */
196 CORE_ADDR prev_sp;
197
198 /* Is the target available to read from? */
199 int available_p;
200
201 /* The frame base for this frame is just prev_sp - frame size.
202 FRAMESIZE is the distance from the frame pointer to the
203 initial stack pointer. */
204 int framesize;
205
206 /* The register used to hold the frame pointer for this frame. */
207 int framereg;
208
209 /* Saved register offsets. */
210 trad_frame_saved_reg *saved_regs;
211 };
212
213 static void
214 show_aarch64_debug (struct ui_file *file, int from_tty,
215 struct cmd_list_element *c, const char *value)
216 {
217 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
218 }
219
220 namespace {
221
222 /* Abstract instruction reader. */
223
224 class abstract_instruction_reader
225 {
226 public:
227 /* Read in one instruction. */
228 virtual ULONGEST read (CORE_ADDR memaddr, int len,
229 enum bfd_endian byte_order) = 0;
230 };
231
232 /* Instruction reader from real target. */
233
234 class instruction_reader : public abstract_instruction_reader
235 {
236 public:
237 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
238 override
239 {
240 return read_code_unsigned_integer (memaddr, len, byte_order);
241 }
242 };
243
244 } // namespace
245
246 /* If address signing is enabled, mask off the signature bits from the link
247 register, which is passed by value in ADDR, using the register values in
248 THIS_FRAME. */
249
250 static CORE_ADDR
251 aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
252 struct frame_info *this_frame, CORE_ADDR addr)
253 {
254 if (tdep->has_pauth ()
255 && frame_unwind_register_unsigned (this_frame,
256 tdep->pauth_ra_state_regnum))
257 {
258 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
259 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
260 addr = addr & ~cmask;
261
262 /* Record in the frame that the link register required unmasking. */
263 set_frame_previous_pc_masked (this_frame);
264 }
265
266 return addr;
267 }
268
269 /* Implement the "get_pc_address_flags" gdbarch method. */
270
271 static std::string
272 aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
273 {
274 if (pc != 0 && get_frame_pc_masked (frame))
275 return "PAC";
276
277 return "";
278 }
279
280 /* Analyze a prologue, looking for a recognizable stack frame
281 and frame pointer. Scan until we encounter a store that could
282 clobber the stack frame unexpectedly, or an unknown instruction. */
283
284 static CORE_ADDR
285 aarch64_analyze_prologue (struct gdbarch *gdbarch,
286 CORE_ADDR start, CORE_ADDR limit,
287 struct aarch64_prologue_cache *cache,
288 abstract_instruction_reader& reader)
289 {
290 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
291 int i;
292
293 /* Whether the stack has been set. This should be true when we notice a SP
294 to FP move or if we are using the SP as the base register for storing
295 data, in case the FP is ommitted. */
296 bool seen_stack_set = false;
297
298 /* Track X registers and D registers in prologue. */
299 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
300
301 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
302 regs[i] = pv_register (i, 0);
303 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
304
305 for (; start < limit; start += 4)
306 {
307 uint32_t insn;
308 aarch64_inst inst;
309
310 insn = reader.read (start, 4, byte_order_for_code);
311
312 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
313 break;
314
315 if (inst.opcode->iclass == addsub_imm
316 && (inst.opcode->op == OP_ADD
317 || strcmp ("sub", inst.opcode->name) == 0))
318 {
319 unsigned rd = inst.operands[0].reg.regno;
320 unsigned rn = inst.operands[1].reg.regno;
321
322 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
323 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
324 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
325 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
326
327 if (inst.opcode->op == OP_ADD)
328 {
329 regs[rd] = pv_add_constant (regs[rn],
330 inst.operands[2].imm.value);
331 }
332 else
333 {
334 regs[rd] = pv_add_constant (regs[rn],
335 -inst.operands[2].imm.value);
336 }
337
338 /* Did we move SP to FP? */
339 if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
340 seen_stack_set = true;
341 }
342 else if (inst.opcode->iclass == pcreladdr
343 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
344 {
345 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
346 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
347
348 regs[inst.operands[0].reg.regno] = pv_unknown ();
349 }
350 else if (inst.opcode->iclass == branch_imm)
351 {
352 /* Stop analysis on branch. */
353 break;
354 }
355 else if (inst.opcode->iclass == condbranch)
356 {
357 /* Stop analysis on branch. */
358 break;
359 }
360 else if (inst.opcode->iclass == branch_reg)
361 {
362 /* Stop analysis on branch. */
363 break;
364 }
365 else if (inst.opcode->iclass == compbranch)
366 {
367 /* Stop analysis on branch. */
368 break;
369 }
370 else if (inst.opcode->op == OP_MOVZ)
371 {
372 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
373
374 /* If this shows up before we set the stack, keep going. Otherwise
375 stop the analysis. */
376 if (seen_stack_set)
377 break;
378
379 regs[inst.operands[0].reg.regno] = pv_unknown ();
380 }
381 else if (inst.opcode->iclass == log_shift
382 && strcmp (inst.opcode->name, "orr") == 0)
383 {
384 unsigned rd = inst.operands[0].reg.regno;
385 unsigned rn = inst.operands[1].reg.regno;
386 unsigned rm = inst.operands[2].reg.regno;
387
388 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
389 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
390 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
391
392 if (inst.operands[2].shifter.amount == 0
393 && rn == AARCH64_SP_REGNUM)
394 regs[rd] = regs[rm];
395 else
396 {
397 aarch64_debug_printf ("prologue analysis gave up "
398 "addr=%s opcode=0x%x (orr x register)",
399 core_addr_to_string_nz (start), insn);
400
401 break;
402 }
403 }
404 else if (inst.opcode->op == OP_STUR)
405 {
406 unsigned rt = inst.operands[0].reg.regno;
407 unsigned rn = inst.operands[1].addr.base_regno;
408 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
409
410 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
411 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
412 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
413 gdb_assert (!inst.operands[1].addr.offset.is_reg);
414
415 stack.store
416 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
417 size, regs[rt]);
418
419 /* Are we storing with SP as a base? */
420 if (rn == AARCH64_SP_REGNUM)
421 seen_stack_set = true;
422 }
423 else if ((inst.opcode->iclass == ldstpair_off
424 || (inst.opcode->iclass == ldstpair_indexed
425 && inst.operands[2].addr.preind))
426 && strcmp ("stp", inst.opcode->name) == 0)
427 {
428 /* STP with addressing mode Pre-indexed and Base register. */
429 unsigned rt1;
430 unsigned rt2;
431 unsigned rn = inst.operands[2].addr.base_regno;
432 int32_t imm = inst.operands[2].addr.offset.imm;
433 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
434
435 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
436 || inst.operands[0].type == AARCH64_OPND_Ft);
437 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
438 || inst.operands[1].type == AARCH64_OPND_Ft2);
439 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
440 gdb_assert (!inst.operands[2].addr.offset.is_reg);
441
442 /* If recording this store would invalidate the store area
443 (perhaps because rn is not known) then we should abandon
444 further prologue analysis. */
445 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
446 break;
447
448 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
449 break;
450
451 rt1 = inst.operands[0].reg.regno;
452 rt2 = inst.operands[1].reg.regno;
453 if (inst.operands[0].type == AARCH64_OPND_Ft)
454 {
455 rt1 += AARCH64_X_REGISTER_COUNT;
456 rt2 += AARCH64_X_REGISTER_COUNT;
457 }
458
459 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
460 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
461
462 if (inst.operands[2].addr.writeback)
463 regs[rn] = pv_add_constant (regs[rn], imm);
464
465 /* Ignore the instruction that allocates stack space and sets
466 the SP. */
467 if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
468 seen_stack_set = true;
469 }
470 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
471 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
472 && (inst.opcode->op == OP_STR_POS
473 || inst.opcode->op == OP_STRF_POS)))
474 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
475 && strcmp ("str", inst.opcode->name) == 0)
476 {
477 /* STR (immediate) */
478 unsigned int rt = inst.operands[0].reg.regno;
479 int32_t imm = inst.operands[1].addr.offset.imm;
480 unsigned int rn = inst.operands[1].addr.base_regno;
481 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
482 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
483 || inst.operands[0].type == AARCH64_OPND_Ft);
484
485 if (inst.operands[0].type == AARCH64_OPND_Ft)
486 rt += AARCH64_X_REGISTER_COUNT;
487
488 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
489 if (inst.operands[1].addr.writeback)
490 regs[rn] = pv_add_constant (regs[rn], imm);
491
492 /* Are we storing with SP as a base? */
493 if (rn == AARCH64_SP_REGNUM)
494 seen_stack_set = true;
495 }
496 else if (inst.opcode->iclass == testbranch)
497 {
498 /* Stop analysis on branch. */
499 break;
500 }
501 else if (inst.opcode->iclass == ic_system)
502 {
503 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
504 int ra_state_val = 0;
505
506 if (insn == 0xd503233f /* paciasp. */
507 || insn == 0xd503237f /* pacibsp. */)
508 {
509 /* Return addresses are mangled. */
510 ra_state_val = 1;
511 }
512 else if (insn == 0xd50323bf /* autiasp. */
513 || insn == 0xd50323ff /* autibsp. */)
514 {
515 /* Return addresses are not mangled. */
516 ra_state_val = 0;
517 }
518 else
519 {
520 aarch64_debug_printf ("prologue analysis gave up addr=%s"
521 " opcode=0x%x (iclass)",
522 core_addr_to_string_nz (start), insn);
523 break;
524 }
525
526 if (tdep->has_pauth () && cache != nullptr)
527 {
528 int regnum = tdep->pauth_ra_state_regnum;
529 cache->saved_regs[regnum].set_value (ra_state_val);
530 }
531 }
532 else
533 {
534 aarch64_debug_printf ("prologue analysis gave up addr=%s"
535 " opcode=0x%x",
536 core_addr_to_string_nz (start), insn);
537
538 break;
539 }
540 }
541
542 if (cache == NULL)
543 return start;
544
545 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
546 {
547 /* Frame pointer is fp. Frame size is constant. */
548 cache->framereg = AARCH64_FP_REGNUM;
549 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
550 }
551 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
552 {
553 /* Try the stack pointer. */
554 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
555 cache->framereg = AARCH64_SP_REGNUM;
556 }
557 else
558 {
559 /* We're just out of luck. We don't know where the frame is. */
560 cache->framereg = -1;
561 cache->framesize = 0;
562 }
563
564 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
565 {
566 CORE_ADDR offset;
567
568 if (stack.find_reg (gdbarch, i, &offset))
569 cache->saved_regs[i].set_addr (offset);
570 }
571
572 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
573 {
574 int regnum = gdbarch_num_regs (gdbarch);
575 CORE_ADDR offset;
576
577 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
578 &offset))
579 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
580 }
581
582 return start;
583 }
584
585 static CORE_ADDR
586 aarch64_analyze_prologue (struct gdbarch *gdbarch,
587 CORE_ADDR start, CORE_ADDR limit,
588 struct aarch64_prologue_cache *cache)
589 {
590 instruction_reader reader;
591
592 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
593 reader);
594 }
595
596 #if GDB_SELF_TEST
597
598 namespace selftests {
599
600 /* Instruction reader from manually cooked instruction sequences. */
601
602 class instruction_reader_test : public abstract_instruction_reader
603 {
604 public:
605 template<size_t SIZE>
606 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
607 : m_insns (insns), m_insns_size (SIZE)
608 {}
609
610 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
611 override
612 {
613 SELF_CHECK (len == 4);
614 SELF_CHECK (memaddr % 4 == 0);
615 SELF_CHECK (memaddr / 4 < m_insns_size);
616
617 return m_insns[memaddr / 4];
618 }
619
620 private:
621 const uint32_t *m_insns;
622 size_t m_insns_size;
623 };
624
625 static void
626 aarch64_analyze_prologue_test (void)
627 {
628 struct gdbarch_info info;
629
630 info.bfd_arch_info = bfd_scan_arch ("aarch64");
631
632 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
633 SELF_CHECK (gdbarch != NULL);
634
635 struct aarch64_prologue_cache cache;
636 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
637
638 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
639
640 /* Test the simple prologue in which frame pointer is used. */
641 {
642 static const uint32_t insns[] = {
643 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
644 0x910003fd, /* mov x29, sp */
645 0x97ffffe6, /* bl 0x400580 */
646 };
647 instruction_reader_test reader (insns);
648
649 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
650 SELF_CHECK (end == 4 * 2);
651
652 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
653 SELF_CHECK (cache.framesize == 272);
654
655 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
656 {
657 if (i == AARCH64_FP_REGNUM)
658 SELF_CHECK (cache.saved_regs[i].addr () == -272);
659 else if (i == AARCH64_LR_REGNUM)
660 SELF_CHECK (cache.saved_regs[i].addr () == -264);
661 else
662 SELF_CHECK (cache.saved_regs[i].is_realreg ()
663 && cache.saved_regs[i].realreg () == i);
664 }
665
666 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
667 {
668 int num_regs = gdbarch_num_regs (gdbarch);
669 int regnum = i + num_regs + AARCH64_D0_REGNUM;
670
671 SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
672 && cache.saved_regs[regnum].realreg () == regnum);
673 }
674 }
675
676 /* Test a prologue in which STR is used and frame pointer is not
677 used. */
678 {
679 static const uint32_t insns[] = {
680 0xf81d0ff3, /* str x19, [sp, #-48]! */
681 0xb9002fe0, /* str w0, [sp, #44] */
682 0xf90013e1, /* str x1, [sp, #32]*/
683 0xfd000fe0, /* str d0, [sp, #24] */
684 0xaa0203f3, /* mov x19, x2 */
685 0xf94013e0, /* ldr x0, [sp, #32] */
686 };
687 instruction_reader_test reader (insns);
688
689 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
690 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
691
692 SELF_CHECK (end == 4 * 5);
693
694 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
695 SELF_CHECK (cache.framesize == 48);
696
697 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
698 {
699 if (i == 1)
700 SELF_CHECK (cache.saved_regs[i].addr () == -16);
701 else if (i == 19)
702 SELF_CHECK (cache.saved_regs[i].addr () == -48);
703 else
704 SELF_CHECK (cache.saved_regs[i].is_realreg ()
705 && cache.saved_regs[i].realreg () == i);
706 }
707
708 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
709 {
710 int num_regs = gdbarch_num_regs (gdbarch);
711 int regnum = i + num_regs + AARCH64_D0_REGNUM;
712
713
714 if (i == 0)
715 SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
716 else
717 SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
718 && cache.saved_regs[regnum].realreg () == regnum);
719 }
720 }
721
722 /* Test handling of movz before setting the frame pointer. */
723 {
724 static const uint32_t insns[] = {
725 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
726 0x52800020, /* mov w0, #0x1 */
727 0x910003fd, /* mov x29, sp */
728 0x528000a2, /* mov w2, #0x5 */
729 0x97fffff8, /* bl 6e4 */
730 };
731
732 instruction_reader_test reader (insns);
733
734 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
735 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
736
737 /* We should stop at the 4th instruction. */
738 SELF_CHECK (end == (4 - 1) * 4);
739 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
740 SELF_CHECK (cache.framesize == 16);
741 }
742
743 /* Test handling of movz/stp when using the stack pointer as frame
744 pointer. */
745 {
746 static const uint32_t insns[] = {
747 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
748 0x52800020, /* mov w0, #0x1 */
749 0x290207e0, /* stp w0, w1, [sp, #16] */
750 0xa9018fe2, /* stp x2, x3, [sp, #24] */
751 0x528000a2, /* mov w2, #0x5 */
752 0x97fffff8, /* bl 6e4 */
753 };
754
755 instruction_reader_test reader (insns);
756
757 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
758 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
759
760 /* We should stop at the 5th instruction. */
761 SELF_CHECK (end == (5 - 1) * 4);
762 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
763 SELF_CHECK (cache.framesize == 64);
764 }
765
766 /* Test handling of movz/str when using the stack pointer as frame
767 pointer */
768 {
769 static const uint32_t insns[] = {
770 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
771 0x52800020, /* mov w0, #0x1 */
772 0xb9002be4, /* str w4, [sp, #40] */
773 0xf9001be5, /* str x5, [sp, #48] */
774 0x528000a2, /* mov w2, #0x5 */
775 0x97fffff8, /* bl 6e4 */
776 };
777
778 instruction_reader_test reader (insns);
779
780 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
781 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
782
783 /* We should stop at the 5th instruction. */
784 SELF_CHECK (end == (5 - 1) * 4);
785 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
786 SELF_CHECK (cache.framesize == 64);
787 }
788
789 /* Test handling of movz/stur when using the stack pointer as frame
790 pointer. */
791 {
792 static const uint32_t insns[] = {
793 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
794 0x52800020, /* mov w0, #0x1 */
795 0xb80343e6, /* stur w6, [sp, #52] */
796 0xf80383e7, /* stur x7, [sp, #56] */
797 0x528000a2, /* mov w2, #0x5 */
798 0x97fffff8, /* bl 6e4 */
799 };
800
801 instruction_reader_test reader (insns);
802
803 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
804 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
805
806 /* We should stop at the 5th instruction. */
807 SELF_CHECK (end == (5 - 1) * 4);
808 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
809 SELF_CHECK (cache.framesize == 64);
810 }
811
812 /* Test handling of movz when there is no frame pointer set or no stack
813 pointer used. */
814 {
815 static const uint32_t insns[] = {
816 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
817 0x52800020, /* mov w0, #0x1 */
818 0x528000a2, /* mov w2, #0x5 */
819 0x97fffff8, /* bl 6e4 */
820 };
821
822 instruction_reader_test reader (insns);
823
824 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
825 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
826
827 /* We should stop at the 4th instruction. */
828 SELF_CHECK (end == (4 - 1) * 4);
829 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
830 SELF_CHECK (cache.framesize == 16);
831 }
832
833 /* Test a prologue in which there is a return address signing instruction. */
834 if (tdep->has_pauth ())
835 {
836 static const uint32_t insns[] = {
837 0xd503233f, /* paciasp */
838 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
839 0x910003fd, /* mov x29, sp */
840 0xf801c3f3, /* str x19, [sp, #28] */
841 0xb9401fa0, /* ldr x19, [x29, #28] */
842 };
843 instruction_reader_test reader (insns);
844
845 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
846 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
847 reader);
848
849 SELF_CHECK (end == 4 * 4);
850 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
851 SELF_CHECK (cache.framesize == 48);
852
853 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
854 {
855 if (i == 19)
856 SELF_CHECK (cache.saved_regs[i].addr () == -20);
857 else if (i == AARCH64_FP_REGNUM)
858 SELF_CHECK (cache.saved_regs[i].addr () == -48);
859 else if (i == AARCH64_LR_REGNUM)
860 SELF_CHECK (cache.saved_regs[i].addr () == -40);
861 else
862 SELF_CHECK (cache.saved_regs[i].is_realreg ()
863 && cache.saved_regs[i].realreg () == i);
864 }
865
866 if (tdep->has_pauth ())
867 {
868 int regnum = tdep->pauth_ra_state_regnum;
869 SELF_CHECK (cache.saved_regs[regnum].is_value ());
870 }
871 }
872 }
873 } // namespace selftests
874 #endif /* GDB_SELF_TEST */
875
876 /* Implement the "skip_prologue" gdbarch method. */
877
878 static CORE_ADDR
879 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
880 {
881 CORE_ADDR func_addr, limit_pc;
882
883 /* See if we can determine the end of the prologue via the symbol
884 table. If so, then return either PC, or the PC after the
885 prologue, whichever is greater. */
886 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
887 {
888 CORE_ADDR post_prologue_pc
889 = skip_prologue_using_sal (gdbarch, func_addr);
890
891 if (post_prologue_pc != 0)
892 return std::max (pc, post_prologue_pc);
893 }
894
895 /* Can't determine prologue from the symbol table, need to examine
896 instructions. */
897
898 /* Find an upper limit on the function prologue using the debug
899 information. If the debug information could not be used to
900 provide that bound, then use an arbitrary large number as the
901 upper bound. */
902 limit_pc = skip_prologue_using_sal (gdbarch, pc);
903 if (limit_pc == 0)
904 limit_pc = pc + 128; /* Magic. */
905
906 /* Try disassembling prologue. */
907 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
908 }
909
910 /* Scan the function prologue for THIS_FRAME and populate the prologue
911 cache CACHE. */
912
913 static void
914 aarch64_scan_prologue (struct frame_info *this_frame,
915 struct aarch64_prologue_cache *cache)
916 {
917 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
918 CORE_ADDR prologue_start;
919 CORE_ADDR prologue_end;
920 CORE_ADDR prev_pc = get_frame_pc (this_frame);
921 struct gdbarch *gdbarch = get_frame_arch (this_frame);
922
923 cache->prev_pc = prev_pc;
924
925 /* Assume we do not find a frame. */
926 cache->framereg = -1;
927 cache->framesize = 0;
928
929 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
930 &prologue_end))
931 {
932 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
933
934 if (sal.line == 0)
935 {
936 /* No line info so use the current PC. */
937 prologue_end = prev_pc;
938 }
939 else if (sal.end < prologue_end)
940 {
941 /* The next line begins after the function end. */
942 prologue_end = sal.end;
943 }
944
945 prologue_end = std::min (prologue_end, prev_pc);
946 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
947 }
948 else
949 {
950 CORE_ADDR frame_loc;
951
952 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
953 if (frame_loc == 0)
954 return;
955
956 cache->framereg = AARCH64_FP_REGNUM;
957 cache->framesize = 16;
958 cache->saved_regs[29].set_addr (0);
959 cache->saved_regs[30].set_addr (8);
960 }
961 }
962
963 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
964 function may throw an exception if the inferior's registers or memory is
965 not available. */
966
967 static void
968 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
969 struct aarch64_prologue_cache *cache)
970 {
971 CORE_ADDR unwound_fp;
972 int reg;
973
974 aarch64_scan_prologue (this_frame, cache);
975
976 if (cache->framereg == -1)
977 return;
978
979 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
980 if (unwound_fp == 0)
981 return;
982
983 cache->prev_sp = unwound_fp + cache->framesize;
984
985 /* Calculate actual addresses of saved registers using offsets
986 determined by aarch64_analyze_prologue. */
987 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
988 if (cache->saved_regs[reg].is_addr ())
989 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
990 + cache->prev_sp);
991
992 cache->func = get_frame_func (this_frame);
993
994 cache->available_p = 1;
995 }
996
997 /* Allocate and fill in *THIS_CACHE with information about the prologue of
998 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
999 Return a pointer to the current aarch64_prologue_cache in
1000 *THIS_CACHE. */
1001
1002 static struct aarch64_prologue_cache *
1003 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
1004 {
1005 struct aarch64_prologue_cache *cache;
1006
1007 if (*this_cache != NULL)
1008 return (struct aarch64_prologue_cache *) *this_cache;
1009
1010 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1011 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1012 *this_cache = cache;
1013
1014 try
1015 {
1016 aarch64_make_prologue_cache_1 (this_frame, cache);
1017 }
1018 catch (const gdb_exception_error &ex)
1019 {
1020 if (ex.error != NOT_AVAILABLE_ERROR)
1021 throw;
1022 }
1023
1024 return cache;
1025 }
1026
1027 /* Implement the "stop_reason" frame_unwind method. */
1028
1029 static enum unwind_stop_reason
1030 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
1031 void **this_cache)
1032 {
1033 struct aarch64_prologue_cache *cache
1034 = aarch64_make_prologue_cache (this_frame, this_cache);
1035
1036 if (!cache->available_p)
1037 return UNWIND_UNAVAILABLE;
1038
1039 /* Halt the backtrace at "_start". */
1040 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1041 return UNWIND_OUTERMOST;
1042
1043 /* We've hit a wall, stop. */
1044 if (cache->prev_sp == 0)
1045 return UNWIND_OUTERMOST;
1046
1047 return UNWIND_NO_REASON;
1048 }
1049
1050 /* Our frame ID for a normal frame is the current function's starting
1051 PC and the caller's SP when we were called. */
1052
1053 static void
1054 aarch64_prologue_this_id (struct frame_info *this_frame,
1055 void **this_cache, struct frame_id *this_id)
1056 {
1057 struct aarch64_prologue_cache *cache
1058 = aarch64_make_prologue_cache (this_frame, this_cache);
1059
1060 if (!cache->available_p)
1061 *this_id = frame_id_build_unavailable_stack (cache->func);
1062 else
1063 *this_id = frame_id_build (cache->prev_sp, cache->func);
1064 }
1065
1066 /* Implement the "prev_register" frame_unwind method. */
1067
1068 static struct value *
1069 aarch64_prologue_prev_register (struct frame_info *this_frame,
1070 void **this_cache, int prev_regnum)
1071 {
1072 struct aarch64_prologue_cache *cache
1073 = aarch64_make_prologue_cache (this_frame, this_cache);
1074
1075 /* If we are asked to unwind the PC, then we need to return the LR
1076 instead. The prologue may save PC, but it will point into this
1077 frame's prologue, not the next frame's resume location. */
1078 if (prev_regnum == AARCH64_PC_REGNUM)
1079 {
1080 CORE_ADDR lr;
1081 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1082 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1083
1084 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1085
1086 if (tdep->has_pauth ()
1087 && cache->saved_regs[tdep->pauth_ra_state_regnum].is_value ())
1088 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1089
1090 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1091 }
1092
1093 /* SP is generally not saved to the stack, but this frame is
1094 identified by the next frame's stack pointer at the time of the
1095 call. The value was already reconstructed into PREV_SP. */
1096 /*
1097 +----------+ ^
1098 | saved lr | |
1099 +->| saved fp |--+
1100 | | |
1101 | | | <- Previous SP
1102 | +----------+
1103 | | saved lr |
1104 +--| saved fp |<- FP
1105 | |
1106 | |<- SP
1107 +----------+ */
1108 if (prev_regnum == AARCH64_SP_REGNUM)
1109 return frame_unwind_got_constant (this_frame, prev_regnum,
1110 cache->prev_sp);
1111
1112 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1113 prev_regnum);
1114 }
1115
1116 /* AArch64 prologue unwinder. */
1117 static frame_unwind aarch64_prologue_unwind =
1118 {
1119 NORMAL_FRAME,
1120 aarch64_prologue_frame_unwind_stop_reason,
1121 aarch64_prologue_this_id,
1122 aarch64_prologue_prev_register,
1123 NULL,
1124 default_frame_sniffer
1125 };
1126
1127 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1128 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1129 Return a pointer to the current aarch64_prologue_cache in
1130 *THIS_CACHE. */
1131
1132 static struct aarch64_prologue_cache *
1133 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1134 {
1135 struct aarch64_prologue_cache *cache;
1136
1137 if (*this_cache != NULL)
1138 return (struct aarch64_prologue_cache *) *this_cache;
1139
1140 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1141 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1142 *this_cache = cache;
1143
1144 try
1145 {
1146 cache->prev_sp = get_frame_register_unsigned (this_frame,
1147 AARCH64_SP_REGNUM);
1148 cache->prev_pc = get_frame_pc (this_frame);
1149 cache->available_p = 1;
1150 }
1151 catch (const gdb_exception_error &ex)
1152 {
1153 if (ex.error != NOT_AVAILABLE_ERROR)
1154 throw;
1155 }
1156
1157 return cache;
1158 }
1159
1160 /* Implement the "stop_reason" frame_unwind method. */
1161
1162 static enum unwind_stop_reason
1163 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1164 void **this_cache)
1165 {
1166 struct aarch64_prologue_cache *cache
1167 = aarch64_make_stub_cache (this_frame, this_cache);
1168
1169 if (!cache->available_p)
1170 return UNWIND_UNAVAILABLE;
1171
1172 return UNWIND_NO_REASON;
1173 }
1174
1175 /* Our frame ID for a stub frame is the current SP and LR. */
1176
1177 static void
1178 aarch64_stub_this_id (struct frame_info *this_frame,
1179 void **this_cache, struct frame_id *this_id)
1180 {
1181 struct aarch64_prologue_cache *cache
1182 = aarch64_make_stub_cache (this_frame, this_cache);
1183
1184 if (cache->available_p)
1185 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1186 else
1187 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1188 }
1189
1190 /* Implement the "sniffer" frame_unwind method. */
1191
1192 static int
1193 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1194 struct frame_info *this_frame,
1195 void **this_prologue_cache)
1196 {
1197 CORE_ADDR addr_in_block;
1198 gdb_byte dummy[4];
1199
1200 addr_in_block = get_frame_address_in_block (this_frame);
1201 if (in_plt_section (addr_in_block)
1202 /* We also use the stub winder if the target memory is unreadable
1203 to avoid having the prologue unwinder trying to read it. */
1204 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1205 return 1;
1206
1207 return 0;
1208 }
1209
1210 /* AArch64 stub unwinder. */
1211 static frame_unwind aarch64_stub_unwind =
1212 {
1213 NORMAL_FRAME,
1214 aarch64_stub_frame_unwind_stop_reason,
1215 aarch64_stub_this_id,
1216 aarch64_prologue_prev_register,
1217 NULL,
1218 aarch64_stub_unwind_sniffer
1219 };
1220
1221 /* Return the frame base address of *THIS_FRAME. */
1222
1223 static CORE_ADDR
1224 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1225 {
1226 struct aarch64_prologue_cache *cache
1227 = aarch64_make_prologue_cache (this_frame, this_cache);
1228
1229 return cache->prev_sp - cache->framesize;
1230 }
1231
1232 /* AArch64 default frame base information. */
1233 static frame_base aarch64_normal_base =
1234 {
1235 &aarch64_prologue_unwind,
1236 aarch64_normal_frame_base,
1237 aarch64_normal_frame_base,
1238 aarch64_normal_frame_base
1239 };
1240
1241 /* Return the value of the REGNUM register in the previous frame of
1242 *THIS_FRAME. */
1243
1244 static struct value *
1245 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1246 void **this_cache, int regnum)
1247 {
1248 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1249 CORE_ADDR lr;
1250
1251 switch (regnum)
1252 {
1253 case AARCH64_PC_REGNUM:
1254 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1255 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1256 return frame_unwind_got_constant (this_frame, regnum, lr);
1257
1258 default:
1259 internal_error (__FILE__, __LINE__,
1260 _("Unexpected register %d"), regnum);
1261 }
1262 }
1263
1264 static const unsigned char op_lit0 = DW_OP_lit0;
1265 static const unsigned char op_lit1 = DW_OP_lit1;
1266
1267 /* Implement the "init_reg" dwarf2_frame_ops method. */
1268
1269 static void
1270 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1271 struct dwarf2_frame_state_reg *reg,
1272 struct frame_info *this_frame)
1273 {
1274 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1275
1276 switch (regnum)
1277 {
1278 case AARCH64_PC_REGNUM:
1279 reg->how = DWARF2_FRAME_REG_FN;
1280 reg->loc.fn = aarch64_dwarf2_prev_register;
1281 return;
1282
1283 case AARCH64_SP_REGNUM:
1284 reg->how = DWARF2_FRAME_REG_CFA;
1285 return;
1286 }
1287
1288 /* Init pauth registers. */
1289 if (tdep->has_pauth ())
1290 {
1291 if (regnum == tdep->pauth_ra_state_regnum)
1292 {
1293 /* Initialize RA_STATE to zero. */
1294 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1295 reg->loc.exp.start = &op_lit0;
1296 reg->loc.exp.len = 1;
1297 return;
1298 }
1299 else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1300 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1301 {
1302 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1303 return;
1304 }
1305 }
1306 }
1307
1308 /* Implement the execute_dwarf_cfa_vendor_op method. */
1309
1310 static bool
1311 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1312 struct dwarf2_frame_state *fs)
1313 {
1314 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1315 struct dwarf2_frame_state_reg *ra_state;
1316
1317 if (op == DW_CFA_AARCH64_negate_ra_state)
1318 {
1319 /* On systems without pauth, treat as a nop. */
1320 if (!tdep->has_pauth ())
1321 return true;
1322
1323 /* Allocate RA_STATE column if it's not allocated yet. */
1324 fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
1325
1326 /* Toggle the status of RA_STATE between 0 and 1. */
1327 ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
1328 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1329
1330 if (ra_state->loc.exp.start == nullptr
1331 || ra_state->loc.exp.start == &op_lit0)
1332 ra_state->loc.exp.start = &op_lit1;
1333 else
1334 ra_state->loc.exp.start = &op_lit0;
1335
1336 ra_state->loc.exp.len = 1;
1337
1338 return true;
1339 }
1340
1341 return false;
1342 }
1343
1344 /* Used for matching BRK instructions for AArch64. */
1345 static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
1346 static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1347
1348 /* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1349
1350 static bool
1351 aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1352 {
1353 const uint32_t insn_len = 4;
1354 gdb_byte target_mem[4];
1355
1356 /* Enable the automatic memory restoration from breakpoints while
1357 we read the memory. Otherwise we may find temporary breakpoints, ones
1358 inserted by GDB, and flag them as permanent breakpoints. */
1359 scoped_restore restore_memory
1360 = make_scoped_restore_show_memory_breakpoints (0);
1361
1362 if (target_read_memory (address, target_mem, insn_len) == 0)
1363 {
1364 uint32_t insn =
1365 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
1366 gdbarch_byte_order_for_code (gdbarch));
1367
1368 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1369 of such instructions with different immediate values. Different OS'
1370 may use a different variation, but they have the same outcome. */
1371 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1372 }
1373
1374 return false;
1375 }
1376
1377 /* When arguments must be pushed onto the stack, they go on in reverse
1378 order. The code below implements a FILO (stack) to do this. */
1379
1380 struct stack_item_t
1381 {
1382 /* Value to pass on stack. It can be NULL if this item is for stack
1383 padding. */
1384 const gdb_byte *data;
1385
1386 /* Size in bytes of value to pass on stack. */
1387 int len;
1388 };
1389
1390 /* Implement the gdbarch type alignment method, overrides the generic
1391 alignment algorithm for anything that is aarch64 specific. */
1392
1393 static ULONGEST
1394 aarch64_type_align (gdbarch *gdbarch, struct type *t)
1395 {
1396 t = check_typedef (t);
1397 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
1398 {
1399 /* Use the natural alignment for vector types (the same for
1400 scalar type), but the maximum alignment is 128-bit. */
1401 if (TYPE_LENGTH (t) > 16)
1402 return 16;
1403 else
1404 return TYPE_LENGTH (t);
1405 }
1406
1407 /* Allow the common code to calculate the alignment. */
1408 return 0;
1409 }
1410
1411 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1412
1413 Return the number of register required, or -1 on failure.
1414
1415 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1416 to the element, else fail if the type of this element does not match the
1417 existing value. */
1418
1419 static int
1420 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1421 struct type **fundamental_type)
1422 {
1423 if (type == nullptr)
1424 return -1;
1425
1426 switch (type->code ())
1427 {
1428 case TYPE_CODE_FLT:
1429 if (TYPE_LENGTH (type) > 16)
1430 return -1;
1431
1432 if (*fundamental_type == nullptr)
1433 *fundamental_type = type;
1434 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1435 || type->code () != (*fundamental_type)->code ())
1436 return -1;
1437
1438 return 1;
1439
1440 case TYPE_CODE_COMPLEX:
1441 {
1442 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1443 if (TYPE_LENGTH (target_type) > 16)
1444 return -1;
1445
1446 if (*fundamental_type == nullptr)
1447 *fundamental_type = target_type;
1448 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1449 || target_type->code () != (*fundamental_type)->code ())
1450 return -1;
1451
1452 return 2;
1453 }
1454
1455 case TYPE_CODE_ARRAY:
1456 {
1457 if (type->is_vector ())
1458 {
1459 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1460 return -1;
1461
1462 if (*fundamental_type == nullptr)
1463 *fundamental_type = type;
1464 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1465 || type->code () != (*fundamental_type)->code ())
1466 return -1;
1467
1468 return 1;
1469 }
1470 else
1471 {
1472 struct type *target_type = TYPE_TARGET_TYPE (type);
1473 int count = aapcs_is_vfp_call_or_return_candidate_1
1474 (target_type, fundamental_type);
1475
1476 if (count == -1)
1477 return count;
1478
1479 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1480 return count;
1481 }
1482 }
1483
1484 case TYPE_CODE_STRUCT:
1485 case TYPE_CODE_UNION:
1486 {
1487 int count = 0;
1488
1489 for (int i = 0; i < type->num_fields (); i++)
1490 {
1491 /* Ignore any static fields. */
1492 if (field_is_static (&type->field (i)))
1493 continue;
1494
1495 struct type *member = check_typedef (type->field (i).type ());
1496
1497 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1498 (member, fundamental_type);
1499 if (sub_count == -1)
1500 return -1;
1501 count += sub_count;
1502 }
1503
1504 /* Ensure there is no padding between the fields (allowing for empty
1505 zero length structs) */
1506 int ftype_length = (*fundamental_type == nullptr)
1507 ? 0 : TYPE_LENGTH (*fundamental_type);
1508 if (count * ftype_length != TYPE_LENGTH (type))
1509 return -1;
1510
1511 return count;
1512 }
1513
1514 default:
1515 break;
1516 }
1517
1518 return -1;
1519 }
1520
1521 /* Return true if an argument, whose type is described by TYPE, can be passed or
1522 returned in simd/fp registers, providing enough parameter passing registers
1523 are available. This is as described in the AAPCS64.
1524
1525 Upon successful return, *COUNT returns the number of needed registers,
1526 *FUNDAMENTAL_TYPE contains the type of those registers.
1527
1528 Candidate as per the AAPCS64 5.4.2.C is either a:
1529 - float.
1530 - short-vector.
1531 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1532 all the members are floats and has at most 4 members.
1533 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1534 all the members are short vectors and has at most 4 members.
1535 - Complex (7.1.1)
1536
1537 Note that HFAs and HVAs can include nested structures and arrays. */
1538
1539 static bool
1540 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1541 struct type **fundamental_type)
1542 {
1543 if (type == nullptr)
1544 return false;
1545
1546 *fundamental_type = nullptr;
1547
1548 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1549 fundamental_type);
1550
1551 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1552 {
1553 *count = ag_count;
1554 return true;
1555 }
1556 else
1557 return false;
1558 }
1559
1560 /* AArch64 function call information structure. */
1561 struct aarch64_call_info
1562 {
1563 /* the current argument number. */
1564 unsigned argnum = 0;
1565
1566 /* The next general purpose register number, equivalent to NGRN as
1567 described in the AArch64 Procedure Call Standard. */
1568 unsigned ngrn = 0;
1569
1570 /* The next SIMD and floating point register number, equivalent to
1571 NSRN as described in the AArch64 Procedure Call Standard. */
1572 unsigned nsrn = 0;
1573
1574 /* The next stacked argument address, equivalent to NSAA as
1575 described in the AArch64 Procedure Call Standard. */
1576 unsigned nsaa = 0;
1577
1578 /* Stack item vector. */
1579 std::vector<stack_item_t> si;
1580 };
1581
1582 /* Pass a value in a sequence of consecutive X registers. The caller
1583 is responsible for ensuring sufficient registers are available. */
1584
1585 static void
1586 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1587 struct aarch64_call_info *info, struct type *type,
1588 struct value *arg)
1589 {
1590 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1591 int len = TYPE_LENGTH (type);
1592 enum type_code typecode = type->code ();
1593 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1594 const bfd_byte *buf = value_contents (arg);
1595
1596 info->argnum++;
1597
1598 while (len > 0)
1599 {
1600 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1601 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1602 byte_order);
1603
1604
1605 /* Adjust sub-word struct/union args when big-endian. */
1606 if (byte_order == BFD_ENDIAN_BIG
1607 && partial_len < X_REGISTER_SIZE
1608 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1609 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1610
1611 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
1612 gdbarch_register_name (gdbarch, regnum),
1613 phex (regval, X_REGISTER_SIZE));
1614
1615 regcache_cooked_write_unsigned (regcache, regnum, regval);
1616 len -= partial_len;
1617 buf += partial_len;
1618 regnum++;
1619 }
1620 }
1621
1622 /* Attempt to marshall a value in a V register. Return 1 if
1623 successful, or 0 if insufficient registers are available. This
1624 function, unlike the equivalent pass_in_x() function does not
1625 handle arguments spread across multiple registers. */
1626
1627 static int
1628 pass_in_v (struct gdbarch *gdbarch,
1629 struct regcache *regcache,
1630 struct aarch64_call_info *info,
1631 int len, const bfd_byte *buf)
1632 {
1633 if (info->nsrn < 8)
1634 {
1635 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1636 /* Enough space for a full vector register. */
1637 gdb_byte reg[register_size (gdbarch, regnum)];
1638 gdb_assert (len <= sizeof (reg));
1639
1640 info->argnum++;
1641 info->nsrn++;
1642
1643 memset (reg, 0, sizeof (reg));
1644 /* PCS C.1, the argument is allocated to the least significant
1645 bits of V register. */
1646 memcpy (reg, buf, len);
1647 regcache->cooked_write (regnum, reg);
1648
1649 aarch64_debug_printf ("arg %d in %s", info->argnum,
1650 gdbarch_register_name (gdbarch, regnum));
1651
1652 return 1;
1653 }
1654 info->nsrn = 8;
1655 return 0;
1656 }
1657
1658 /* Marshall an argument onto the stack. */
1659
1660 static void
1661 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1662 struct value *arg)
1663 {
1664 const bfd_byte *buf = value_contents (arg);
1665 int len = TYPE_LENGTH (type);
1666 int align;
1667 stack_item_t item;
1668
1669 info->argnum++;
1670
1671 align = type_align (type);
1672
1673 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1674 Natural alignment of the argument's type. */
1675 align = align_up (align, 8);
1676
1677 /* The AArch64 PCS requires at most doubleword alignment. */
1678 if (align > 16)
1679 align = 16;
1680
1681 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1682 info->nsaa);
1683
1684 item.len = len;
1685 item.data = buf;
1686 info->si.push_back (item);
1687
1688 info->nsaa += len;
1689 if (info->nsaa & (align - 1))
1690 {
1691 /* Push stack alignment padding. */
1692 int pad = align - (info->nsaa & (align - 1));
1693
1694 item.len = pad;
1695 item.data = NULL;
1696
1697 info->si.push_back (item);
1698 info->nsaa += pad;
1699 }
1700 }
1701
1702 /* Marshall an argument into a sequence of one or more consecutive X
1703 registers or, if insufficient X registers are available then onto
1704 the stack. */
1705
1706 static void
1707 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1708 struct aarch64_call_info *info, struct type *type,
1709 struct value *arg)
1710 {
1711 int len = TYPE_LENGTH (type);
1712 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1713
1714 /* PCS C.13 - Pass in registers if we have enough spare */
1715 if (info->ngrn + nregs <= 8)
1716 {
1717 pass_in_x (gdbarch, regcache, info, type, arg);
1718 info->ngrn += nregs;
1719 }
1720 else
1721 {
1722 info->ngrn = 8;
1723 pass_on_stack (info, type, arg);
1724 }
1725 }
1726
1727 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1728 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1729 registers. A return value of false is an error state as the value will have
1730 been partially passed to the stack. */
1731 static bool
1732 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1733 struct aarch64_call_info *info, struct type *arg_type,
1734 struct value *arg)
1735 {
1736 switch (arg_type->code ())
1737 {
1738 case TYPE_CODE_FLT:
1739 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1740 value_contents (arg));
1741 break;
1742
1743 case TYPE_CODE_COMPLEX:
1744 {
1745 const bfd_byte *buf = value_contents (arg);
1746 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1747
1748 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1749 buf))
1750 return false;
1751
1752 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1753 buf + TYPE_LENGTH (target_type));
1754 }
1755
1756 case TYPE_CODE_ARRAY:
1757 if (arg_type->is_vector ())
1758 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1759 value_contents (arg));
1760 /* fall through. */
1761
1762 case TYPE_CODE_STRUCT:
1763 case TYPE_CODE_UNION:
1764 for (int i = 0; i < arg_type->num_fields (); i++)
1765 {
1766 /* Don't include static fields. */
1767 if (field_is_static (&arg_type->field (i)))
1768 continue;
1769
1770 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1771 struct type *field_type = check_typedef (value_type (field));
1772
1773 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1774 field))
1775 return false;
1776 }
1777 return true;
1778
1779 default:
1780 return false;
1781 }
1782 }
1783
1784 /* Implement the "push_dummy_call" gdbarch method. */
1785
1786 static CORE_ADDR
1787 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1788 struct regcache *regcache, CORE_ADDR bp_addr,
1789 int nargs,
1790 struct value **args, CORE_ADDR sp,
1791 function_call_return_method return_method,
1792 CORE_ADDR struct_addr)
1793 {
1794 int argnum;
1795 struct aarch64_call_info info;
1796
1797 /* We need to know what the type of the called function is in order
1798 to determine the number of named/anonymous arguments for the
1799 actual argument placement, and the return type in order to handle
1800 return value correctly.
1801
1802 The generic code above us views the decision of return in memory
1803 or return in registers as a two stage processes. The language
1804 handler is consulted first and may decide to return in memory (eg
1805 class with copy constructor returned by value), this will cause
1806 the generic code to allocate space AND insert an initial leading
1807 argument.
1808
1809 If the language code does not decide to pass in memory then the
1810 target code is consulted.
1811
1812 If the language code decides to pass in memory we want to move
1813 the pointer inserted as the initial argument from the argument
1814 list and into X8, the conventional AArch64 struct return pointer
1815 register. */
1816
1817 /* Set the return address. For the AArch64, the return breakpoint
1818 is always at BP_ADDR. */
1819 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1820
1821 /* If we were given an initial argument for the return slot, lose it. */
1822 if (return_method == return_method_hidden_param)
1823 {
1824 args++;
1825 nargs--;
1826 }
1827
1828 /* The struct_return pointer occupies X8. */
1829 if (return_method != return_method_normal)
1830 {
1831 aarch64_debug_printf ("struct return in %s = 0x%s",
1832 gdbarch_register_name
1833 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1834 paddress (gdbarch, struct_addr));
1835
1836 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1837 struct_addr);
1838 }
1839
1840 for (argnum = 0; argnum < nargs; argnum++)
1841 {
1842 struct value *arg = args[argnum];
1843 struct type *arg_type, *fundamental_type;
1844 int len, elements;
1845
1846 arg_type = check_typedef (value_type (arg));
1847 len = TYPE_LENGTH (arg_type);
1848
1849 /* If arg can be passed in V registers as per the AAPCS64, then do so
1850 if there are enough spare registers. */
1851 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1852 &fundamental_type))
1853 {
1854 if (info.nsrn + elements <= 8)
1855 {
1856 /* We know that we have sufficient registers available, therefore
1857 this will never need to fall back to the stack. */
1858 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1859 arg))
1860 gdb_assert_not_reached ("Failed to push args");
1861 }
1862 else
1863 {
1864 info.nsrn = 8;
1865 pass_on_stack (&info, arg_type, arg);
1866 }
1867 continue;
1868 }
1869
1870 switch (arg_type->code ())
1871 {
1872 case TYPE_CODE_INT:
1873 case TYPE_CODE_BOOL:
1874 case TYPE_CODE_CHAR:
1875 case TYPE_CODE_RANGE:
1876 case TYPE_CODE_ENUM:
1877 if (len < 4)
1878 {
1879 /* Promote to 32 bit integer. */
1880 if (arg_type->is_unsigned ())
1881 arg_type = builtin_type (gdbarch)->builtin_uint32;
1882 else
1883 arg_type = builtin_type (gdbarch)->builtin_int32;
1884 arg = value_cast (arg_type, arg);
1885 }
1886 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1887 break;
1888
1889 case TYPE_CODE_STRUCT:
1890 case TYPE_CODE_ARRAY:
1891 case TYPE_CODE_UNION:
1892 if (len > 16)
1893 {
1894 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1895 invisible reference. */
1896
1897 /* Allocate aligned storage. */
1898 sp = align_down (sp - len, 16);
1899
1900 /* Write the real data into the stack. */
1901 write_memory (sp, value_contents (arg), len);
1902
1903 /* Construct the indirection. */
1904 arg_type = lookup_pointer_type (arg_type);
1905 arg = value_from_pointer (arg_type, sp);
1906 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1907 }
1908 else
1909 /* PCS C.15 / C.18: small aggregates are passed in registers or on the stack. */
1910 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1911 break;
1912
1913 default:
1914 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1915 break;
1916 }
1917 }
1918
1919 /* Make sure stack retains 16 byte alignment. */
1920 if (info.nsaa & 15)
1921 sp -= 16 - (info.nsaa & 15);
1922
1923 while (!info.si.empty ())
1924 {
1925 const stack_item_t &si = info.si.back ();
1926
1927 sp -= si.len;
1928 if (si.data != NULL)
1929 write_memory (sp, si.data, si.len);
1930 info.si.pop_back ();
1931 }
1932
1933 /* Finally, update the SP register. */
1934 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1935
1936 return sp;
1937 }
1938
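/* As a worked example of the marshalling above, consider a call to

     f (char c, struct big { char buf[24]; } b);

   C is promoted to a 32-bit integer and passed in W0, while B exceeds
   16 bytes, so a copy is written to a 16-byte-aligned slot below SP and
   a pointer to that copy is passed in X1 (PCS B.7).  */
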
1939 /* Implement the "frame_align" gdbarch method. */
1940
1941 static CORE_ADDR
1942 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1943 {
1944 /* Align the stack to sixteen bytes. */
1945 return sp & ~(CORE_ADDR) 15;
1946 }
1947
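/* For example, an unaligned SP value of 0x7ffffffff9 is aligned down to
   0x7ffffffff0, while an already-aligned value such as 0x7ffffffff0 is
   returned unchanged.  */
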
1948 /* Return the type for an AdvSISD Q register. */
1949
1950 static struct type *
1951 aarch64_vnq_type (struct gdbarch *gdbarch)
1952 {
1953 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1954
1955 if (tdep->vnq_type == NULL)
1956 {
1957 struct type *t;
1958 struct type *elem;
1959
1960 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1961 TYPE_CODE_UNION);
1962
1963 elem = builtin_type (gdbarch)->builtin_uint128;
1964 append_composite_type_field (t, "u", elem);
1965
1966 elem = builtin_type (gdbarch)->builtin_int128;
1967 append_composite_type_field (t, "s", elem);
1968
1969 tdep->vnq_type = t;
1970 }
1971
1972 return tdep->vnq_type;
1973 }
1974
1975 /* Return the type for an AdvSISD D register. */
1976
1977 static struct type *
1978 aarch64_vnd_type (struct gdbarch *gdbarch)
1979 {
1980 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1981
1982 if (tdep->vnd_type == NULL)
1983 {
1984 struct type *t;
1985 struct type *elem;
1986
1987 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1988 TYPE_CODE_UNION);
1989
1990 elem = builtin_type (gdbarch)->builtin_double;
1991 append_composite_type_field (t, "f", elem);
1992
1993 elem = builtin_type (gdbarch)->builtin_uint64;
1994 append_composite_type_field (t, "u", elem);
1995
1996 elem = builtin_type (gdbarch)->builtin_int64;
1997 append_composite_type_field (t, "s", elem);
1998
1999 tdep->vnd_type = t;
2000 }
2001
2002 return tdep->vnd_type;
2003 }
2004
2005 /* Return the type for an AdvSISD S register. */
2006
2007 static struct type *
2008 aarch64_vns_type (struct gdbarch *gdbarch)
2009 {
2010 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2011
2012 if (tdep->vns_type == NULL)
2013 {
2014 struct type *t;
2015 struct type *elem;
2016
2017 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2018 TYPE_CODE_UNION);
2019
2020 elem = builtin_type (gdbarch)->builtin_float;
2021 append_composite_type_field (t, "f", elem);
2022
2023 elem = builtin_type (gdbarch)->builtin_uint32;
2024 append_composite_type_field (t, "u", elem);
2025
2026 elem = builtin_type (gdbarch)->builtin_int32;
2027 append_composite_type_field (t, "s", elem);
2028
2029 tdep->vns_type = t;
2030 }
2031
2032 return tdep->vns_type;
2033 }
2034
2035 /* Return the type for an AdvSISD H register. */
2036
2037 static struct type *
2038 aarch64_vnh_type (struct gdbarch *gdbarch)
2039 {
2040 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2041
2042 if (tdep->vnh_type == NULL)
2043 {
2044 struct type *t;
2045 struct type *elem;
2046
2047 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2048 TYPE_CODE_UNION);
2049
2050 elem = builtin_type (gdbarch)->builtin_bfloat16;
2051 append_composite_type_field (t, "bf", elem);
2052
2053 elem = builtin_type (gdbarch)->builtin_half;
2054 append_composite_type_field (t, "f", elem);
2055
2056 elem = builtin_type (gdbarch)->builtin_uint16;
2057 append_composite_type_field (t, "u", elem);
2058
2059 elem = builtin_type (gdbarch)->builtin_int16;
2060 append_composite_type_field (t, "s", elem);
2061
2062 tdep->vnh_type = t;
2063 }
2064
2065 return tdep->vnh_type;
2066 }
2067
2068 /* Return the type for an AdvSISD B register. */
2069
2070 static struct type *
2071 aarch64_vnb_type (struct gdbarch *gdbarch)
2072 {
2073 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2074
2075 if (tdep->vnb_type == NULL)
2076 {
2077 struct type *t;
2078 struct type *elem;
2079
2080 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2081 TYPE_CODE_UNION);
2082
2083 elem = builtin_type (gdbarch)->builtin_uint8;
2084 append_composite_type_field (t, "u", elem);
2085
2086 elem = builtin_type (gdbarch)->builtin_int8;
2087 append_composite_type_field (t, "s", elem);
2088
2089 tdep->vnb_type = t;
2090 }
2091
2092 return tdep->vnb_type;
2093 }
2094
2095 /* Return the type for an AdvSISD V register. */
2096
2097 static struct type *
2098 aarch64_vnv_type (struct gdbarch *gdbarch)
2099 {
2100 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2101
2102 if (tdep->vnv_type == NULL)
2103 {
2104 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2105 slice from the non-pseudo vector registers. However, NEON V registers
2106 are always full vector registers, and need constructing as such. */
2107 const struct builtin_type *bt = builtin_type (gdbarch);
2108
2109 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2110 TYPE_CODE_UNION);
2111
2112 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2113 TYPE_CODE_UNION);
2114 append_composite_type_field (sub, "f",
2115 init_vector_type (bt->builtin_double, 2));
2116 append_composite_type_field (sub, "u",
2117 init_vector_type (bt->builtin_uint64, 2));
2118 append_composite_type_field (sub, "s",
2119 init_vector_type (bt->builtin_int64, 2));
2120 append_composite_type_field (t, "d", sub);
2121
2122 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2123 TYPE_CODE_UNION);
2124 append_composite_type_field (sub, "f",
2125 init_vector_type (bt->builtin_float, 4));
2126 append_composite_type_field (sub, "u",
2127 init_vector_type (bt->builtin_uint32, 4));
2128 append_composite_type_field (sub, "s",
2129 init_vector_type (bt->builtin_int32, 4));
2130 append_composite_type_field (t, "s", sub);
2131
2132 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2133 TYPE_CODE_UNION);
2134 append_composite_type_field (sub, "bf",
2135 init_vector_type (bt->builtin_bfloat16, 8));
2136 append_composite_type_field (sub, "f",
2137 init_vector_type (bt->builtin_half, 8));
2138 append_composite_type_field (sub, "u",
2139 init_vector_type (bt->builtin_uint16, 8));
2140 append_composite_type_field (sub, "s",
2141 init_vector_type (bt->builtin_int16, 8));
2142 append_composite_type_field (t, "h", sub);
2143
2144 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2145 TYPE_CODE_UNION);
2146 append_composite_type_field (sub, "u",
2147 init_vector_type (bt->builtin_uint8, 16));
2148 append_composite_type_field (sub, "s",
2149 init_vector_type (bt->builtin_int8, 16));
2150 append_composite_type_field (t, "b", sub);
2151
2152 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2153 TYPE_CODE_UNION);
2154 append_composite_type_field (sub, "u",
2155 init_vector_type (bt->builtin_uint128, 1));
2156 append_composite_type_field (sub, "s",
2157 init_vector_type (bt->builtin_int128, 1));
2158 append_composite_type_field (t, "q", sub);
2159
2160 tdep->vnv_type = t;
2161 }
2162
2163 return tdep->vnv_type;
2164 }
2165
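/* The union built above is what users see when inspecting a V pseudo
   register; for example "print $v0.d.f[1]" reads lane 1 of V0 viewed as
   two doubles, and "print $v0.b.u" shows all sixteen unsigned bytes.  */
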
2166 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2167
2168 static int
2169 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2170 {
2171 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2172
2173 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2174 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2175
2176 if (reg == AARCH64_DWARF_SP)
2177 return AARCH64_SP_REGNUM;
2178
2179 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2180 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2181
2182 if (reg == AARCH64_DWARF_SVE_VG)
2183 return AARCH64_SVE_VG_REGNUM;
2184
2185 if (reg == AARCH64_DWARF_SVE_FFR)
2186 return AARCH64_SVE_FFR_REGNUM;
2187
2188 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2189 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2190
2191 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2192 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2193
2194 if (tdep->has_pauth ())
2195 {
2196 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2197 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2198
2199 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2200 return tdep->pauth_ra_state_regnum;
2201 }
2202
2203 return -1;
2204 }
2205
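/* For example, DWARF register 0 (AARCH64_DWARF_X0) maps to x0, DWARF
   register 31 (AARCH64_DWARF_SP) maps to SP, and DWARF register 64
   (AARCH64_DWARF_V0) maps to v0.  Any number this function does not
   recognize yields -1, i.e. no corresponding internal register.  */
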
2206 /* Implement the "print_insn" gdbarch method. */
2207
2208 static int
2209 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2210 {
2211 info->symbols = NULL;
2212 return default_print_insn (memaddr, info);
2213 }
2214
2215 /* AArch64 BRK software debug mode instruction.
2216 Note that AArch64 code is always little-endian.
2217 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2218 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2219
2220 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2221
2222 /* Extract from an array REGS containing the (raw) register state a
2223 function return value of type TYPE, and copy that, in virtual
2224 format, into VALBUF. */
2225
2226 static void
2227 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2228 gdb_byte *valbuf)
2229 {
2230 struct gdbarch *gdbarch = regs->arch ();
2231 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2232 int elements;
2233 struct type *fundamental_type;
2234
2235 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2236 &fundamental_type))
2237 {
2238 int len = TYPE_LENGTH (fundamental_type);
2239
2240 for (int i = 0; i < elements; i++)
2241 {
2242 int regno = AARCH64_V0_REGNUM + i;
2243 /* Enough space for a full vector register. */
2244 gdb_byte buf[register_size (gdbarch, regno)];
2245 gdb_assert (len <= sizeof (buf));
2246
2247 aarch64_debug_printf
2248 ("read HFA or HVA return value element %d from %s",
2249 i + 1, gdbarch_register_name (gdbarch, regno));
2250
2251 regs->cooked_read (regno, buf);
2252
2253 memcpy (valbuf, buf, len);
2254 valbuf += len;
2255 }
2256 }
2257 else if (type->code () == TYPE_CODE_INT
2258 || type->code () == TYPE_CODE_CHAR
2259 || type->code () == TYPE_CODE_BOOL
2260 || type->code () == TYPE_CODE_PTR
2261 || TYPE_IS_REFERENCE (type)
2262 || type->code () == TYPE_CODE_ENUM)
2263 {
2264 /* If the type is a plain integer, then the access is
2265 straightforward. Otherwise we have to play around a bit
2266 more. */
2267 int len = TYPE_LENGTH (type);
2268 int regno = AARCH64_X0_REGNUM;
2269 ULONGEST tmp;
2270
2271 while (len > 0)
2272 {
2273 /* By using store_unsigned_integer we avoid having to do
2274 anything special for small big-endian values. */
2275 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2276 store_unsigned_integer (valbuf,
2277 (len > X_REGISTER_SIZE
2278 ? X_REGISTER_SIZE : len), byte_order, tmp);
2279 len -= X_REGISTER_SIZE;
2280 valbuf += X_REGISTER_SIZE;
2281 }
2282 }
2283 else
2284 {
2285 /* For a structure or union the behaviour is as if the value had
2286 been stored to word-aligned memory and then loaded into
2287 registers with 64-bit load instruction(s). */
2288 int len = TYPE_LENGTH (type);
2289 int regno = AARCH64_X0_REGNUM;
2290 bfd_byte buf[X_REGISTER_SIZE];
2291
2292 while (len > 0)
2293 {
2294 regs->cooked_read (regno++, buf);
2295 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2296 len -= X_REGISTER_SIZE;
2297 valbuf += X_REGISTER_SIZE;
2298 }
2299 }
2300 }
2301
2302
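/* For example, a function returning

     struct hfa { float a; float b; };

   is an HFA with two elements, so the first branch above copies four
   bytes from the low part of each of v0 and v1 (i.e. s0 and s1), while
   a plain "long" result is read from x0 by the integer branch.  */
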
2303 /* Will a function return an aggregate type in memory or in a
2304 register? Return 0 if an aggregate type can be returned in a
2305 register, 1 if it must be returned in memory. */
2306
2307 static int
2308 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2309 {
2310 type = check_typedef (type);
2311 int elements;
2312 struct type *fundamental_type;
2313
2314 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2315 &fundamental_type))
2316 {
2317 /* v0-v7 are used to return values and one register is allocated
2318 per member; an HFA or HVA has at most four members anyway. */
2319 return 0;
2320 }
2321
2322 if (TYPE_LENGTH (type) > 16)
2323 {
2324 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2325 invisible reference. */
2326
2327 return 1;
2328 }
2329
2330 return 0;
2331 }
2332
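/* For example, "struct { double d[4]; }" is an HFA with four members
   and is returned in v0-v3, whereas "struct { long l[3]; }" is 24 bytes,
   exceeds the 16-byte limit, and is returned in memory with its address
   passed in X8.  */
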
2333 /* Write into appropriate registers a function return value of type
2334 TYPE, given in virtual format. */
2335
2336 static void
2337 aarch64_store_return_value (struct type *type, struct regcache *regs,
2338 const gdb_byte *valbuf)
2339 {
2340 struct gdbarch *gdbarch = regs->arch ();
2341 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2342 int elements;
2343 struct type *fundamental_type;
2344
2345 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2346 &fundamental_type))
2347 {
2348 int len = TYPE_LENGTH (fundamental_type);
2349
2350 for (int i = 0; i < elements; i++)
2351 {
2352 int regno = AARCH64_V0_REGNUM + i;
2353 /* Enough space for a full vector register. */
2354 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2355 gdb_assert (len <= sizeof (tmpbuf));
2356
2357 aarch64_debug_printf
2358 ("write HFA or HVA return value element %d to %s",
2359 i + 1, gdbarch_register_name (gdbarch, regno));
2360
2361 memcpy (tmpbuf, valbuf,
2362 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2363 regs->cooked_write (regno, tmpbuf);
2364 valbuf += len;
2365 }
2366 }
2367 else if (type->code () == TYPE_CODE_INT
2368 || type->code () == TYPE_CODE_CHAR
2369 || type->code () == TYPE_CODE_BOOL
2370 || type->code () == TYPE_CODE_PTR
2371 || TYPE_IS_REFERENCE (type)
2372 || type->code () == TYPE_CODE_ENUM)
2373 {
2374 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2375 {
2376 /* Values of one word or less are zero/sign-extended and
2377 returned in X0. */
2378 bfd_byte tmpbuf[X_REGISTER_SIZE];
2379 LONGEST val = unpack_long (type, valbuf);
2380
2381 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2382 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2383 }
2384 else
2385 {
2386 /* Integral values greater than one word are stored in
2387 consecutive registers starting with X0. This will always
2388 be a multiple of the register size. */
2389 int len = TYPE_LENGTH (type);
2390 int regno = AARCH64_X0_REGNUM;
2391
2392 while (len > 0)
2393 {
2394 regs->cooked_write (regno++, valbuf);
2395 len -= X_REGISTER_SIZE;
2396 valbuf += X_REGISTER_SIZE;
2397 }
2398 }
2399 }
2400 else
2401 {
2402 /* For a structure or union the behaviour is as if the value had
2403 been stored to word-aligned memory and then loaded into
2404 registers with 64-bit load instruction(s). */
2405 int len = TYPE_LENGTH (type);
2406 int regno = AARCH64_X0_REGNUM;
2407 bfd_byte tmpbuf[X_REGISTER_SIZE];
2408
2409 while (len > 0)
2410 {
2411 memcpy (tmpbuf, valbuf,
2412 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2413 regs->cooked_write (regno++, tmpbuf);
2414 len -= X_REGISTER_SIZE;
2415 valbuf += X_REGISTER_SIZE;
2416 }
2417 }
2418 }
2419
2420 /* Implement the "return_value" gdbarch method. */
2421
2422 static enum return_value_convention
2423 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2424 struct type *valtype, struct regcache *regcache,
2425 gdb_byte *readbuf, const gdb_byte *writebuf)
2426 {
2427
2428 if (valtype->code () == TYPE_CODE_STRUCT
2429 || valtype->code () == TYPE_CODE_UNION
2430 || valtype->code () == TYPE_CODE_ARRAY)
2431 {
2432 if (aarch64_return_in_memory (gdbarch, valtype))
2433 {
2434 aarch64_debug_printf ("return value in memory");
2435 return RETURN_VALUE_STRUCT_CONVENTION;
2436 }
2437 }
2438
2439 if (writebuf)
2440 aarch64_store_return_value (valtype, regcache, writebuf);
2441
2442 if (readbuf)
2443 aarch64_extract_return_value (valtype, regcache, readbuf);
2444
2445 aarch64_debug_printf ("return value in registers");
2446
2447 return RETURN_VALUE_REGISTER_CONVENTION;
2448 }
2449
2450 /* Implement the "get_longjmp_target" gdbarch method. */
2451
2452 static int
2453 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2454 {
2455 CORE_ADDR jb_addr;
2456 gdb_byte buf[X_REGISTER_SIZE];
2457 struct gdbarch *gdbarch = get_frame_arch (frame);
2458 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2459 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2460
2461 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2462
2463 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2464 X_REGISTER_SIZE))
2465 return 0;
2466
2467 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2468 return 1;
2469 }
2470
2471 /* Implement the "gen_return_address" gdbarch method. */
2472
2473 static void
2474 aarch64_gen_return_address (struct gdbarch *gdbarch,
2475 struct agent_expr *ax, struct axs_value *value,
2476 CORE_ADDR scope)
2477 {
2478 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2479 value->kind = axs_lvalue_register;
2480 value->u.reg = AARCH64_LR_REGNUM;
2481 }
2482 \f
2483
2484 /* Return the pseudo register name corresponding to register regnum. */
2485
2486 static const char *
2487 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2488 {
2489 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2490
2491 static const char *const q_name[] =
2492 {
2493 "q0", "q1", "q2", "q3",
2494 "q4", "q5", "q6", "q7",
2495 "q8", "q9", "q10", "q11",
2496 "q12", "q13", "q14", "q15",
2497 "q16", "q17", "q18", "q19",
2498 "q20", "q21", "q22", "q23",
2499 "q24", "q25", "q26", "q27",
2500 "q28", "q29", "q30", "q31",
2501 };
2502
2503 static const char *const d_name[] =
2504 {
2505 "d0", "d1", "d2", "d3",
2506 "d4", "d5", "d6", "d7",
2507 "d8", "d9", "d10", "d11",
2508 "d12", "d13", "d14", "d15",
2509 "d16", "d17", "d18", "d19",
2510 "d20", "d21", "d22", "d23",
2511 "d24", "d25", "d26", "d27",
2512 "d28", "d29", "d30", "d31",
2513 };
2514
2515 static const char *const s_name[] =
2516 {
2517 "s0", "s1", "s2", "s3",
2518 "s4", "s5", "s6", "s7",
2519 "s8", "s9", "s10", "s11",
2520 "s12", "s13", "s14", "s15",
2521 "s16", "s17", "s18", "s19",
2522 "s20", "s21", "s22", "s23",
2523 "s24", "s25", "s26", "s27",
2524 "s28", "s29", "s30", "s31",
2525 };
2526
2527 static const char *const h_name[] =
2528 {
2529 "h0", "h1", "h2", "h3",
2530 "h4", "h5", "h6", "h7",
2531 "h8", "h9", "h10", "h11",
2532 "h12", "h13", "h14", "h15",
2533 "h16", "h17", "h18", "h19",
2534 "h20", "h21", "h22", "h23",
2535 "h24", "h25", "h26", "h27",
2536 "h28", "h29", "h30", "h31",
2537 };
2538
2539 static const char *const b_name[] =
2540 {
2541 "b0", "b1", "b2", "b3",
2542 "b4", "b5", "b6", "b7",
2543 "b8", "b9", "b10", "b11",
2544 "b12", "b13", "b14", "b15",
2545 "b16", "b17", "b18", "b19",
2546 "b20", "b21", "b22", "b23",
2547 "b24", "b25", "b26", "b27",
2548 "b28", "b29", "b30", "b31",
2549 };
2550
2551 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2552
2553 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2554 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2555
2556 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2557 return d_name[p_regnum - AARCH64_D0_REGNUM];
2558
2559 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2560 return s_name[p_regnum - AARCH64_S0_REGNUM];
2561
2562 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2563 return h_name[p_regnum - AARCH64_H0_REGNUM];
2564
2565 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2566 return b_name[p_regnum - AARCH64_B0_REGNUM];
2567
2568 if (tdep->has_sve ())
2569 {
2570 static const char *const sve_v_name[] =
2571 {
2572 "v0", "v1", "v2", "v3",
2573 "v4", "v5", "v6", "v7",
2574 "v8", "v9", "v10", "v11",
2575 "v12", "v13", "v14", "v15",
2576 "v16", "v17", "v18", "v19",
2577 "v20", "v21", "v22", "v23",
2578 "v24", "v25", "v26", "v27",
2579 "v28", "v29", "v30", "v31",
2580 };
2581
2582 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2583 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2584 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2585 }
2586
2587 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2588 prevents it from being read by methods such as
2589 mi_cmd_trace_frame_collected. */
2590 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2591 return "";
2592
2593 internal_error (__FILE__, __LINE__,
2594 _("aarch64_pseudo_register_name: bad register number %d"),
2595 p_regnum);
2596 }
2597
2598 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2599
2600 static struct type *
2601 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2602 {
2603 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2604
2605 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2606
2607 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2608 return aarch64_vnq_type (gdbarch);
2609
2610 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2611 return aarch64_vnd_type (gdbarch);
2612
2613 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2614 return aarch64_vns_type (gdbarch);
2615
2616 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2617 return aarch64_vnh_type (gdbarch);
2618
2619 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2620 return aarch64_vnb_type (gdbarch);
2621
2622 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2623 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2624 return aarch64_vnv_type (gdbarch);
2625
2626 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2627 return builtin_type (gdbarch)->builtin_uint64;
2628
2629 internal_error (__FILE__, __LINE__,
2630 _("aarch64_pseudo_register_type: bad register number %d"),
2631 p_regnum);
2632 }
2633
2634 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2635
2636 static int
2637 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2638 struct reggroup *group)
2639 {
2640 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2641
2642 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2643
2644 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2645 return group == all_reggroup || group == vector_reggroup;
2646 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2647 return (group == all_reggroup || group == vector_reggroup
2648 || group == float_reggroup);
2649 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2650 return (group == all_reggroup || group == vector_reggroup
2651 || group == float_reggroup);
2652 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2653 return group == all_reggroup || group == vector_reggroup;
2654 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2655 return group == all_reggroup || group == vector_reggroup;
2656 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2657 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2658 return group == all_reggroup || group == vector_reggroup;
2659 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2660 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2661 return 0;
2662
2663 return group == all_reggroup;
2664 }
2665
2666 /* Helper for aarch64_pseudo_read_value. */
2667
2668 static struct value *
2669 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2670 readable_regcache *regcache, int regnum_offset,
2671 int regsize, struct value *result_value)
2672 {
2673 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2674
2675 /* Enough space for a full vector register. */
2676 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2677 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2678
2679 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2680 mark_value_bytes_unavailable (result_value, 0,
2681 TYPE_LENGTH (value_type (result_value)));
2682 else
2683 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2684
2685 return result_value;
2686 }
2687
2688 /* Implement the "pseudo_register_read_value" gdbarch method. */
2689
2690 static struct value *
2691 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2692 int regnum)
2693 {
2694 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2695 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2696
2697 VALUE_LVAL (result_value) = lval_register;
2698 VALUE_REGNUM (result_value) = regnum;
2699
2700 regnum -= gdbarch_num_regs (gdbarch);
2701
2702 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2703 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2704 regnum - AARCH64_Q0_REGNUM,
2705 Q_REGISTER_SIZE, result_value);
2706
2707 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2708 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2709 regnum - AARCH64_D0_REGNUM,
2710 D_REGISTER_SIZE, result_value);
2711
2712 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2713 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2714 regnum - AARCH64_S0_REGNUM,
2715 S_REGISTER_SIZE, result_value);
2716
2717 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2718 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2719 regnum - AARCH64_H0_REGNUM,
2720 H_REGISTER_SIZE, result_value);
2721
2722 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2723 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2724 regnum - AARCH64_B0_REGNUM,
2725 B_REGISTER_SIZE, result_value);
2726
2727 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2728 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2729 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2730 regnum - AARCH64_SVE_V0_REGNUM,
2731 V_REGISTER_SIZE, result_value);
2732
2733 gdb_assert_not_reached ("regnum out of bounds");
2734 }
2735
2736 /* Helper for aarch64_pseudo_write. */
2737
2738 static void
2739 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2740 int regnum_offset, int regsize, const gdb_byte *buf)
2741 {
2742 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2743
2744 /* Enough space for a full vector register. */
2745 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2746 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2747
2748 /* Ensure the register buffer is zero; we want GDB writes of the
2749 various 'scalar' pseudo registers to behave like architectural
2750 writes: register-width bytes are written and the remainder is set
2751 to zero. */
2752 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2753
2754 memcpy (reg_buf, buf, regsize);
2755 regcache->raw_write (v_regnum, reg_buf);
2756 }
2757
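/* For example, writing 0x3f800000 to the pseudo register S0 stores
   those four bytes in the low part of V0 and zeroes the rest of the
   underlying vector register, mirroring an architectural write to
   S0.  */
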
2758 /* Implement the "pseudo_register_write" gdbarch method. */
2759
2760 static void
2761 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2762 int regnum, const gdb_byte *buf)
2763 {
2764 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2765 regnum -= gdbarch_num_regs (gdbarch);
2766
2767 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2768 return aarch64_pseudo_write_1 (gdbarch, regcache,
2769 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2770 buf);
2771
2772 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2773 return aarch64_pseudo_write_1 (gdbarch, regcache,
2774 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2775 buf);
2776
2777 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2778 return aarch64_pseudo_write_1 (gdbarch, regcache,
2779 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2780 buf);
2781
2782 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2783 return aarch64_pseudo_write_1 (gdbarch, regcache,
2784 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2785 buf);
2786
2787 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2788 return aarch64_pseudo_write_1 (gdbarch, regcache,
2789 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2790 buf);
2791
2792 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2793 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2794 return aarch64_pseudo_write_1 (gdbarch, regcache,
2795 regnum - AARCH64_SVE_V0_REGNUM,
2796 V_REGISTER_SIZE, buf);
2797
2798 gdb_assert_not_reached ("regnum out of bounds");
2799 }
2800
2801 /* Callback function for user_reg_add. */
2802
2803 static struct value *
2804 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2805 {
2806 const int *reg_p = (const int *) baton;
2807
2808 return value_of_register (*reg_p, frame);
2809 }
2810 \f
2811
2812 /* Implement the "software_single_step" gdbarch method, needed to
2813 single step through atomic sequences on AArch64. */
2814
2815 static std::vector<CORE_ADDR>
2816 aarch64_software_single_step (struct regcache *regcache)
2817 {
2818 struct gdbarch *gdbarch = regcache->arch ();
2819 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2820 const int insn_size = 4;
2821 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2822 CORE_ADDR pc = regcache_read_pc (regcache);
2823 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2824 CORE_ADDR loc = pc;
2825 CORE_ADDR closing_insn = 0;
2826 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2827 byte_order_for_code);
2828 int index;
2829 int insn_count;
2830 int bc_insn_count = 0; /* Conditional branch instruction count. */
2831 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2832 aarch64_inst inst;
2833
2834 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2835 return {};
2836
2837 /* Look for a Load Exclusive instruction which begins the sequence. */
2838 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2839 return {};
2840
2841 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2842 {
2843 loc += insn_size;
2844 insn = read_memory_unsigned_integer (loc, insn_size,
2845 byte_order_for_code);
2846
2847 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2848 return {};
2849 /* Check if the instruction is a conditional branch. */
2850 if (inst.opcode->iclass == condbranch)
2851 {
2852 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2853
2854 if (bc_insn_count >= 1)
2855 return {};
2856
2857 /* It is, so we'll try to set a breakpoint at the destination. */
2858 breaks[1] = loc + inst.operands[0].imm.value;
2859
2860 bc_insn_count++;
2861 last_breakpoint++;
2862 }
2863
2864 /* Look for the Store Exclusive which closes the atomic sequence. */
2865 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2866 {
2867 closing_insn = loc;
2868 break;
2869 }
2870 }
2871
2872 /* We didn't find a closing Store Exclusive instruction, fall back. */
2873 if (!closing_insn)
2874 return {};
2875
2876 /* Insert breakpoint after the end of the atomic sequence. */
2877 breaks[0] = loc + insn_size;
2878
2879 /* Check for duplicated breakpoints, and also check that the second
2880 breakpoint is not within the atomic sequence. */
2881 if (last_breakpoint
2882 && (breaks[1] == breaks[0]
2883 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2884 last_breakpoint = 0;
2885
2886 std::vector<CORE_ADDR> next_pcs;
2887
2888 /* Insert the breakpoint at the end of the sequence, and one at the
2889 destination of the conditional branch, if it exists. */
2890 for (index = 0; index <= last_breakpoint; index++)
2891 next_pcs.push_back (breaks[index]);
2892
2893 return next_pcs;
2894 }
2895
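/* A typical sequence this function handles looks like:

     retry: ldaxr w1, [x0]      ; Load Exclusive opens the sequence.
            add   w1, w1, #1
            stlxr w2, w1, [x0]  ; Store Exclusive closes it.
            cbnz  w2, retry

   Placing a breakpoint inside the sequence would clear the exclusive
   monitor, so the breakpoint goes right after the closing Store
   Exclusive instead (and, for a B.cond found within the sequence, a
   second one at its destination).  */
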
2896 struct aarch64_displaced_step_copy_insn_closure
2897 : public displaced_step_copy_insn_closure
2898 {
2899 /* True when a conditional instruction, such as B.COND, TBZ or CBZ,
2900 is being displaced stepped. */
2901 bool cond = false;
2902
2903 /* PC adjustment offset after displaced stepping. If 0, then we don't
2904 write the PC back, assuming the PC is already the right address. */
2905 int32_t pc_adjust = 0;
2906 };
2907
2908 /* Data when visiting instructions for displaced stepping. */
2909
2910 struct aarch64_displaced_step_data
2911 {
2912 struct aarch64_insn_data base;
2913
2914 /* The address at which the instruction will be executed. */
2915 CORE_ADDR new_addr;
2916 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2917 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2918 /* Number of instructions in INSN_BUF. */
2919 unsigned insn_count;
2920 /* Registers when doing displaced stepping. */
2921 struct regcache *regs;
2922
2923 aarch64_displaced_step_copy_insn_closure *dsc;
2924 };
2925
2926 /* Implementation of aarch64_insn_visitor method "b". */
2927
2928 static void
2929 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2930 struct aarch64_insn_data *data)
2931 {
2932 struct aarch64_displaced_step_data *dsd
2933 = (struct aarch64_displaced_step_data *) data;
2934 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2935
2936 if (can_encode_int32 (new_offset, 28))
2937 {
2938 /* Emit B rather than BL, because executing BL on a new address
2939 will get the wrong address into LR. In order to avoid this,
2940 we emit B, and update LR if the instruction is BL. */
2941 emit_b (dsd->insn_buf, 0, new_offset);
2942 dsd->insn_count++;
2943 }
2944 else
2945 {
2946 /* Write NOP. */
2947 emit_nop (dsd->insn_buf);
2948 dsd->insn_count++;
2949 dsd->dsc->pc_adjust = offset;
2950 }
2951
2952 if (is_bl)
2953 {
2954 /* Update LR. */
2955 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2956 data->insn_addr + 4);
2957 }
2958 }
2959
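/* For example, displaced stepping a "BL target" copied from 0x400100
   to a scratch pad at 0x400800 emits a plain B whose offset is adjusted
   by the -0x700 displacement, and LR is written by hand with 0x400104
   so the eventual return still lands after the original BL.  */
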
2960 /* Implementation of aarch64_insn_visitor method "b_cond". */
2961
2962 static void
2963 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2964 struct aarch64_insn_data *data)
2965 {
2966 struct aarch64_displaced_step_data *dsd
2967 = (struct aarch64_displaced_step_data *) data;
2968
2969 /* GDB has to fix up the PC after displaced stepping this instruction
2970 differently according to whether the condition is true or false.
2971 Instead of checking COND against the condition flags, we can use
2972 the following instruction sequence, and GDB can tell how to fix up
2973 the PC according to the resulting PC value.
2974
2975 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2976 INSN1 ;
2977 TAKEN:
2978 INSN2
2979 */
2980
2981 emit_bcond (dsd->insn_buf, cond, 8);
2982 dsd->dsc->cond = true;
2983 dsd->dsc->pc_adjust = offset;
2984 dsd->insn_count = 1;
2985 }
2986
2987 /* Dynamically build an aarch64_register operand. If we know the
2988 register statically, we should make it a global as above instead of
2989 using this helper function. */
2990
2991 static struct aarch64_register
2992 aarch64_register (unsigned num, int is64)
2993 {
2994 return (struct aarch64_register) { num, is64 };
2995 }
2996
2997 /* Implementation of aarch64_insn_visitor method "cb". */
2998
2999 static void
3000 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3001 const unsigned rn, int is64,
3002 struct aarch64_insn_data *data)
3003 {
3004 struct aarch64_displaced_step_data *dsd
3005 = (struct aarch64_displaced_step_data *) data;
3006
3007 /* The offset is out of range for a compare and branch
3008 instruction. We can use the following instructions instead:
3009
3010 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3011 INSN1 ;
3012 TAKEN:
3013 INSN2
3014 */
3015 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3016 dsd->insn_count = 1;
3017 dsd->dsc->cond = true;
3018 dsd->dsc->pc_adjust = offset;
3019 }
3020
3021 /* Implementation of aarch64_insn_visitor method "tb". */
3022
3023 static void
3024 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3025 const unsigned rt, unsigned bit,
3026 struct aarch64_insn_data *data)
3027 {
3028 struct aarch64_displaced_step_data *dsd
3029 = (struct aarch64_displaced_step_data *) data;
3030
3031 /* The offset is out of range for a test bit and branch
3032 instruction. We can use the following instructions instead:
3033
3034 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3035 INSN1 ;
3036 TAKEN:
3037 INSN2
3038
3039 */
3040 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3041 dsd->insn_count = 1;
3042 dsd->dsc->cond = true;
3043 dsd->dsc->pc_adjust = offset;
3044 }
3045
3046 /* Implementation of aarch64_insn_visitor method "adr". */
3047
3048 static void
3049 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3050 const int is_adrp, struct aarch64_insn_data *data)
3051 {
3052 struct aarch64_displaced_step_data *dsd
3053 = (struct aarch64_displaced_step_data *) data;
3054 /* We know exactly the address the ADR{P,} instruction will compute.
3055 We can just write it to the destination register. */
3056 CORE_ADDR address = data->insn_addr + offset;
3057
3058 if (is_adrp)
3059 {
3060 /* Clear the lower 12 bits of the offset to get the 4K page. */
3061 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3062 address & ~0xfff);
3063 }
3064 else
3065 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3066 address);
3067
3068 dsd->dsc->pc_adjust = 4;
3069 emit_nop (dsd->insn_buf);
3070 dsd->insn_count = 1;
3071 }
3072
3073 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3074
3075 static void
3076 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3077 const unsigned rt, const int is64,
3078 struct aarch64_insn_data *data)
3079 {
3080 struct aarch64_displaced_step_data *dsd
3081 = (struct aarch64_displaced_step_data *) data;
3082 CORE_ADDR address = data->insn_addr + offset;
3083 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3084
3085 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3086 address);
3087
3088 if (is_sw)
3089 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3090 aarch64_register (rt, 1), zero);
3091 else
3092 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3093 aarch64_register (rt, 1), zero);
3094
3095 dsd->dsc->pc_adjust = 4;
3096 }
3097
3098 /* Implementation of aarch64_insn_visitor method "others". */
3099
3100 static void
3101 aarch64_displaced_step_others (const uint32_t insn,
3102 struct aarch64_insn_data *data)
3103 {
3104 struct aarch64_displaced_step_data *dsd
3105 = (struct aarch64_displaced_step_data *) data;
3106
3107 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3108 if (masked_insn == BLR)
3109 {
3110 /* Emit a BR to the same register and then update LR to the original
3111 address (similar to aarch64_displaced_step_b). */
3112 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3113 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3114 data->insn_addr + 4);
3115 }
3116 else
3117 aarch64_emit_insn (dsd->insn_buf, insn);
3118 dsd->insn_count = 1;
3119
3120 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3121 dsd->dsc->pc_adjust = 0;
3122 else
3123 dsd->dsc->pc_adjust = 4;
3124 }
3125
3126 static const struct aarch64_insn_visitor visitor =
3127 {
3128 aarch64_displaced_step_b,
3129 aarch64_displaced_step_b_cond,
3130 aarch64_displaced_step_cb,
3131 aarch64_displaced_step_tb,
3132 aarch64_displaced_step_adr,
3133 aarch64_displaced_step_ldr_literal,
3134 aarch64_displaced_step_others,
3135 };
3136
3137 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3138
3139 displaced_step_copy_insn_closure_up
3140 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3141 CORE_ADDR from, CORE_ADDR to,
3142 struct regcache *regs)
3143 {
3144 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3145 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3146 struct aarch64_displaced_step_data dsd;
3147 aarch64_inst inst;
3148
3149 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3150 return NULL;
3151
3152 /* Look for a Load Exclusive instruction which begins the sequence. */
3153 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3154 {
3155 /* We can't displaced-step atomic sequences. */
3156 return NULL;
3157 }
3158
3159 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3160 (new aarch64_displaced_step_copy_insn_closure);
3161 dsd.base.insn_addr = from;
3162 dsd.new_addr = to;
3163 dsd.regs = regs;
3164 dsd.dsc = dsc.get ();
3165 dsd.insn_count = 0;
3166 aarch64_relocate_instruction (insn, &visitor,
3167 (struct aarch64_insn_data *) &dsd);
3168 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3169
3170 if (dsd.insn_count != 0)
3171 {
3172 int i;
3173
3174 /* Instruction can be relocated to scratch pad. Copy
3175 relocated instruction(s) there. */
3176 for (i = 0; i < dsd.insn_count; i++)
3177 {
3178 displaced_debug_printf ("writing insn %.8x at %s",
3179 dsd.insn_buf[i],
3180 paddress (gdbarch, to + i * 4));
3181
3182 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3183 (ULONGEST) dsd.insn_buf[i]);
3184 }
3185 }
3186 else
3187 {
3188 dsc = NULL;
3189 }
3190
3191 /* This is a workaround for a problem with g++ 4.8. */
3192 return displaced_step_copy_insn_closure_up (dsc.release ());
3193 }
3194
3195 /* Implement the "displaced_step_fixup" gdbarch method. */
3196
3197 void
3198 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3199 struct displaced_step_copy_insn_closure *dsc_,
3200 CORE_ADDR from, CORE_ADDR to,
3201 struct regcache *regs)
3202 {
3203 aarch64_displaced_step_copy_insn_closure *dsc
3204 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3205
3206 ULONGEST pc;
3207
3208 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3209
3210 displaced_debug_printf ("PC after stepping: %s (was %s).",
3211 paddress (gdbarch, pc), paddress (gdbarch, to));
3212
3213 if (dsc->cond)
3214 {
3215 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3216 dsc->pc_adjust);
3217
3218 if (pc - to == 8)
3219 {
3220 /* Condition is true. */
3221 }
3222 else if (pc - to == 4)
3223 {
3224 /* Condition is false. */
3225 dsc->pc_adjust = 4;
3226 }
3227 else
3228 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3229
3230 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3231 dsc->pc_adjust);
3232 }
3233
3234 displaced_debug_printf ("%s PC by %d",
3235 dsc->pc_adjust ? "adjusting" : "not adjusting",
3236 dsc->pc_adjust);
3237
3238 if (dsc->pc_adjust != 0)
3239 {
3240 /* Make sure the previous instruction was executed (that is, the PC
3241 has changed). If the PC didn't change, then discard the adjustment
3242 offset. Otherwise we may skip an instruction before its execution
3243 took place. */
3244 if ((pc - to) == 0)
3245 {
3246 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3247 dsc->pc_adjust = 0;
3248 }
3249
3250 displaced_debug_printf ("fixup: set PC to %s:%d",
3251 paddress (gdbarch, from), dsc->pc_adjust);
3252
3253 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3254 from + dsc->pc_adjust);
3255 }
3256 }
3257
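/* For example, after displaced stepping a B.EQ the closure has COND
   set: a PC of TO + 8 means the branch was taken and the original
   offset is applied, while TO + 4 means it was not taken and PC_ADJUST
   is forced to 4, so the PC is written back as FROM + 4.  */
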
3258 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3259
3260 bool
3261 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3262 {
3263 return true;
3264 }
3265
3266 /* Get the correct target description for the given VQ value.
3267 If VQ is zero then it is assumed SVE is not supported.
3268 (It is not possible to set VQ to zero on an SVE system).
3269
3270 PAUTH_P and MTE_P indicate the presence of the Pointer Authentication and Memory Tagging Extension features, respectively. */
3271
3272 const target_desc *
3273 aarch64_read_description (uint64_t vq, bool pauth_p, bool mte_p)
3274 {
3275 if (vq > AARCH64_MAX_SVE_VQ)
3276 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3277 AARCH64_MAX_SVE_VQ);
3278
3279 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p][mte_p];
3280
3281 if (tdesc == NULL)
3282 {
3283 tdesc = aarch64_create_target_description (vq, pauth_p, mte_p);
3284 tdesc_aarch64_list[vq][pauth_p][mte_p] = tdesc;
3285 }
3286
3287 return tdesc;
3288 }
3289
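/* For example, aarch64_read_description (2, false, false) returns (and
   caches in tdesc_aarch64_list) a description for an SVE target with
   VQ = 2, i.e. 32-byte Z registers, without pauth or MTE.  */
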
3290 /* Return the VQ used when creating the target description TDESC. */
3291
3292 static uint64_t
3293 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3294 {
3295 const struct tdesc_feature *feature_sve;
3296
3297 if (!tdesc_has_registers (tdesc))
3298 return 0;
3299
3300 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3301
3302 if (feature_sve == nullptr)
3303 return 0;
3304
3305 uint64_t vl = tdesc_register_bitsize (feature_sve,
3306 aarch64_sve_register_names[0]) / 8;
3307 return sve_vq_from_vl (vl);
3308 }
3309
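/* For example, if the description reports Z0 as 512 bits wide, the
   vector length is 64 bytes and the returned VQ is 4, a quadword being
   16 bytes.  */
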
3310 /* Add all the expected register sets into GDBARCH. */
3311
3312 static void
3313 aarch64_add_reggroups (struct gdbarch *gdbarch)
3314 {
3315 reggroup_add (gdbarch, general_reggroup);
3316 reggroup_add (gdbarch, float_reggroup);
3317 reggroup_add (gdbarch, system_reggroup);
3318 reggroup_add (gdbarch, vector_reggroup);
3319 reggroup_add (gdbarch, all_reggroup);
3320 reggroup_add (gdbarch, save_reggroup);
3321 reggroup_add (gdbarch, restore_reggroup);
3322 }
3323
3324 /* Implement the "cannot_store_register" gdbarch method. */
3325
3326 static int
3327 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3328 {
3329 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3330
3331 if (!tdep->has_pauth ())
3332 return 0;
3333
3334 /* Pointer authentication registers are read-only. */
3335 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3336 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3337 }
3338
3339 /* Initialize the current architecture based on INFO. If possible,
3340 re-use an architecture from ARCHES, which is a list of
3341 architectures already created during this debugging session.
3342
3343 Called e.g. at program startup, when reading a core file, and when
3344 reading a binary file. */
3345
3346 static struct gdbarch *
3347 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3348 {
3349 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3350 const struct tdesc_feature *feature_pauth;
3351 bool valid_p = true;
3352 int i, num_regs = 0, num_pseudo_regs = 0;
3353 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3354 int first_mte_regnum = -1;
3355
3356 /* Use the vector length passed via the target info. Here -1 is used for no
3357 SVE, and 0 is unset. If unset then use the vector length from the existing
3358 tdesc. */
3359 uint64_t vq = 0;
3360 if (info.id == (int *) -1)
3361 vq = 0;
3362 else if (info.id != 0)
3363 vq = (uint64_t) info.id;
3364 else
3365 vq = aarch64_get_tdesc_vq (info.target_desc);
3366
3367 if (vq > AARCH64_MAX_SVE_VQ)
3368 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3369 pulongest (vq), AARCH64_MAX_SVE_VQ);
3370
3371 /* If there is already a candidate, use it. */
3372 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3373 best_arch != nullptr;
3374 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3375 {
3376 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3377 if (tdep && tdep->vq == vq)
3378 return best_arch->gdbarch;
3379 }
3380
3381 /* Ensure we always have a target descriptor, and that it is for the given VQ
3382 value. */
3383 const struct target_desc *tdesc = info.target_desc;
3384 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3385 tdesc = aarch64_read_description (vq, false, false);
3386 gdb_assert (tdesc);
3387
3388 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3389 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3390 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3391 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3392 const struct tdesc_feature *feature_mte
3393 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
3394
3395 if (feature_core == nullptr)
3396 return nullptr;
3397
3398 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3399
3400 /* Validate the description provides the mandatory core R registers
3401 and allocate their numbers. */
3402 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3403 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3404 AARCH64_X0_REGNUM + i,
3405 aarch64_r_register_names[i]);
3406
3407 num_regs = AARCH64_X0_REGNUM + i;
3408
3409 /* Add the V registers. */
3410 if (feature_fpu != nullptr)
3411 {
3412 if (feature_sve != nullptr)
3413 error (_("Program contains both fpu and SVE features."));
3414
3415 /* Validate the description provides the mandatory V registers
3416 and allocate their numbers. */
3417 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3418 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3419 AARCH64_V0_REGNUM + i,
3420 aarch64_v_register_names[i]);
3421
3422 num_regs = AARCH64_V0_REGNUM + i;
3423 }
3424
3425 /* Add the SVE registers. */
3426 if (feature_sve != nullptr)
3427 {
3428 /* Validate the description provides the mandatory SVE registers
3429 and allocate their numbers. */
3430 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3431 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3432 AARCH64_SVE_Z0_REGNUM + i,
3433 aarch64_sve_register_names[i]);
3434
3435 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3436 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3437 }
3438
3439 if (feature_fpu != nullptr || feature_sve != nullptr)
3440 {
3441 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3442 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3443 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3444 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3445 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3446 }
3447
3448 /* Add the pauth registers. */
3449 if (feature_pauth != NULL)
3450 {
3451 first_pauth_regnum = num_regs;
3452 pauth_ra_state_offset = num_pseudo_regs;
3453 /* Validate the descriptor provides the mandatory PAUTH registers and
3454 allocate their numbers. */
3455 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3456 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3457 first_pauth_regnum + i,
3458 aarch64_pauth_register_names[i]);
3459
3460 num_regs += i;
3461 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3462 }
3463
3464 /* Add the MTE registers. */
3465 if (feature_mte != NULL)
3466 {
3467 first_mte_regnum = num_regs;
3468 /* Validate the descriptor provides the mandatory MTE registers and
3469 allocate their numbers. */
3470 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3471 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3472 first_mte_regnum + i,
3473 aarch64_mte_register_names[i]);
3474
3475 num_regs += i;
3476 }
3477
3478 if (!valid_p)
3479 return nullptr;
3480
3481 /* AArch64 code is always little-endian. */
3482 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3483
3484 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3485 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3486
3487 /* This should be low enough for everything. */
3488 tdep->lowest_pc = 0x20;
3489 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3490 tdep->jb_elt_size = 8;
3491 tdep->vq = vq;
3492 tdep->pauth_reg_base = first_pauth_regnum;
3493 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3494 : pauth_ra_state_offset + num_regs;
3495 tdep->mte_reg_base = first_mte_regnum;
3496
3497 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3498 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3499
3500 /* Advance PC across function entry code. */
3501 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3502
3503 /* The stack grows downward. */
3504 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3505
3506 /* Breakpoint manipulation. */
3507 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3508 aarch64_breakpoint::kind_from_pc);
3509 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3510 aarch64_breakpoint::bp_from_kind);
3511 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3512 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3513
3514 /* Information about registers, etc. */
3515 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3516 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3517 set_gdbarch_num_regs (gdbarch, num_regs);
3518
3519 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3520 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3521 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3522 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3523 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3524 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3525 aarch64_pseudo_register_reggroup_p);
3526 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3527
3528 /* ABI */
3529 set_gdbarch_short_bit (gdbarch, 16);
3530 set_gdbarch_int_bit (gdbarch, 32);
3531 set_gdbarch_float_bit (gdbarch, 32);
3532 set_gdbarch_double_bit (gdbarch, 64);
3533 set_gdbarch_long_double_bit (gdbarch, 128);
3534 set_gdbarch_long_bit (gdbarch, 64);
3535 set_gdbarch_long_long_bit (gdbarch, 64);
3536 set_gdbarch_ptr_bit (gdbarch, 64);
3537 set_gdbarch_char_signed (gdbarch, 0);
3538 set_gdbarch_wchar_signed (gdbarch, 0);
3539 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3540 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3541 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3542 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3543
3544 /* Internal <-> external register number maps. */
3545 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3546
3547 /* Returning results. */
3548 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3549
3550 /* Disassembly. */
3551 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3552
3553 /* Virtual tables. */
3554 set_gdbarch_vbit_in_delta (gdbarch, 1);
3555
3556 /* Register architecture. */
3557 aarch64_add_reggroups (gdbarch);
3558
3559 /* Hook in the ABI-specific overrides, if they have been registered. */
3560 info.target_desc = tdesc;
3561 info.tdesc_data = tdesc_data.get ();
3562 gdbarch_init_osabi (info, gdbarch);
3563
3564 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3565 /* Register DWARF CFA vendor handler. */
3566 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3567 aarch64_execute_dwarf_cfa_vendor_op);
3568
3569 /* Permanent/Program breakpoint handling. */
3570 set_gdbarch_program_breakpoint_here_p (gdbarch,
3571 aarch64_program_breakpoint_here_p);
3572
3573 /* Add some default predicates. */
3574 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3575 dwarf2_append_unwinders (gdbarch);
3576 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
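/* Unwinders are tried in the order they were appended: the stub
unwinder first, then DWARF CFI, with the prologue analyzer as the
fallback of last resort. */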
3577
3578 frame_base_set_default (gdbarch, &aarch64_normal_base);
3579
3580 /* Now that we have tuned the configuration, set a few final things
3581 based on what the OS ABI has told us. */
3582
3583 if (tdep->jb_pc >= 0)
3584 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3585
3586 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3587
3588 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3589
3590 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3591
3592 /* Add standard register aliases. */
3593 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3594 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3595 value_of_aarch64_user_reg,
3596 &aarch64_register_aliases[i].regnum);
3597
3598 register_aarch64_ravenscar_ops (gdbarch);
3599
3600 return gdbarch;
3601 }
3602
3603 static void
3604 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3605 {
3606 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3607
3608 if (tdep == NULL)
3609 return;
3610
3611 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
3612 paddress (gdbarch, tdep->lowest_pc));
3613 }
3614
3615 #if GDB_SELF_TEST
3616 namespace selftests
3617 {
3618 static void aarch64_process_record_test (void);
3619 }
3620 #endif
3621
3622 void _initialize_aarch64_tdep ();
3623 void
3624 _initialize_aarch64_tdep ()
3625 {
3626 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3627 aarch64_dump_tdep);
3628
3629 /* Debug this file's internals. */
3630 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3631 Set AArch64 debugging."), _("\
3632 Show AArch64 debugging."), _("\
3633 When on, AArch64 specific debugging is enabled."),
3634 NULL,
3635 show_aarch64_debug,
3636 &setdebuglist, &showdebuglist);
3637
3638 #if GDB_SELF_TEST
3639 selftests::register_test ("aarch64-analyze-prologue",
3640 selftests::aarch64_analyze_prologue_test);
3641 selftests::register_test ("aarch64-process-record",
3642 selftests::aarch64_process_record_test);
3643 #endif
3644 }
3645
3646 /* AArch64 process record-replay related structures, defines, etc. */
3647
3648 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3649 do \
3650 { \
3651 unsigned int reg_len = LENGTH; \
3652 if (reg_len) \
3653 { \
3654 REGS = XNEWVEC (uint32_t, reg_len); \
3655 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3656 } \
3657 } \
3658 while (0)
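/* Typical use, as in the record handlers below: a handler gathers the
numbers of the registers an insn writes into a local record_buf[] and
then publishes them with
REG_ALLOC (aarch64_insn_r->aarch64_regs,
aarch64_insn_r->reg_rec_count, record_buf);
REGS is left untouched when LENGTH is zero. */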
3659
3660 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3661 do \
3662 { \
3663 unsigned int mem_len = LENGTH; \
3664 if (mem_len) \
3665 { \
3666 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3667 memcpy (&MEMS->len, &RECORD_BUF[0], \
3668 sizeof (struct aarch64_mem_r) * LENGTH); \
3669 } \
3670 } \
3671 while (0)
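/* For MEM_ALLOC, RECORD_BUF is a flat array of (length, address) pairs
whose layout matches struct aarch64_mem_r below; the handlers append
two entries per memory record. */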
3672
3673 /* AArch64 record/replay structures and enumerations. */
3674
3675 struct aarch64_mem_r
3676 {
3677 uint64_t len; /* Record length. */
3678 uint64_t addr; /* Memory address. */
3679 };
3680
3681 enum aarch64_record_result
3682 {
3683 AARCH64_RECORD_SUCCESS,
3684 AARCH64_RECORD_UNSUPPORTED,
3685 AARCH64_RECORD_UNKNOWN
3686 };
3687
3688 typedef struct insn_decode_record_t
3689 {
3690 struct gdbarch *gdbarch;
3691 struct regcache *regcache;
3692 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3693 uint32_t aarch64_insn; /* Insn to be recorded. */
3694 uint32_t mem_rec_count; /* Count of memory records. */
3695 uint32_t reg_rec_count; /* Count of register records. */
3696 uint32_t *aarch64_regs; /* Registers to be recorded. */
3697 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3698 } insn_decode_record;
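/* Lifecycle: the caller fills in gdbarch, regcache, this_addr and
aarch64_insn, passes the record to aarch64_record_decode_insn_handler,
consumes aarch64_regs/aarch64_mems, and finally frees both with
deallocate_reg_mem. See aarch64_process_record below. */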
3699
3700 /* Record handler for data processing - register instructions. */
3701
3702 static unsigned int
3703 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3704 {
3705 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3706 uint32_t record_buf[4];
3707
3708 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3709 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3710 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3711
3712 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3713 {
3714 uint8_t setflags;
3715
3716 /* Logical (shifted register). */
3717 if (insn_bits24_27 == 0x0a)
3718 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3719 /* Add/subtract. */
3720 else if (insn_bits24_27 == 0x0b)
3721 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3722 else
3723 return AARCH64_RECORD_UNKNOWN;
3724
3725 record_buf[0] = reg_rd;
3726 aarch64_insn_r->reg_rec_count = 1;
3727 if (setflags)
3728 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3729 }
3730 else
3731 {
3732 if (insn_bits24_27 == 0x0b)
3733 {
3734 /* Data-processing (3 source). */
3735 record_buf[0] = reg_rd;
3736 aarch64_insn_r->reg_rec_count = 1;
3737 }
3738 else if (insn_bits24_27 == 0x0a)
3739 {
3740 if (insn_bits21_23 == 0x00)
3741 {
3742 /* Add/subtract (with carry). */
3743 record_buf[0] = reg_rd;
3744 aarch64_insn_r->reg_rec_count = 1;
3745 if (bit (aarch64_insn_r->aarch64_insn, 29))
3746 {
3747 record_buf[1] = AARCH64_CPSR_REGNUM;
3748 aarch64_insn_r->reg_rec_count = 2;
3749 }
3750 }
3751 else if (insn_bits21_23 == 0x02)
3752 {
3753 /* Conditional compare (register) and conditional compare
3754 (immediate) instructions. */
3755 record_buf[0] = AARCH64_CPSR_REGNUM;
3756 aarch64_insn_r->reg_rec_count = 1;
3757 }
3758 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3759 {
3760 /* Conditional select. */
3761 /* Data-processing (2 source). */
3762 /* Data-processing (1 source). */
3763 record_buf[0] = reg_rd;
3764 aarch64_insn_r->reg_rec_count = 1;
3765 }
3766 else
3767 return AARCH64_RECORD_UNKNOWN;
3768 }
3769 }
3770
3771 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3772 record_buf);
3773 return AARCH64_RECORD_SUCCESS;
3774 }
3775
3776 /* Record handler for data processing - immediate instructions. */
3777
3778 static unsigned int
3779 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3780 {
3781 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3782 uint32_t record_buf[4];
3783
3784 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3785 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3786 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3787
3788 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3789 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3790 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3791 {
3792 record_buf[0] = reg_rd;
3793 aarch64_insn_r->reg_rec_count = 1;
3794 }
3795 else if (insn_bits24_27 == 0x01)
3796 {
3797 /* Add/Subtract (immediate). */
3798 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3799 record_buf[0] = reg_rd;
3800 aarch64_insn_r->reg_rec_count = 1;
3801 if (setflags)
3802 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3803 }
3804 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3805 {
3806 /* Logical (immediate). */
3807 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3808 record_buf[0] = reg_rd;
3809 aarch64_insn_r->reg_rec_count = 1;
3810 if (setflags)
3811 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3812 }
3813 else
3814 return AARCH64_RECORD_UNKNOWN;
3815
3816 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3817 record_buf);
3818 return AARCH64_RECORD_SUCCESS;
3819 }
3820
3821 /* Record handler for branch, exception generation and system instructions. */
3822
3823 static unsigned int
3824 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3825 {
3826 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3827 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3828 uint32_t record_buf[4];
3829
3830 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3831 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3832 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3833
3834 if (insn_bits28_31 == 0x0d)
3835 {
3836 /* Exception generation instructions. */
3837 if (insn_bits24_27 == 0x04)
3838 {
3839 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3840 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3841 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3842 {
3843 ULONGEST svc_number;
3844
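/* On AArch64 GNU/Linux the system call number is passed in x8
(raw register 8); hand it to the OS-specific syscall recorder. */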
3845 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3846 &svc_number);
3847 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3848 svc_number);
3849 }
3850 else
3851 return AARCH64_RECORD_UNSUPPORTED;
3852 }
3853 /* System instructions. */
3854 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3855 {
3856 uint32_t reg_rt, reg_crn;
3857
3858 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3859 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3860
3861 /* Record rt in case of sysl and mrs instructions. */
3862 if (bit (aarch64_insn_r->aarch64_insn, 21))
3863 {
3864 record_buf[0] = reg_rt;
3865 aarch64_insn_r->reg_rec_count = 1;
3866 }
3867 /* Record cpsr for hint and msr (immediate) instructions. */
3868 else if (reg_crn == 0x02 || reg_crn == 0x04)
3869 {
3870 record_buf[0] = AARCH64_CPSR_REGNUM;
3871 aarch64_insn_r->reg_rec_count = 1;
3872 }
3873 }
3874 /* Unconditional branch (register). */
3875 else if ((insn_bits24_27 & 0x0e) == 0x06)
3876 {
3877 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3878 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3879 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3880 }
3881 else
3882 return AARCH64_RECORD_UNKNOWN;
3883 }
3884 /* Unconditional branch (immediate). */
3885 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3886 {
3887 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3888 if (bit (aarch64_insn_r->aarch64_insn, 31))
3889 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3890 }
3891 else
3892 /* Compare & branch (immediate), Test & branch (immediate) and
3893 Conditional branch (immediate). */
3894 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3895
3896 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3897 record_buf);
3898 return AARCH64_RECORD_SUCCESS;
3899 }
3900
3901 /* Record handler for advanced SIMD load and store instructions. */
3902
3903 static unsigned int
3904 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3905 {
3906 CORE_ADDR address;
3907 uint64_t addr_offset = 0;
3908 uint32_t record_buf[24];
3909 uint64_t record_buf_mem[24];
3910 uint32_t reg_rn, reg_rt;
3911 uint32_t reg_index = 0, mem_index = 0;
3912 uint8_t opcode_bits, size_bits;
3913
3914 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3915 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3916 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3917 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3918 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3919
3920 if (record_debug)
3921 debug_printf ("Process record: Advanced SIMD load/store\n");
3922
3923 /* Load/store single structure. */
3924 if (bit (aarch64_insn_r->aarch64_insn, 24))
3925 {
3926 uint8_t sindex, scale, selem, esize, replicate = 0;
3927 scale = opcode_bits >> 2;
3928 selem = ((opcode_bits & 0x02)
3929 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
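/* selem is the number of structure elements per access (1..4), built
from opcode<1> and bit 21, plus one; scale comes from opcode<3:2> and
feeds the element size below. */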
3930 switch (scale)
3931 {
3932 case 1:
3933 if (size_bits & 0x01)
3934 return AARCH64_RECORD_UNKNOWN;
3935 break;
3936 case 2:
3937 if ((size_bits >> 1) & 0x01)
3938 return AARCH64_RECORD_UNKNOWN;
3939 if (size_bits & 0x01)
3940 {
3941 if (!((opcode_bits >> 1) & 0x01))
3942 scale = 3;
3943 else
3944 return AARCH64_RECORD_UNKNOWN;
3945 }
3946 break;
3947 case 3:
3948 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3949 {
3950 scale = size_bits;
3951 replicate = 1;
3952 break;
3953 }
3954 else
3955 return AARCH64_RECORD_UNKNOWN;
3956 default:
3957 break;
3958 }
3959 esize = 8 << scale;
3960 if (replicate)
3961 for (sindex = 0; sindex < selem; sindex++)
3962 {
3963 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3964 reg_rt = (reg_rt + 1) % 32;
3965 }
3966 else
3967 {
3968 for (sindex = 0; sindex < selem; sindex++)
3969 {
3970 if (bit (aarch64_insn_r->aarch64_insn, 22))
3971 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3972 else
3973 {
3974 record_buf_mem[mem_index++] = esize / 8;
3975 record_buf_mem[mem_index++] = address + addr_offset;
3976 }
3977 addr_offset = addr_offset + (esize / 8);
3978 reg_rt = (reg_rt + 1) % 32;
3979 }
3980 }
3981 }
3982 /* Load/store multiple structure. */
3983 else
3984 {
3985 uint8_t selem, esize, rpt, elements;
3986 uint8_t eindex, rindex;
3987
3988 esize = 8 << size_bits;
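/* Bit 30 is the Q bit: a 128-bit vector when set, 64-bit otherwise. */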
3989 if (bit (aarch64_insn_r->aarch64_insn, 30))
3990 elements = 128 / esize;
3991 else
3992 elements = 64 / esize;
3993
3994 switch (opcode_bits)
3995 {
3996 /* LD/ST4 (4 Registers). */
3997 case 0:
3998 rpt = 1;
3999 selem = 4;
4000 break;
4001 /* LD/ST1 (4 Registers). */
4002 case 2:
4003 rpt = 4;
4004 selem = 1;
4005 break;
4006 /* LD/ST3 (3 Registers). */
4007 case 4:
4008 rpt = 1;
4009 selem = 3;
4010 break;
4011 /* LD/ST1 (3 Registers). */
4012 case 6:
4013 rpt = 3;
4014 selem = 1;
4015 break;
4016 /* LD/ST1 (1 Register). */
4017 case 7:
4018 rpt = 1;
4019 selem = 1;
4020 break;
4021 /* LD/ST2 (2 Registers). */
4022 case 8:
4023 rpt = 1;
4024 selem = 2;
4025 break;
4026 /* LD/ST1 (2 Registers). */
4027 case 10:
4028 rpt = 2;
4029 selem = 1;
4030 break;
4031 default:
4032 return AARCH64_RECORD_UNSUPPORTED;
4034 }
4035 for (rindex = 0; rindex < rpt; rindex++)
4036 for (eindex = 0; eindex < elements; eindex++)
4037 {
4038 uint8_t reg_tt, sindex;
4039 reg_tt = (reg_rt + rindex) % 32;
4040 for (sindex = 0; sindex < selem; sindex++)
4041 {
4042 if (bit (aarch64_insn_r->aarch64_insn, 22))
4043 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4044 else
4045 {
4046 record_buf_mem[mem_index++] = esize / 8;
4047 record_buf_mem[mem_index++] = address + addr_offset;
4048 }
4049 addr_offset = addr_offset + (esize / 8);
4050 reg_tt = (reg_tt + 1) % 32;
4051 }
4052 }
4053 }
4054
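/* Writeback forms (bit 23 set) also update the base register Rn. */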
4055 if (bit (aarch64_insn_r->aarch64_insn, 23))
4056 record_buf[reg_index++] = reg_rn;
4057
4058 aarch64_insn_r->reg_rec_count = reg_index;
4059 aarch64_insn_r->mem_rec_count = mem_index / 2;
4060 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4061 record_buf_mem);
4062 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4063 record_buf);
4064 return AARCH64_RECORD_SUCCESS;
4065 }
4066
4067 /* Record handler for load and store instructions. */
4068
4069 static unsigned int
4070 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4071 {
4072 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4073 uint8_t insn_bit23, insn_bit21;
4074 uint8_t opc, size_bits, ld_flag, vector_flag;
4075 uint32_t reg_rn, reg_rt, reg_rt2;
4076 uint64_t datasize, offset;
4077 uint32_t record_buf[8];
4078 uint64_t record_buf_mem[8];
4079 CORE_ADDR address;
4080
4081 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4082 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4083 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4084 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4085 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4086 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4087 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4088 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4089 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4090 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4091 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4092
4093 /* Load/store exclusive. */
4094 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4095 {
4096 if (record_debug)
4097 debug_printf ("Process record: load/store exclusive\n");
4098
4099 if (ld_flag)
4100 {
4101 record_buf[0] = reg_rt;
4102 aarch64_insn_r->reg_rec_count = 1;
4103 if (insn_bit21)
4104 {
4105 record_buf[1] = reg_rt2;
4106 aarch64_insn_r->reg_rec_count = 2;
4107 }
4108 }
4109 else
4110 {
4111 if (insn_bit21)
4112 datasize = (8 << size_bits) * 2;
4113 else
4114 datasize = (8 << size_bits);
4115 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4116 &address);
4117 record_buf_mem[0] = datasize / 8;
4118 record_buf_mem[1] = address;
4119 aarch64_insn_r->mem_rec_count = 1;
4120 if (!insn_bit23)
4121 {
4122 /* A store exclusive also writes its status result to register
4123 Rs (bits 16-20); record it. */
4123 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4124 aarch64_insn_r->reg_rec_count = 1;
4125 }
4126 }
4127 }
4128 /* Load register (literal) instructions. */
4129 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4130 {
4131 if (record_debug)
4132 debug_printf ("Process record: load register (literal)\n");
4133 if (vector_flag)
4134 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4135 else
4136 record_buf[0] = reg_rt;
4137 aarch64_insn_r->reg_rec_count = 1;
4138 }
4139 /* All types of load/store pair instructions. */
4140 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4141 {
4142 if (record_debug)
4143 debug_printf ("Process record: load/store pair\n");
4144
4145 if (ld_flag)
4146 {
4147 if (vector_flag)
4148 {
4149 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4150 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4151 }
4152 else
4153 {
4154 record_buf[0] = reg_rt;
4155 record_buf[1] = reg_rt2;
4156 }
4157 aarch64_insn_r->reg_rec_count = 2;
4158 }
4159 else
4160 {
4161 uint16_t imm7_off;
4162 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4163 if (!vector_flag)
4164 size_bits = size_bits >> 1;
4165 datasize = 8 << (2 + size_bits);
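/* imm7 is signed; bit 0x40 is its sign bit, so a negative offset is
recovered by two's complement before scaling. */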
4166 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4167 offset = offset << (2 + size_bits);
4168 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4169 &address);
4170 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4171 {
4172 if (imm7_off & 0x40)
4173 address = address - offset;
4174 else
4175 address = address + offset;
4176 }
4177
4178 record_buf_mem[0] = datasize / 8;
4179 record_buf_mem[1] = address;
4180 record_buf_mem[2] = datasize / 8;
4181 record_buf_mem[3] = address + (datasize / 8);
4182 aarch64_insn_r->mem_rec_count = 2;
4183 }
4184 if (bit (aarch64_insn_r->aarch64_insn, 23))
4185 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4186 }
4187 /* Load/store register (unsigned immediate) instructions. */
4188 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4189 {
4190 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4191 if (!(opc >> 1))
4192 {
4193 if (opc & 0x01)
4194 ld_flag = 0x01;
4195 else
4196 ld_flag = 0x0;
4197 }
4198 else
4199 {
4200 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4201 {
4202 /* PRFM (immediate). */
4203 return AARCH64_RECORD_SUCCESS;
4204 }
4205 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4206 {
4207 /* LDRSW (immediate). */
4208 ld_flag = 0x1;
4209 }
4210 else
4211 {
4212 if (opc & 0x01)
4213 ld_flag = 0x01;
4214 else
4215 ld_flag = 0x0;
4216 }
4217 }
4218
4219 if (record_debug)
4220 {
4221 debug_printf ("Process record: load/store (unsigned immediate):"
4222 " size %x V %d opc %x\n", size_bits, vector_flag,
4223 opc);
4224 }
4225
4226 if (!ld_flag)
4227 {
4228 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4229 datasize = 8 << size_bits;
4230 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4231 &address);
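/* The unsigned imm12 offset is scaled by the transfer size before it
is added to the base. */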
4232 offset = offset << size_bits;
4233 address = address + offset;
4234
4235 record_buf_mem[0] = datasize >> 3;
4236 record_buf_mem[1] = address;
4237 aarch64_insn_r->mem_rec_count = 1;
4238 }
4239 else
4240 {
4241 if (vector_flag)
4242 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4243 else
4244 record_buf[0] = reg_rt;
4245 aarch64_insn_r->reg_rec_count = 1;
4246 }
4247 }
4248 /* Load/store register (register offset) instructions. */
4249 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4250 && insn_bits10_11 == 0x02 && insn_bit21)
4251 {
4252 if (record_debug)
4253 debug_printf ("Process record: load/store (register offset)\n");
4254 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4255 if (!(opc >> 1))
4256 if (opc & 0x01)
4257 ld_flag = 0x01;
4258 else
4259 ld_flag = 0x0;
4260 else
4261 if (size_bits != 0x03)
4262 ld_flag = 0x01;
4263 else
4264 return AARCH64_RECORD_UNKNOWN;
4265
4266 if (!ld_flag)
4267 {
4268 ULONGEST reg_rm_val;
4269
4270 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4271 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
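/* When the S bit (bit 12) is set, the index register Rm is scaled by
the transfer size. */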
4272 if (bit (aarch64_insn_r->aarch64_insn, 12))
4273 offset = reg_rm_val << size_bits;
4274 else
4275 offset = reg_rm_val;
4276 datasize = 8 << size_bits;
4277 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4278 &address);
4279 address = address + offset;
4280 record_buf_mem[0] = datasize >> 3;
4281 record_buf_mem[1] = address;
4282 aarch64_insn_r->mem_rec_count = 1;
4283 }
4284 else
4285 {
4286 if (vector_flag)
4287 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4288 else
4289 record_buf[0] = reg_rt;
4290 aarch64_insn_r->reg_rec_count = 1;
4291 }
4292 }
4293 /* Load/store register (immediate and unprivileged) instructions. */
4294 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4295 && !insn_bit21)
4296 {
4297 if (record_debug)
4298 {
4299 debug_printf ("Process record: load/store "
4300 "(immediate and unprivileged)\n");
4301 }
4302 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4303 if (!(opc >> 1))
4304 if (opc & 0x01)
4305 ld_flag = 0x01;
4306 else
4307 ld_flag = 0x0;
4308 else
4309 if (size_bits != 0x03)
4310 ld_flag = 0x01;
4311 else
4312 return AARCH64_RECORD_UNKNOWN;
4313
4314 if (!ld_flag)
4315 {
4316 uint16_t imm9_off;
4317 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4318 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4319 datasize = 8 << size_bits;
4320 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4321 &address);
4322 if (insn_bits10_11 != 0x01)
4323 {
4324 if (imm9_off & 0x0100)
4325 address = address - offset;
4326 else
4327 address = address + offset;
4328 }
4329 record_buf_mem[0] = datasize >> 3;
4330 record_buf_mem[1] = address;
4331 aarch64_insn_r->mem_rec_count = 1;
4332 }
4333 else
4334 {
4335 if (vector_flag)
4336 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4337 else
4338 record_buf[0] = reg_rt;
4339 aarch64_insn_r->reg_rec_count = 1;
4340 }
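/* Bits 10-11 select writeback: post-index (0b01) and pre-index (0b11)
forms also update the base register Rn. */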
4341 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4342 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4343 }
4344 /* Advanced SIMD load/store instructions. */
4345 else
4346 return aarch64_record_asimd_load_store (aarch64_insn_r);
4347
4348 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4349 record_buf_mem);
4350 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4351 record_buf);
4352 return AARCH64_RECORD_SUCCESS;
4353 }
4354
4355 /* Record handler for data processing SIMD and floating point instructions. */
4356
4357 static unsigned int
4358 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4359 {
4360 uint8_t insn_bit21, opcode, rmode, reg_rd;
4361 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4362 uint8_t insn_bits11_14;
4363 uint32_t record_buf[2];
4364
4365 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4366 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4367 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4368 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4369 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4370 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4371 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4372 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4373 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4374
4375 if (record_debug)
4376 debug_printf ("Process record: data processing SIMD/FP: ");
4377
4378 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4379 {
4380 /* Floating point - fixed point conversion instructions. */
4381 if (!insn_bit21)
4382 {
4383 if (record_debug)
4384 debug_printf ("FP - fixed point conversion");
4385
4386 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4387 record_buf[0] = reg_rd;
4388 else
4389 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4390 }
4391 /* Floating point - conditional compare instructions. */
4392 else if (insn_bits10_11 == 0x01)
4393 {
4394 if (record_debug)
4395 debug_printf ("FP - conditional compare");
4396
4397 record_buf[0] = AARCH64_CPSR_REGNUM;
4398 }
4399 /* Floating point - data processing (2-source) and
4400 conditional select instructions. */
4401 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4402 {
4403 if (record_debug)
4404 debug_printf ("FP - DP (2-source)");
4405
4406 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4407 }
4408 else if (insn_bits10_11 == 0x00)
4409 {
4410 /* Floating point - immediate instructions. */
4411 if ((insn_bits12_15 & 0x01) == 0x01
4412 || (insn_bits12_15 & 0x07) == 0x04)
4413 {
4414 if (record_debug)
4415 debug_printf ("FP - immediate");
4416 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4417 }
4418 /* Floating point - compare instructions. */
4419 else if ((insn_bits12_15 & 0x03) == 0x02)
4420 {
4421 if (record_debug)
4422 debug_printf ("FP - compare");
4423 record_buf[0] = AARCH64_CPSR_REGNUM;
4424 }
4425 /* Floating point - integer conversions instructions. */
4426 else if (insn_bits12_15 == 0x00)
4427 {
4428 /* Convert float to integer instruction. */
4429 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4430 {
4431 if (record_debug)
4432 debug_printf ("float to int conversion");
4433
4434 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4435 }
4436 /* Convert integer to float instruction. */
4437 else if ((opcode >> 1) == 0x01 && !rmode)
4438 {
4439 if (record_debug)
4440 debug_printf ("int to float conversion");
4441
4442 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4443 }
4444 /* Move float to integer instruction. */
4445 else if ((opcode >> 1) == 0x03)
4446 {
4447 if (record_debug)
4448 debug_printf ("move float to int");
4449
4450 if (!(opcode & 0x01))
4451 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4452 else
4453 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4454 }
4455 else
4456 return AARCH64_RECORD_UNKNOWN;
4457 }
4458 else
4459 return AARCH64_RECORD_UNKNOWN;
4460 }
4461 else
4462 return AARCH64_RECORD_UNKNOWN;
4463 }
4464 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4465 {
4466 if (record_debug)
4467 debug_printf ("SIMD copy");
4468
4469 /* Advanced SIMD copy instructions. */
4470 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4471 && !bit (aarch64_insn_r->aarch64_insn, 15)
4472 && bit (aarch64_insn_r->aarch64_insn, 10))
4473 {
4474 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4475 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4476 else
4477 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4478 }
4479 else
4480 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4481 }
4482 /* All remaining floating point or advanced SIMD instructions. */
4483 else
4484 {
4485 if (record_debug)
4486 debug_printf ("all remain");
4487
4488 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4489 }
4490
4491 if (record_debug)
4492 debug_printf ("\n");
4493
4494 /* Record the V/X register. */
4495 aarch64_insn_r->reg_rec_count++;
4496
4497 /* Some of these instructions may set bits in the FPSR, so record it
4498 too. */
4499 record_buf[1] = AARCH64_FPSR_REGNUM;
4500 aarch64_insn_r->reg_rec_count++;
4501
4502 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4503 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4504 record_buf);
4505 return AARCH64_RECORD_SUCCESS;
4506 }
4507
4508 /* Decode the instruction's type and invoke its record handler. */
4509
4510 static unsigned int
4511 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4512 {
4513 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4514
4515 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4516 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4517 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4518 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4519
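/* Dispatch on bits 25-28, mirroring the top-level encoding groups of
the A64 ISA; each predicate below selects one record handler. */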
4520 /* Data processing - immediate instructions. */
4521 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4522 return aarch64_record_data_proc_imm (aarch64_insn_r);
4523
4524 /* Branch, exception generation and system instructions. */
4525 if (ins_bit26 && !ins_bit27 && ins_bit28)
4526 return aarch64_record_branch_except_sys (aarch64_insn_r);
4527
4528 /* Load and store instructions. */
4529 if (!ins_bit25 && ins_bit27)
4530 return aarch64_record_load_store (aarch64_insn_r);
4531
4532 /* Data processing - register instructions. */
4533 if (ins_bit25 && !ins_bit26 && ins_bit27)
4534 return aarch64_record_data_proc_reg (aarch64_insn_r);
4535
4536 /* Data processing - SIMD and floating point instructions. */
4537 if (ins_bit25 && ins_bit26 && ins_bit27)
4538 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4539
4540 return AARCH64_RECORD_UNSUPPORTED;
4541 }
4542
4543 /* Cleans up local record registers and memory allocations. */
4544
4545 static void
4546 deallocate_reg_mem (insn_decode_record *record)
4547 {
4548 xfree (record->aarch64_regs);
4549 xfree (record->aarch64_mems);
4550 }
4551
4552 #if GDB_SELF_TEST
4553 namespace selftests {
4554
4555 static void
4556 aarch64_process_record_test (void)
4557 {
4558 struct gdbarch_info info;
4559 uint32_t ret;
4560
4561 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4562
4563 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4564 SELF_CHECK (gdbarch != NULL);
4565
4566 insn_decode_record aarch64_record;
4567
4568 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4569 aarch64_record.regcache = NULL;
4570 aarch64_record.this_addr = 0;
4571 aarch64_record.gdbarch = gdbarch;
4572
4573 /* 20 00 80 f9 prfm pldl1keep, [x1] */
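/* PRFM is only a hint to the memory system, so a successful record
must log no register or memory writes. */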
4574 aarch64_record.aarch64_insn = 0xf9800020;
4575 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4576 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4577 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4578 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4579
4580 deallocate_reg_mem (&aarch64_record);
4581 }
4582
4583 } // namespace selftests
4584 #endif /* GDB_SELF_TEST */
4585
4586 /* Parse the current instruction and record the values of the registers and
4587 memory that will be changed by the current instruction to record_arch_list.
4588 Return -1 if something is wrong. */
4589
4590 int
4591 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4592 CORE_ADDR insn_addr)
4593 {
4594 uint32_t rec_no = 0;
4595 uint8_t insn_size = 4;
4596 uint32_t ret = 0;
4597 gdb_byte buf[insn_size];
4598 insn_decode_record aarch64_record;
4599
4600 memset (&buf[0], 0, insn_size);
4601 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4602 target_read_memory (insn_addr, &buf[0], insn_size);
4603 aarch64_record.aarch64_insn
4604 = (uint32_t) extract_unsigned_integer (&buf[0],
4605 insn_size,
4606 gdbarch_byte_order (gdbarch));
4607 aarch64_record.regcache = regcache;
4608 aarch64_record.this_addr = insn_addr;
4609 aarch64_record.gdbarch = gdbarch;
4610
4611 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4612 if (ret == AARCH64_RECORD_UNSUPPORTED)
4613 {
4614 printf_unfiltered (_("Process record does not support instruction "
4615 "0x%0x at address %s.\n"),
4616 aarch64_record.aarch64_insn,
4617 paddress (gdbarch, insn_addr));
4618 ret = -1;
4619 }
4620
4621 if (0 == ret)
4622 {
4623 /* Record registers. */
4624 record_full_arch_list_add_reg (aarch64_record.regcache,
4625 AARCH64_PC_REGNUM);
4626 /* Always record register CPSR. */
4627 record_full_arch_list_add_reg (aarch64_record.regcache,
4628 AARCH64_CPSR_REGNUM);
4629 if (aarch64_record.aarch64_regs)
4630 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4631 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4632 aarch64_record.aarch64_regs[rec_no]))
4633 ret = -1;
4634
4635 /* Record memories. */
4636 if (aarch64_record.aarch64_mems)
4637 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4638 if (record_full_arch_list_add_mem
4639 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4640 aarch64_record.aarch64_mems[rec_no].len))
4641 ret = -1;
4642
4643 if (record_full_arch_list_add_end ())
4644 ret = -1;
4645 }
4646
4647 deallocate_reg_mem (&aarch64_record);
4648 return ret;
4649 }