/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "common/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

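/* For example, bits (insn, 5, 9) evaluates to (insn >> 5) & 0x1f, i.e. the
   five-bit field occupying bits 5..9 of INSN inclusive, while bit (insn, 31)
   tests the top bit alone.  */
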
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
			      struct frame_info *this_frame,
			      CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;
    }

  return addr;
}

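/* For illustration only: assuming the target reports a code mask of
   0xff7f000000000000 (a plausible value for a 48-bit VA configuration; the
   real value comes from the pauth_cmask register), a signed LR of
   0x2d0000000040051c unmasks to the real return address 0x000000000040051c.  */
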
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only the bottom 64 bits of each V register (the D register)
		 need to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), 8,
		       regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  bool is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only the bottom 64 bits of each V register (the D register)
		 need to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else
	    {
	      if (aarch64_debug)
		debug_printf ("aarch64: prologue analysis gave up addr=%s"
			      " opcode=0x%x (iclass)\n",
			      core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    trad_frame_set_value (cache->saved_regs,
				  tdep->pauth_ra_state_regnum,
				  ra_state_val);
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
	0x910003fd, /* mov x29, sp */
	0xf801c3f3, /* str x19, [sp, #28] */
	0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].addr == -1);
	}

      if (tdep->has_pauth ())
	{
	  SELF_CHECK (trad_frame_value_p (cache.saved_regs,
					  tdep->pauth_ra_state_regnum));
	  SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
	}
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && trad_frame_value_p (cache->saved_regs,
				 tdep->pauth_ra_state_regnum))
	lr = aarch64_frame_unmask_address (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable,
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

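/* For example, code built with -mbranch-protection=pac-ret typically emits
   .cfi_negate_ra_state immediately after paciasp (and again after autiasp),
   so each evaluation of this vendor op toggles RA_STATE and tells the
   unwinder whether the return address saved at that point is signed.  */
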
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same as for
	 scalar types), but cap the maximum alignment at 128 bits.  */
      if (TYPE_LENGTH (t) > 16)
	return 16;
      else
	return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

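/* So, for instance, a 16-byte NEON vector gets its natural 16-byte
   alignment, while a hypothetical 32-byte GNU vector (vector_size (32))
   is still capped at 16 bytes; every non-vector type falls through to
   the generic alignment code.  */
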
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (TYPE_VECTOR (type))
	  {
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < TYPE_NFIELDS (type); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&TYPE_FIELD (type, i)))
	      continue;

	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	/* Ensure there is no padding between the fields (allowing for empty
	   zero-length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   A candidate, as per AAPCS64 5.4.2.C, is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A composite type
     where all the members are floats and which has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A composite type
     where all the members are short vectors and which has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

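/* For example, struct { float x, y, z; } is an HFA: the walk above yields
   count 3 with fundamental type float, so it occupies three consecutive V
   registers.  By contrast, struct { float f; double d; } fails the check
   because the base types of its members differ.  */
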
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}

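/* To make the bookkeeping concrete: pushing a 12-byte struct whose type has
   8-byte alignment leaves NSAA at 12, so a 4-byte padding item (data == NULL)
   is queued after it, bringing NSAA to 16 and restoring the alignment for
   the next stacked argument.  */
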
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto the
   stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare registers.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

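/* For instance, a 24-byte struct needs nregs == 3; with NGRN already at 6
   it no longer fits (6 + 3 > 8), so it goes on the stack and NGRN is pinned
   to 8, ensuring no later argument is back-filled into X registers.  */
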
/* Pass a value, which is of type arg_type, in a V register.  Assumes the value
   is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&TYPE_FIELD (arg_type, i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}

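/* As an example of the recursion above: a _Complex double argument is split
   into its real and imaginary halves, each 8 bytes, landing in two
   consecutive V registers; an HFA such as struct { float x, y, z; } recurses
   once per member and occupies three V registers.  */
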
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in V registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available, therefore
		 this will never need to fall back to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

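/* E.g. an incoming SP of 0x7ffffffff9 is rounded down to 0x7ffffffff0,
   preserving the AAPCS64 requirement that SP be 16-byte aligned when GDB
   lays out a dummy frame.  */
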
/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_half;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

1934 /* Return the type for an AdvSIMD V register. */
1935
1936 static struct type *
1937 aarch64_vnv_type (struct gdbarch *gdbarch)
1938 {
1939 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1940
1941 if (tdep->vnv_type == NULL)
1942 {
1943 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
1944 slice from the non-pseudo vector registers. However NEON V registers
1945 are always vector registers, and need constructing as such. */
1946 const struct builtin_type *bt = builtin_type (gdbarch);
1947
1948 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1949 TYPE_CODE_UNION);
1950
1951 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1952 TYPE_CODE_UNION);
1953 append_composite_type_field (sub, "f",
1954 init_vector_type (bt->builtin_double, 2));
1955 append_composite_type_field (sub, "u",
1956 init_vector_type (bt->builtin_uint64, 2));
1957 append_composite_type_field (sub, "s",
1958 init_vector_type (bt->builtin_int64, 2));
1959 append_composite_type_field (t, "d", sub);
1960
1961 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1962 TYPE_CODE_UNION);
1963 append_composite_type_field (sub, "f",
1964 init_vector_type (bt->builtin_float, 4));
1965 append_composite_type_field (sub, "u",
1966 init_vector_type (bt->builtin_uint32, 4));
1967 append_composite_type_field (sub, "s",
1968 init_vector_type (bt->builtin_int32, 4));
1969 append_composite_type_field (t, "s", sub);
1970
1971 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1972 TYPE_CODE_UNION);
1973 append_composite_type_field (sub, "f",
1974 init_vector_type (bt->builtin_half, 8));
1975 append_composite_type_field (sub, "u",
1976 init_vector_type (bt->builtin_uint16, 8));
1977 append_composite_type_field (sub, "s",
1978 init_vector_type (bt->builtin_int16, 8));
1979 append_composite_type_field (t, "h", sub);
1980
1981 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1982 TYPE_CODE_UNION);
1983 append_composite_type_field (sub, "u",
1984 init_vector_type (bt->builtin_uint8, 16));
1985 append_composite_type_field (sub, "s",
1986 init_vector_type (bt->builtin_int8, 16));
1987 append_composite_type_field (t, "b", sub);
1988
1989 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1990 TYPE_CODE_UNION);
1991 append_composite_type_field (sub, "u",
1992 init_vector_type (bt->builtin_uint128, 1));
1993 append_composite_type_field (sub, "s",
1994 init_vector_type (bt->builtin_int128, 1));
1995 append_composite_type_field (t, "q", sub);
1996
1997 tdep->vnv_type = t;
1998 }
1999
2000 return tdep->vnv_type;
2001 }
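
/* A sketch of the two-level access the nested unions above provide;
   the session below is hypothetical:

     (gdb) print $v0.d.f      -- two doubles
     (gdb) print/x $v0.b.u    -- sixteen unsigned bytes
     (gdb) print $v0.h.f[3]   -- the fourth half-float element  */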
2002
2003 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2004
2005 static int
2006 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2007 {
2008 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2009
2010 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2011 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2012
2013 if (reg == AARCH64_DWARF_SP)
2014 return AARCH64_SP_REGNUM;
2015
2016 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2017 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2018
2019 if (reg == AARCH64_DWARF_SVE_VG)
2020 return AARCH64_SVE_VG_REGNUM;
2021
2022 if (reg == AARCH64_DWARF_SVE_FFR)
2023 return AARCH64_SVE_FFR_REGNUM;
2024
2025 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2026 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2027
2028 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 31)
2029 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2030
2031 if (tdep->has_pauth ())
2032 {
2033 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2034 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2035
2036 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2037 return tdep->pauth_ra_state_regnum;
2038 }
2039
2040 return -1;
2041 }
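
/* For example, DWARF register 0 (AARCH64_DWARF_X0) maps to
   AARCH64_X0_REGNUM and DWARF register 64 (AARCH64_DWARF_V0) maps to
   AARCH64_V0_REGNUM, while any number outside the ranges handled
   above yields -1, i.e. "no such register".  */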
2042
2043 /* Implement the "print_insn" gdbarch method. */
2044
2045 static int
2046 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2047 {
2048 info->symbols = NULL;
2049 return default_print_insn (memaddr, info);
2050 }
2051
2052 /* AArch64 BRK software debug mode instruction.
2053 Note that AArch64 code is always little-endian.
2054 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2055 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2056
2057 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
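
/* Decoding the encoding quoted above: 0xd4200000 is BRK #0 -- the
   BRK opcode in bits [31:21], a zero 16-bit comment immediate in
   bits [20:5], and zeroes in bits [4:0].  */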
2058
2059 /* Extract from an array REGS containing the (raw) register state a
2060 function return value of type TYPE, and copy that, in virtual
2061 format, into VALBUF. */
2062
2063 static void
2064 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2065 gdb_byte *valbuf)
2066 {
2067 struct gdbarch *gdbarch = regs->arch ();
2068 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2069 int elements;
2070 struct type *fundamental_type;
2071
2072 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2073 &fundamental_type))
2074 {
2075 int len = TYPE_LENGTH (fundamental_type);
2076
2077 for (int i = 0; i < elements; i++)
2078 {
2079 int regno = AARCH64_V0_REGNUM + i;
2080 /* Enough space for a full vector register. */
2081 gdb_byte buf[register_size (gdbarch, regno)];
2082 gdb_assert (len <= sizeof (buf));
2083
2084 if (aarch64_debug)
2085 {
2086 debug_printf ("read HFA or HVA return value element %d from %s\n",
2087 i + 1,
2088 gdbarch_register_name (gdbarch, regno));
2089 }
2090 regs->cooked_read (regno, buf);
2091
2092 memcpy (valbuf, buf, len);
2093 valbuf += len;
2094 }
2095 }
2096 else if (TYPE_CODE (type) == TYPE_CODE_INT
2097 || TYPE_CODE (type) == TYPE_CODE_CHAR
2098 || TYPE_CODE (type) == TYPE_CODE_BOOL
2099 || TYPE_CODE (type) == TYPE_CODE_PTR
2100 || TYPE_IS_REFERENCE (type)
2101 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2102 {
2103 /* If the type is a plain integer, then the access is
2104 straightforward. Otherwise we have to play around a bit
2105 more. */
2106 int len = TYPE_LENGTH (type);
2107 int regno = AARCH64_X0_REGNUM;
2108 ULONGEST tmp;
2109
2110 while (len > 0)
2111 {
2112 /* By using store_unsigned_integer we avoid having to do
2113 anything special for small big-endian values. */
2114 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2115 store_unsigned_integer (valbuf,
2116 (len > X_REGISTER_SIZE
2117 ? X_REGISTER_SIZE : len), byte_order, tmp);
2118 len -= X_REGISTER_SIZE;
2119 valbuf += X_REGISTER_SIZE;
2120 }
2121 }
2122 else
2123 {
2124 /* For a structure or union the behaviour is as if the value had
2125 been stored to word-aligned memory and then loaded into
2126 registers with 64-bit load instruction(s). */
2127 int len = TYPE_LENGTH (type);
2128 int regno = AARCH64_X0_REGNUM;
2129 bfd_byte buf[X_REGISTER_SIZE];
2130
2131 while (len > 0)
2132 {
2133 regs->cooked_read (regno++, buf);
2134 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2135 len -= X_REGISTER_SIZE;
2136 valbuf += X_REGISTER_SIZE;
2137 }
2138 }
2139 }
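
/* Worked example for the three cases above: `struct { double x, y; }'
   is an HFA with two members, so 8 bytes are read from each of v0 and
   v1; a 16-byte integer is read from x0 then x1; and a 12-byte plain
   struct takes the final branch, reading x0 whole plus 4 bytes of x1.  */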
2140
2141
2142 /* Will a function return an aggregate type in memory or in a
2143 register? Return 0 if an aggregate type can be returned in a
2144 register, 1 if it must be returned in memory. */
2145
2146 static int
2147 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2148 {
2149 type = check_typedef (type);
2150 int elements;
2151 struct type *fundamental_type;
2152
2153 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2154 &fundamental_type))
2155 {
2156 /* v0-v7 are used to return values and one register is allocated
2157 for each member. However, an HFA or HVA has at most four members. */
2158 return 0;
2159 }
2160
2161 if (TYPE_LENGTH (type) > 16)
2162 {
2163 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2164 invisible reference. */
2165
2166 return 1;
2167 }
2168
2169 return 0;
2170 }
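
/* For example, `struct { long a, b, c; }' is 24 bytes and is returned
   in memory per PCS B.6, while `struct { long a, b; }' is exactly 16
   bytes and is returned in registers x0/x1.  */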
2171
2172 /* Write into appropriate registers a function return value of type
2173 TYPE, given in virtual format. */
2174
2175 static void
2176 aarch64_store_return_value (struct type *type, struct regcache *regs,
2177 const gdb_byte *valbuf)
2178 {
2179 struct gdbarch *gdbarch = regs->arch ();
2180 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2181 int elements;
2182 struct type *fundamental_type;
2183
2184 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2185 &fundamental_type))
2186 {
2187 int len = TYPE_LENGTH (fundamental_type);
2188
2189 for (int i = 0; i < elements; i++)
2190 {
2191 int regno = AARCH64_V0_REGNUM + i;
2192 /* Enough space for a full vector register. */
2193 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2194 gdb_assert (len <= sizeof (tmpbuf));
2195
2196 if (aarch64_debug)
2197 {
2198 debug_printf ("write HFA or HVA return value element %d to %s\n",
2199 i + 1,
2200 gdbarch_register_name (gdbarch, regno));
2201 }
2202
2203 memcpy (tmpbuf, valbuf,
2204 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2205 regs->cooked_write (regno, tmpbuf);
2206 valbuf += len;
2207 }
2208 }
2209 else if (TYPE_CODE (type) == TYPE_CODE_INT
2210 || TYPE_CODE (type) == TYPE_CODE_CHAR
2211 || TYPE_CODE (type) == TYPE_CODE_BOOL
2212 || TYPE_CODE (type) == TYPE_CODE_PTR
2213 || TYPE_IS_REFERENCE (type)
2214 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2215 {
2216 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2217 {
2218 /* Values of one word or less are zero/sign-extended and
2219 returned in X0. */
2220 bfd_byte tmpbuf[X_REGISTER_SIZE];
2221 LONGEST val = unpack_long (type, valbuf);
2222
2223 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2224 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2225 }
2226 else
2227 {
2228 /* Integral values greater than one word are stored in
2229 consecutive registers starting with X0. This will always
2230 be a multiple of the register size. */
2231 int len = TYPE_LENGTH (type);
2232 int regno = AARCH64_X0_REGNUM;
2233
2234 while (len > 0)
2235 {
2236 regs->cooked_write (regno++, valbuf);
2237 len -= X_REGISTER_SIZE;
2238 valbuf += X_REGISTER_SIZE;
2239 }
2240 }
2241 }
2242 else
2243 {
2244 /* For a structure or union the behaviour is as if the value had
2245 been stored to word-aligned memory and then loaded into
2246 registers with 64-bit load instruction(s). */
2247 int len = TYPE_LENGTH (type);
2248 int regno = AARCH64_X0_REGNUM;
2249 bfd_byte tmpbuf[X_REGISTER_SIZE];
2250
2251 while (len > 0)
2252 {
2253 memcpy (tmpbuf, valbuf,
2254 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2255 regs->cooked_write (regno++, tmpbuf);
2256 len -= X_REGISTER_SIZE;
2257 valbuf += X_REGISTER_SIZE;
2258 }
2259 }
2260 }
2261
2262 /* Implement the "return_value" gdbarch method. */
2263
2264 static enum return_value_convention
2265 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2266 struct type *valtype, struct regcache *regcache,
2267 gdb_byte *readbuf, const gdb_byte *writebuf)
2268 {
2269
2270 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2271 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2272 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2273 {
2274 if (aarch64_return_in_memory (gdbarch, valtype))
2275 {
2276 if (aarch64_debug)
2277 debug_printf ("return value in memory\n");
2278 return RETURN_VALUE_STRUCT_CONVENTION;
2279 }
2280 }
2281
2282 if (writebuf)
2283 aarch64_store_return_value (valtype, regcache, writebuf);
2284
2285 if (readbuf)
2286 aarch64_extract_return_value (valtype, regcache, readbuf);
2287
2288 if (aarch64_debug)
2289 debug_printf ("return value in registers\n");
2290
2291 return RETURN_VALUE_REGISTER_CONVENTION;
2292 }
2293
2294 /* Implement the "get_longjmp_target" gdbarch method. */
2295
2296 static int
2297 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2298 {
2299 CORE_ADDR jb_addr;
2300 gdb_byte buf[X_REGISTER_SIZE];
2301 struct gdbarch *gdbarch = get_frame_arch (frame);
2302 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2303 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2304
2305 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2306
2307 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2308 X_REGISTER_SIZE))
2309 return 0;
2310
2311 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2312 return 1;
2313 }
2314
2315 /* Implement the "gen_return_address" gdbarch method. */
2316
2317 static void
2318 aarch64_gen_return_address (struct gdbarch *gdbarch,
2319 struct agent_expr *ax, struct axs_value *value,
2320 CORE_ADDR scope)
2321 {
2322 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2323 value->kind = axs_lvalue_register;
2324 value->u.reg = AARCH64_LR_REGNUM;
2325 }
2326 \f
2327
2328 /* Return the pseudo register name corresponding to register regnum. */
2329
2330 static const char *
2331 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2332 {
2333 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2334
2335 static const char *const q_name[] =
2336 {
2337 "q0", "q1", "q2", "q3",
2338 "q4", "q5", "q6", "q7",
2339 "q8", "q9", "q10", "q11",
2340 "q12", "q13", "q14", "q15",
2341 "q16", "q17", "q18", "q19",
2342 "q20", "q21", "q22", "q23",
2343 "q24", "q25", "q26", "q27",
2344 "q28", "q29", "q30", "q31",
2345 };
2346
2347 static const char *const d_name[] =
2348 {
2349 "d0", "d1", "d2", "d3",
2350 "d4", "d5", "d6", "d7",
2351 "d8", "d9", "d10", "d11",
2352 "d12", "d13", "d14", "d15",
2353 "d16", "d17", "d18", "d19",
2354 "d20", "d21", "d22", "d23",
2355 "d24", "d25", "d26", "d27",
2356 "d28", "d29", "d30", "d31",
2357 };
2358
2359 static const char *const s_name[] =
2360 {
2361 "s0", "s1", "s2", "s3",
2362 "s4", "s5", "s6", "s7",
2363 "s8", "s9", "s10", "s11",
2364 "s12", "s13", "s14", "s15",
2365 "s16", "s17", "s18", "s19",
2366 "s20", "s21", "s22", "s23",
2367 "s24", "s25", "s26", "s27",
2368 "s28", "s29", "s30", "s31",
2369 };
2370
2371 static const char *const h_name[] =
2372 {
2373 "h0", "h1", "h2", "h3",
2374 "h4", "h5", "h6", "h7",
2375 "h8", "h9", "h10", "h11",
2376 "h12", "h13", "h14", "h15",
2377 "h16", "h17", "h18", "h19",
2378 "h20", "h21", "h22", "h23",
2379 "h24", "h25", "h26", "h27",
2380 "h28", "h29", "h30", "h31",
2381 };
2382
2383 static const char *const b_name[] =
2384 {
2385 "b0", "b1", "b2", "b3",
2386 "b4", "b5", "b6", "b7",
2387 "b8", "b9", "b10", "b11",
2388 "b12", "b13", "b14", "b15",
2389 "b16", "b17", "b18", "b19",
2390 "b20", "b21", "b22", "b23",
2391 "b24", "b25", "b26", "b27",
2392 "b28", "b29", "b30", "b31",
2393 };
2394
2395 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2396
2397 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2398 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2399
2400 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2401 return d_name[p_regnum - AARCH64_D0_REGNUM];
2402
2403 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2404 return s_name[p_regnum - AARCH64_S0_REGNUM];
2405
2406 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2407 return h_name[p_regnum - AARCH64_H0_REGNUM];
2408
2409 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2410 return b_name[p_regnum - AARCH64_B0_REGNUM];
2411
2412 if (tdep->has_sve ())
2413 {
2414 static const char *const sve_v_name[] =
2415 {
2416 "v0", "v1", "v2", "v3",
2417 "v4", "v5", "v6", "v7",
2418 "v8", "v9", "v10", "v11",
2419 "v12", "v13", "v14", "v15",
2420 "v16", "v17", "v18", "v19",
2421 "v20", "v21", "v22", "v23",
2422 "v24", "v25", "v26", "v27",
2423 "v28", "v29", "v30", "v31",
2424 };
2425
2426 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2427 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2428 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2429 }
2430
2431 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2432 prevents it from being read by methods such as
2433 mi_cmd_trace_frame_collected. */
2434 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2435 return "";
2436
2437 internal_error (__FILE__, __LINE__,
2438 _("aarch64_pseudo_register_name: bad register number %d"),
2439 p_regnum);
2440 }
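
/* For example, in a hypothetical session "info registers q0" or
   "print $d3" resolve their names through the function above, and
   their types through aarch64_pseudo_register_type below.  */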
2441
2442 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2443
2444 static struct type *
2445 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2446 {
2447 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2448
2449 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2450
2451 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2452 return aarch64_vnq_type (gdbarch);
2453
2454 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2455 return aarch64_vnd_type (gdbarch);
2456
2457 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2458 return aarch64_vns_type (gdbarch);
2459
2460 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2461 return aarch64_vnh_type (gdbarch);
2462
2463 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2464 return aarch64_vnb_type (gdbarch);
2465
2466 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2467 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2468 return aarch64_vnv_type (gdbarch);
2469
2470 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2471 return builtin_type (gdbarch)->builtin_uint64;
2472
2473 internal_error (__FILE__, __LINE__,
2474 _("aarch64_pseudo_register_type: bad register number %d"),
2475 p_regnum);
2476 }
2477
2478 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2479
2480 static int
2481 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2482 struct reggroup *group)
2483 {
2484 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2485
2486 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2487
2488 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2489 return group == all_reggroup || group == vector_reggroup;
2490 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2491 return (group == all_reggroup || group == vector_reggroup
2492 || group == float_reggroup);
2493 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2494 return (group == all_reggroup || group == vector_reggroup
2495 || group == float_reggroup);
2496 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2497 return group == all_reggroup || group == vector_reggroup;
2498 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2499 return group == all_reggroup || group == vector_reggroup;
2500 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2501 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2502 return group == all_reggroup || group == vector_reggroup;
2503 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2504 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2505 return 0;
2506
2507 return group == all_reggroup;
2508 }
2509
2510 /* Helper for aarch64_pseudo_read_value. */
2511
2512 static struct value *
2513 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2514 readable_regcache *regcache, int regnum_offset,
2515 int regsize, struct value *result_value)
2516 {
2517 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2518
2519 /* Enough space for a full vector register. */
2520 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2521 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2522
2523 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2524 mark_value_bytes_unavailable (result_value, 0,
2525 TYPE_LENGTH (value_type (result_value)));
2526 else
2527 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2528
2529 return result_value;
2530 }
2531
2532 /* Implement the "pseudo_register_read_value" gdbarch method. */
2533
2534 static struct value *
2535 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2536 int regnum)
2537 {
2538 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2539 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2540
2541 VALUE_LVAL (result_value) = lval_register;
2542 VALUE_REGNUM (result_value) = regnum;
2543
2544 regnum -= gdbarch_num_regs (gdbarch);
2545
2546 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2547 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2548 regnum - AARCH64_Q0_REGNUM,
2549 Q_REGISTER_SIZE, result_value);
2550
2551 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2552 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2553 regnum - AARCH64_D0_REGNUM,
2554 D_REGISTER_SIZE, result_value);
2555
2556 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2557 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2558 regnum - AARCH64_S0_REGNUM,
2559 S_REGISTER_SIZE, result_value);
2560
2561 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2562 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2563 regnum - AARCH64_H0_REGNUM,
2564 H_REGISTER_SIZE, result_value);
2565
2566 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2567 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2568 regnum - AARCH64_B0_REGNUM,
2569 B_REGISTER_SIZE, result_value);
2570
2571 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2572 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2573 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2574 regnum - AARCH64_SVE_V0_REGNUM,
2575 V_REGISTER_SIZE, result_value);
2576
2577 gdb_assert_not_reached ("regnum out of bounds");
2578 }
2579
2580 /* Helper for aarch64_pseudo_write. */
2581
2582 static void
2583 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2584 int regnum_offset, int regsize, const gdb_byte *buf)
2585 {
2586 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2587
2588 /* Enough space for a full vector register. */
2589 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2590 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2591
2592 /* Ensure the register buffer is zero. We want GDB writes of the
2593 various 'scalar' pseudo registers to behave like architectural
2594 writes: register width bytes are written, and the remainder are
2595 set to zero. */
2596 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2597
2598 memcpy (reg_buf, buf, regsize);
2599 regcache->raw_write (v_regnum, reg_buf);
2600 }
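
/* For example, because of the memset above, a hypothetical
     (gdb) set $s0.f = 1.5
   writes bytes 0-3 of v0 and zeroes bytes 4-15, matching the
   architectural behaviour of a scalar write to s0.  */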
2601
2602 /* Implement the "pseudo_register_write" gdbarch method. */
2603
2604 static void
2605 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2606 int regnum, const gdb_byte *buf)
2607 {
2608 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2609 regnum -= gdbarch_num_regs (gdbarch);
2610
2611 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2612 return aarch64_pseudo_write_1 (gdbarch, regcache,
2613 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2614 buf);
2615
2616 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2617 return aarch64_pseudo_write_1 (gdbarch, regcache,
2618 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2619 buf);
2620
2621 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2622 return aarch64_pseudo_write_1 (gdbarch, regcache,
2623 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2624 buf);
2625
2626 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2627 return aarch64_pseudo_write_1 (gdbarch, regcache,
2628 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2629 buf);
2630
2631 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2632 return aarch64_pseudo_write_1 (gdbarch, regcache,
2633 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2634 buf);
2635
2636 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2637 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2638 return aarch64_pseudo_write_1 (gdbarch, regcache,
2639 regnum - AARCH64_SVE_V0_REGNUM,
2640 V_REGISTER_SIZE, buf);
2641
2642 gdb_assert_not_reached ("regnum out of bounds");
2643 }
2644
2645 /* Callback function for user_reg_add. */
2646
2647 static struct value *
2648 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2649 {
2650 const int *reg_p = (const int *) baton;
2651
2652 return value_of_register (*reg_p, frame);
2653 }
2654 \f
2655
2656 /* Implement the "software_single_step" gdbarch method, needed to
2657 single step through atomic sequences on AArch64. */
2658
2659 static std::vector<CORE_ADDR>
2660 aarch64_software_single_step (struct regcache *regcache)
2661 {
2662 struct gdbarch *gdbarch = regcache->arch ();
2663 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2664 const int insn_size = 4;
2665 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2666 CORE_ADDR pc = regcache_read_pc (regcache);
2667 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2668 CORE_ADDR loc = pc;
2669 CORE_ADDR closing_insn = 0;
2670 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2671 byte_order_for_code);
2672 int index;
2673 int insn_count;
2674 int bc_insn_count = 0; /* Conditional branch instruction count. */
2675 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2676 aarch64_inst inst;
2677
2678 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2679 return {};
2680
2681 /* Look for a Load Exclusive instruction which begins the sequence. */
2682 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2683 return {};
2684
2685 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2686 {
2687 loc += insn_size;
2688 insn = read_memory_unsigned_integer (loc, insn_size,
2689 byte_order_for_code);
2690
2691 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2692 return {};
2693 /* Check if the instruction is a conditional branch. */
2694 if (inst.opcode->iclass == condbranch)
2695 {
2696 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2697
2698 if (bc_insn_count >= 1)
2699 return {};
2700
2701 /* It is, so we'll try to set a breakpoint at the destination. */
2702 breaks[1] = loc + inst.operands[0].imm.value;
2703
2704 bc_insn_count++;
2705 last_breakpoint++;
2706 }
2707
2708 /* Look for the Store Exclusive which closes the atomic sequence. */
2709 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2710 {
2711 closing_insn = loc;
2712 break;
2713 }
2714 }
2715
2716 /* We didn't find a closing Store Exclusive instruction, fall back. */
2717 if (!closing_insn)
2718 return {};
2719
2720 /* Insert breakpoint after the end of the atomic sequence. */
2721 breaks[0] = loc + insn_size;
2722
2723 /* Check for duplicated breakpoints, and also check that the second
2724 breakpoint is not within the atomic sequence. */
2725 if (last_breakpoint
2726 && (breaks[1] == breaks[0]
2727 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2728 last_breakpoint = 0;
2729
2730 std::vector<CORE_ADDR> next_pcs;
2731
2732 /* Insert the breakpoint at the end of the sequence, and one at the
2733 destination of the conditional branch, if it exists. */
2734 for (index = 0; index <= last_breakpoint; index++)
2735 next_pcs.push_back (breaks[index]);
2736
2737 return next_pcs;
2738 }
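
/* A typical sequence the function above steps over (sketch):

     ldaxr  w1, [x0]        ; Load Exclusive opens the sequence.
     add    w1, w1, #1
     stlxr  w2, w1, [x0]    ; Store Exclusive closes it.
     cbnz   w2, ...         ; Retry loop on failure.

   breaks[0] lands on the instruction following the STLXR, so the
   whole sequence runs before any trap can clear the exclusive
   monitor.  */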
2739
2740 struct aarch64_displaced_step_closure : public displaced_step_closure
2741 {
2742 /* True when a conditional instruction, such as B.COND or TBZ, is
2743 being displaced stepped. */
2744 int cond = 0;
2745
2746 /* PC adjustment offset after displaced stepping. */
2747 int32_t pc_adjust = 0;
2748 };
2749
2750 /* Data when visiting instructions for displaced stepping. */
2751
2752 struct aarch64_displaced_step_data
2753 {
2754 struct aarch64_insn_data base;
2755
2756 /* The address at which the instruction will be executed. */
2757 CORE_ADDR new_addr;
2758 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2759 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2760 /* Number of instructions in INSN_BUF. */
2761 unsigned insn_count;
2762 /* Registers when doing displaced stepping. */
2763 struct regcache *regs;
2764
2765 aarch64_displaced_step_closure *dsc;
2766 };
2767
2768 /* Implementation of aarch64_insn_visitor method "b". */
2769
2770 static void
2771 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2772 struct aarch64_insn_data *data)
2773 {
2774 struct aarch64_displaced_step_data *dsd
2775 = (struct aarch64_displaced_step_data *) data;
2776 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2777
2778 if (can_encode_int32 (new_offset, 28))
2779 {
2780 /* Emit B rather than BL, because executing BL on a new address
2781 will get the wrong address into LR. In order to avoid this,
2782 we emit B, and update LR if the instruction is BL. */
2783 emit_b (dsd->insn_buf, 0, new_offset);
2784 dsd->insn_count++;
2785 }
2786 else
2787 {
2788 /* Write NOP. */
2789 emit_nop (dsd->insn_buf);
2790 dsd->insn_count++;
2791 dsd->dsc->pc_adjust = offset;
2792 }
2793
2794 if (is_bl)
2795 {
2796 /* Update LR. */
2797 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2798 data->insn_addr + 4);
2799 }
2800 }
2801
2802 /* Implementation of aarch64_insn_visitor method "b_cond". */
2803
2804 static void
2805 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2806 struct aarch64_insn_data *data)
2807 {
2808 struct aarch64_displaced_step_data *dsd
2809 = (struct aarch64_displaced_step_data *) data;
2810
2811 /* GDB has to fix up the PC after displaced stepping this instruction,
2812 and does so differently depending on whether the condition is true
2813 or false. Instead of checking COND against the condition flags, we
2814 emit the following instructions, and GDB can tell how to fix up the
2815 PC from the resulting PC value.
2816
2817 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2818 INSN1 ;
2819 TAKEN:
2820 INSN2
2821 */
2822
2823 emit_bcond (dsd->insn_buf, cond, 8);
2824 dsd->dsc->cond = 1;
2825 dsd->dsc->pc_adjust = offset;
2826 dsd->insn_count = 1;
2827 }
2828
2829 /* Helper to construct an aarch64_register operand for register number
2830 NUM, using the 64-bit (X) view if IS64 is non-zero and the 32-bit (W)
2831 view otherwise. */
2832
2833 static struct aarch64_register
2834 aarch64_register (unsigned num, int is64)
2835 {
2836 return (struct aarch64_register) { num, is64 };
2837 }
2838
2839 /* Implementation of aarch64_insn_visitor method "cb". */
2840
2841 static void
2842 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2843 const unsigned rn, int is64,
2844 struct aarch64_insn_data *data)
2845 {
2846 struct aarch64_displaced_step_data *dsd
2847 = (struct aarch64_displaced_step_data *) data;
2848
2849 /* The offset is out of range for a compare and branch
2850 instruction. We can use the following instructions instead:
2851
2852 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2853 INSN1 ;
2854 TAKEN:
2855 INSN2
2856 */
2857 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2858 dsd->insn_count = 1;
2859 dsd->dsc->cond = 1;
2860 dsd->dsc->pc_adjust = offset;
2861 }
2862
2863 /* Implementation of aarch64_insn_visitor method "tb". */
2864
2865 static void
2866 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2867 const unsigned rt, unsigned bit,
2868 struct aarch64_insn_data *data)
2869 {
2870 struct aarch64_displaced_step_data *dsd
2871 = (struct aarch64_displaced_step_data *) data;
2872
2873 /* The offset is out of range for a test bit and branch
2874 instruction. We can use the following instructions instead:
2875
2876 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2877 INSN1 ;
2878 TAKEN:
2879 INSN2
2880
2881 */
2882 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2883 dsd->insn_count = 1;
2884 dsd->dsc->cond = 1;
2885 dsd->dsc->pc_adjust = offset;
2886 }
2887
2888 /* Implementation of aarch64_insn_visitor method "adr". */
2889
2890 static void
2891 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2892 const int is_adrp, struct aarch64_insn_data *data)
2893 {
2894 struct aarch64_displaced_step_data *dsd
2895 = (struct aarch64_displaced_step_data *) data;
2896 /* We know exactly the address the ADR{P,} instruction will compute.
2897 We can just write it to the destination register. */
2898 CORE_ADDR address = data->insn_addr + offset;
2899
2900 if (is_adrp)
2901 {
2902 /* Clear the lower 12 bits of the offset to get the 4K page. */
2903 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2904 address & ~0xfff);
2905 }
2906 else
2907 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2908 address);
2909
2910 dsd->dsc->pc_adjust = 4;
2911 emit_nop (dsd->insn_buf);
2912 dsd->insn_count = 1;
2913 }
2914
2915 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2916
2917 static void
2918 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2919 const unsigned rt, const int is64,
2920 struct aarch64_insn_data *data)
2921 {
2922 struct aarch64_displaced_step_data *dsd
2923 = (struct aarch64_displaced_step_data *) data;
2924 CORE_ADDR address = data->insn_addr + offset;
2925 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2926
2927 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2928 address);
2929
2930 if (is_sw)
2931 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2932 aarch64_register (rt, 1), zero);
2933 else
2934 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2935 aarch64_register (rt, 1), zero);
2936
2937 dsd->dsc->pc_adjust = 4;
2938 }
2939
2940 /* Implementation of aarch64_insn_visitor method "others". */
2941
2942 static void
2943 aarch64_displaced_step_others (const uint32_t insn,
2944 struct aarch64_insn_data *data)
2945 {
2946 struct aarch64_displaced_step_data *dsd
2947 = (struct aarch64_displaced_step_data *) data;
2948
2949 aarch64_emit_insn (dsd->insn_buf, insn);
2950 dsd->insn_count = 1;
2951
2952 if ((insn & 0xfffffc1f) == 0xd65f0000)
2953 {
2954 /* RET */
2955 dsd->dsc->pc_adjust = 0;
2956 }
2957 else
2958 dsd->dsc->pc_adjust = 4;
2959 }
2960
2961 static const struct aarch64_insn_visitor visitor =
2962 {
2963 aarch64_displaced_step_b,
2964 aarch64_displaced_step_b_cond,
2965 aarch64_displaced_step_cb,
2966 aarch64_displaced_step_tb,
2967 aarch64_displaced_step_adr,
2968 aarch64_displaced_step_ldr_literal,
2969 aarch64_displaced_step_others,
2970 };
2971
2972 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2973
2974 struct displaced_step_closure *
2975 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2976 CORE_ADDR from, CORE_ADDR to,
2977 struct regcache *regs)
2978 {
2979 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2980 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2981 struct aarch64_displaced_step_data dsd;
2982 aarch64_inst inst;
2983
2984 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2985 return NULL;
2986
2987 /* Look for a Load Exclusive instruction which begins the sequence. */
2988 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2989 {
2990 /* We can't displaced step atomic sequences. */
2991 return NULL;
2992 }
2993
2994 std::unique_ptr<aarch64_displaced_step_closure> dsc
2995 (new aarch64_displaced_step_closure);
2996 dsd.base.insn_addr = from;
2997 dsd.new_addr = to;
2998 dsd.regs = regs;
2999 dsd.dsc = dsc.get ();
3000 dsd.insn_count = 0;
3001 aarch64_relocate_instruction (insn, &visitor,
3002 (struct aarch64_insn_data *) &dsd);
3003 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
3004
3005 if (dsd.insn_count != 0)
3006 {
3007 int i;
3008
3009 /* The instruction can be relocated to the scratch pad. Copy the
3010 relocated instruction(s) there. */
3011 for (i = 0; i < dsd.insn_count; i++)
3012 {
3013 if (debug_displaced)
3014 {
3015 debug_printf ("displaced: writing insn ");
3016 debug_printf ("%.8x", dsd.insn_buf[i]);
3017 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3018 }
3019 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3020 (ULONGEST) dsd.insn_buf[i]);
3021 }
3022 }
3023 else
3024 {
3025 dsc = NULL;
3026 }
3027
3028 return dsc.release ();
3029 }
3030
3031 /* Implement the "displaced_step_fixup" gdbarch method. */
3032
3033 void
3034 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3035 struct displaced_step_closure *dsc_,
3036 CORE_ADDR from, CORE_ADDR to,
3037 struct regcache *regs)
3038 {
3039 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3040
3041 if (dsc->cond)
3042 {
3043 ULONGEST pc;
3044
3045 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3046 if (pc - to == 8)
3047 {
3048 /* Condition is true. */
3049 }
3050 else if (pc - to == 4)
3051 {
3052 /* Condition is false. */
3053 dsc->pc_adjust = 4;
3054 }
3055 else
3056 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3057 }
3058
3059 if (dsc->pc_adjust != 0)
3060 {
3061 if (debug_displaced)
3062 {
3063 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3064 paddress (gdbarch, from), dsc->pc_adjust);
3065 }
3066 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3067 from + dsc->pc_adjust);
3068 }
3069 }
3070
3071 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3072
3073 int
3074 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3075 struct displaced_step_closure *closure)
3076 {
3077 return 1;
3078 }
3079
3080 /* Get the correct target description for the given VQ value.
3081 If VQ is zero then it is assumed SVE is not supported.
3082 (It is not possible to set VQ to zero on an SVE system). */
3083
3084 const target_desc *
3085 aarch64_read_description (uint64_t vq, bool pauth_p)
3086 {
3087 if (vq > AARCH64_MAX_SVE_VQ)
3088 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3089 AARCH64_MAX_SVE_VQ);
3090
3091 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3092
3093 if (tdesc == NULL)
3094 {
3095 tdesc = aarch64_create_target_description (vq, pauth_p);
3096 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3097 }
3098
3099 return tdesc;
3100 }
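
/* For example, aarch64_read_description (2, false) returns (creating
   and caching it on first use) the description for a 256-bit SVE
   vector length -- a VQ of 2 is two 128-bit quadwords -- with no
   pauth registers.  */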
3101
3102 /* Return the VQ used when creating the target description TDESC. */
3103
3104 static uint64_t
3105 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3106 {
3107 const struct tdesc_feature *feature_sve;
3108
3109 if (!tdesc_has_registers (tdesc))
3110 return 0;
3111
3112 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3113
3114 if (feature_sve == nullptr)
3115 return 0;
3116
3117 uint64_t vl = tdesc_register_bitsize (feature_sve,
3118 aarch64_sve_register_names[0]) / 8;
3119 return sve_vq_from_vl (vl);
3120 }
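
/* For example, a description whose z0 is 256 bits wide gives
   vl = 256 / 8 = 32 bytes, and sve_vq_from_vl then yields
   32 / 16 = 2 quadwords.  */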
3121
3122 /* Add all the expected register sets into GDBARCH. */
3123
3124 static void
3125 aarch64_add_reggroups (struct gdbarch *gdbarch)
3126 {
3127 reggroup_add (gdbarch, general_reggroup);
3128 reggroup_add (gdbarch, float_reggroup);
3129 reggroup_add (gdbarch, system_reggroup);
3130 reggroup_add (gdbarch, vector_reggroup);
3131 reggroup_add (gdbarch, all_reggroup);
3132 reggroup_add (gdbarch, save_reggroup);
3133 reggroup_add (gdbarch, restore_reggroup);
3134 }
3135
3136 /* Implement the "cannot_store_register" gdbarch method. */
3137
3138 static int
3139 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3140 {
3141 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3142
3143 if (!tdep->has_pauth ())
3144 return 0;
3145
3146 /* Pointer authentication registers are read-only. */
3147 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3148 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3149 }
3150
3151 /* Initialize the current architecture based on INFO. If possible,
3152 re-use an architecture from ARCHES, which is a list of
3153 architectures already created during this debugging session.
3154
3155 Called e.g. at program startup, when reading a core file, and when
3156 reading a binary file. */
3157
3158 static struct gdbarch *
3159 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3160 {
3161 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3162 const struct tdesc_feature *feature_pauth;
3163 bool valid_p = true;
3164 int i, num_regs = 0, num_pseudo_regs = 0;
3165 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3166
3167 /* Use the vector length passed via the target info. Here -1 means no
3168 SVE, and 0 means unset. If unset then use the vector length from the
3169 existing tdesc. */
3170 uint64_t vq = 0;
3171 if (info.id == (int *) -1)
3172 vq = 0;
3173 else if (info.id != 0)
3174 vq = (uint64_t) info.id;
3175 else
3176 vq = aarch64_get_tdesc_vq (info.target_desc);
3177
3178 if (vq > AARCH64_MAX_SVE_VQ)
3179 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3180 pulongest (vq), AARCH64_MAX_SVE_VQ);
3181
3182 /* If there is already a candidate, use it. */
3183 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3184 best_arch != nullptr;
3185 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3186 {
3187 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3188 if (tdep && tdep->vq == vq)
3189 return best_arch->gdbarch;
3190 }
3191
3192 /* Ensure we always have a target descriptor, and that it is for the given VQ
3193 value. */
3194 const struct target_desc *tdesc = info.target_desc;
3195 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3196 tdesc = aarch64_read_description (vq, false);
3197 gdb_assert (tdesc);
3198
3199 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3200 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3201 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3202 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3203
3204 if (feature_core == nullptr)
3205 return nullptr;
3206
3207 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3208
3209 /* Validate the description provides the mandatory core R registers
3210 and allocate their numbers. */
3211 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3212 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3213 AARCH64_X0_REGNUM + i,
3214 aarch64_r_register_names[i]);
3215
3216 num_regs = AARCH64_X0_REGNUM + i;
3217
3218 /* Add the V registers. */
3219 if (feature_fpu != nullptr)
3220 {
3221 if (feature_sve != nullptr)
3222 error (_("Program contains both fpu and SVE features."));
3223
3224 /* Validate the description provides the mandatory V registers
3225 and allocate their numbers. */
3226 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3227 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3228 AARCH64_V0_REGNUM + i,
3229 aarch64_v_register_names[i]);
3230
3231 num_regs = AARCH64_V0_REGNUM + i;
3232 }
3233
3234 /* Add the SVE registers. */
3235 if (feature_sve != nullptr)
3236 {
3237 /* Validate the description provides the mandatory SVE registers
3238 and allocate their numbers. */
3239 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3240 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3241 AARCH64_SVE_Z0_REGNUM + i,
3242 aarch64_sve_register_names[i]);
3243
3244 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3245 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3246 }
3247
3248 if (feature_fpu != nullptr || feature_sve != nullptr)
3249 {
3250 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3251 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3252 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3253 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3254 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3255 }
3256
3257 /* Add the pauth registers. */
3258 if (feature_pauth != NULL)
3259 {
3260 first_pauth_regnum = num_regs;
3261 pauth_ra_state_offset = num_pseudo_regs;
3262 /* Validate the descriptor provides the mandatory PAUTH registers and
3263 allocate their numbers. */
3264 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3265 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3266 first_pauth_regnum + i,
3267 aarch64_pauth_register_names[i]);
3268
3269 num_regs += i;
3270 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3271 }
3272
3273 if (!valid_p)
3274 {
3275 tdesc_data_cleanup (tdesc_data);
3276 return nullptr;
3277 }
3278
3279 /* AArch64 code is always little-endian. */
3280 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3281
3282 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3283 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3284
3285 /* This should be low enough for everything. */
3286 tdep->lowest_pc = 0x20;
3287 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3288 tdep->jb_elt_size = 8;
3289 tdep->vq = vq;
3290 tdep->pauth_reg_base = first_pauth_regnum;
3291 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3292 : pauth_ra_state_offset + num_regs;
3293
3294 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3295 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3296
3297 /* Advance PC across function entry code. */
3298 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3299
3300 /* The stack grows downward. */
3301 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3302
3303 /* Breakpoint manipulation. */
3304 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3305 aarch64_breakpoint::kind_from_pc);
3306 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3307 aarch64_breakpoint::bp_from_kind);
3308 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3309 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3310
3311 /* Information about registers, etc. */
3312 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3313 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3314 set_gdbarch_num_regs (gdbarch, num_regs);
3315
3316 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3317 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3318 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3319 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3320 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3321 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3322 aarch64_pseudo_register_reggroup_p);
3323 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3324
3325 /* ABI */
3326 set_gdbarch_short_bit (gdbarch, 16);
3327 set_gdbarch_int_bit (gdbarch, 32);
3328 set_gdbarch_float_bit (gdbarch, 32);
3329 set_gdbarch_double_bit (gdbarch, 64);
3330 set_gdbarch_long_double_bit (gdbarch, 128);
3331 set_gdbarch_long_bit (gdbarch, 64);
3332 set_gdbarch_long_long_bit (gdbarch, 64);
3333 set_gdbarch_ptr_bit (gdbarch, 64);
3334 set_gdbarch_char_signed (gdbarch, 0);
3335 set_gdbarch_wchar_signed (gdbarch, 0);
3336 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3337 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3338 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3339 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3340
3341 /* Internal <-> external register number maps. */
3342 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3343
3344 /* Returning results. */
3345 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3346
3347 /* Disassembly. */
3348 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3349
3350 /* Virtual tables. */
3351 set_gdbarch_vbit_in_delta (gdbarch, 1);
3352
3353 /* Register architecture. */
3354 aarch64_add_reggroups (gdbarch);
3355
3356 /* Hook in the ABI-specific overrides, if they have been registered. */
3357 info.target_desc = tdesc;
3358 info.tdesc_data = tdesc_data;
3359 gdbarch_init_osabi (info, gdbarch);
3360
3361 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3362 /* Register DWARF CFA vendor handler. */
3363 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3364 aarch64_execute_dwarf_cfa_vendor_op);
3365
3366 /* Add some default predicates. */
3367 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3368 dwarf2_append_unwinders (gdbarch);
3369 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3370
3371 frame_base_set_default (gdbarch, &aarch64_normal_base);
3372
3373 /* Now we have tuned the configuration, set a few final things,
3374 based on what the OS ABI has told us. */
3375
3376 if (tdep->jb_pc >= 0)
3377 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3378
3379 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3380
3381 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3382
3383 /* Add standard register aliases. */
3384 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3385 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3386 value_of_aarch64_user_reg,
3387 &aarch64_register_aliases[i].regnum);
3388
3389 register_aarch64_ravenscar_ops (gdbarch);
3390
3391 return gdbarch;
3392 }
3393
3394 static void
3395 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3396 {
3397 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3398
3399 if (tdep == NULL)
3400 return;
3401
3402 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3403 paddress (gdbarch, tdep->lowest_pc));
3404 }
3405
3406 #if GDB_SELF_TEST
3407 namespace selftests
3408 {
3409 static void aarch64_process_record_test (void);
3410 }
3411 #endif
3412
3413 void
3414 _initialize_aarch64_tdep (void)
3415 {
3416 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3417 aarch64_dump_tdep);
3418
3419 /* Debug this file's internals. */
3420 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3421 Set AArch64 debugging."), _("\
3422 Show AArch64 debugging."), _("\
3423 When on, AArch64 specific debugging is enabled."),
3424 NULL,
3425 show_aarch64_debug,
3426 &setdebuglist, &showdebuglist);
3427
3428 #if GDB_SELF_TEST
3429 selftests::register_test ("aarch64-analyze-prologue",
3430 selftests::aarch64_analyze_prologue_test);
3431 selftests::register_test ("aarch64-process-record",
3432 selftests::aarch64_process_record_test);
3433 selftests::record_xml_tdesc ("aarch64.xml",
3434 aarch64_create_target_description (0, false));
3435 #endif
3436 }
3437
3438 /* AArch64 process record-replay related structures, defines etc. */
3439
3440 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3441 do \
3442 { \
3443 unsigned int reg_len = LENGTH; \
3444 if (reg_len) \
3445 { \
3446 REGS = XNEWVEC (uint32_t, reg_len); \
3447 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3448 } \
3449 } \
3450 while (0)
3451
3452 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3453 do \
3454 { \
3455 unsigned int mem_len = LENGTH; \
3456 if (mem_len) \
3457 { \
3458 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3459 memcpy (&MEMS->len, &RECORD_BUF[0], \
3460 sizeof (struct aarch64_mem_r) * LENGTH); \
3461 } \
3462 } \
3463 while (0)
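
/* Usage sketch for the two macros above (hypothetical caller):

     uint32_t record_buf[2], *regs = NULL;
     record_buf[0] = AARCH64_CPSR_REGNUM;
     REG_ALLOC (regs, 1, record_buf);

   after which REGS owns an XNEWVEC copy of the first LENGTH entries
   of RECORD_BUF, to be handed on to the record machinery.  */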
3464
3465 /* AArch64 record/replay structures and enumerations. */
3466
3467 struct aarch64_mem_r
3468 {
3469 uint64_t len; /* Record length. */
3470 uint64_t addr; /* Memory address. */
3471 };
3472
3473 enum aarch64_record_result
3474 {
3475 AARCH64_RECORD_SUCCESS,
3476 AARCH64_RECORD_UNSUPPORTED,
3477 AARCH64_RECORD_UNKNOWN
3478 };
3479
3480 typedef struct insn_decode_record_t
3481 {
3482 struct gdbarch *gdbarch;
3483 struct regcache *regcache;
3484 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3485 uint32_t aarch64_insn; /* Insn to be recorded. */
3486 uint32_t mem_rec_count; /* Count of memory records. */
3487 uint32_t reg_rec_count; /* Count of register records. */
3488 uint32_t *aarch64_regs; /* Registers to be recorded. */
3489 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3490 } insn_decode_record;
3491
3492 /* Record handler for data processing - register instructions. */
3493
3494 static unsigned int
3495 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3496 {
3497 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3498 uint32_t record_buf[4];
3499
3500 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3501 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3502 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3503
3504 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3505 {
3506 uint8_t setflags;
3507
3508 /* Logical (shifted register). */
3509 if (insn_bits24_27 == 0x0a)
3510 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3511 /* Add/subtract. */
3512 else if (insn_bits24_27 == 0x0b)
3513 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3514 else
3515 return AARCH64_RECORD_UNKNOWN;
3516
3517 record_buf[0] = reg_rd;
3518 aarch64_insn_r->reg_rec_count = 1;
3519 if (setflags)
3520 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3521 }
3522 else
3523 {
3524 if (insn_bits24_27 == 0x0b)
3525 {
3526 /* Data-processing (3 source). */
3527 record_buf[0] = reg_rd;
3528 aarch64_insn_r->reg_rec_count = 1;
3529 }
3530 else if (insn_bits24_27 == 0x0a)
3531 {
3532 if (insn_bits21_23 == 0x00)
3533 {
3534 /* Add/subtract (with carry). */
3535 record_buf[0] = reg_rd;
3536 aarch64_insn_r->reg_rec_count = 1;
3537 if (bit (aarch64_insn_r->aarch64_insn, 29))
3538 {
3539 record_buf[1] = AARCH64_CPSR_REGNUM;
3540 aarch64_insn_r->reg_rec_count = 2;
3541 }
3542 }
3543 else if (insn_bits21_23 == 0x02)
3544 {
3545 /* Conditional compare (register) and conditional compare
3546 (immediate) instructions. */
3547 record_buf[0] = AARCH64_CPSR_REGNUM;
3548 aarch64_insn_r->reg_rec_count = 1;
3549 }
3550 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3551 {
3552 /* Conditional select. */
3553 /* Data-processing (2 source). */
3554 /* Data-processing (1 source). */
3555 record_buf[0] = reg_rd;
3556 aarch64_insn_r->reg_rec_count = 1;
3557 }
3558 else
3559 return AARCH64_RECORD_UNKNOWN;
3560 }
3561 }
3562
3563 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3564 record_buf);
3565 return AARCH64_RECORD_SUCCESS;
3566 }
3567
3568 /* Record handler for data processing - immediate instructions. */
3569
3570 static unsigned int
3571 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3572 {
3573 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3574 uint32_t record_buf[4];
3575
3576 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3577 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3578 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3579
3580 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3581 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3582 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3583 {
3584 record_buf[0] = reg_rd;
3585 aarch64_insn_r->reg_rec_count = 1;
3586 }
3587 else if (insn_bits24_27 == 0x01)
3588 {
3589 /* Add/Subtract (immediate). */
3590 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3591 record_buf[0] = reg_rd;
3592 aarch64_insn_r->reg_rec_count = 1;
3593 if (setflags)
3594 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3595 }
3596 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3597 {
3598 /* Logical (immediate). */
3599 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3600 record_buf[0] = reg_rd;
3601 aarch64_insn_r->reg_rec_count = 1;
3602 if (setflags)
3603 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3604 }
3605 else
3606 return AARCH64_RECORD_UNKNOWN;
3607
3608 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3609 record_buf);
3610 return AARCH64_RECORD_SUCCESS;
3611 }
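
/* Worked example for the immediate handler above: ADD x1, x2, #4
   encodes as 0x91001041, so bits [24:27] are 0x1 (add/subtract
   immediate) and bit 29 (S) is clear; only x1 is recorded, with no
   CPSR entry.  */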
3612
3613 /* Record handler for branch, exception generation and system instructions. */
3614
3615 static unsigned int
3616 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3617 {
3618 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3619 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3620 uint32_t record_buf[4];
3621
3622 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3623 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3624 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3625
3626 if (insn_bits28_31 == 0x0d)
3627 {
3628 /* Exception generation instructions. */
3629 if (insn_bits24_27 == 0x04)
3630 {
3631 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3632 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3633 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3634 {
3635 ULONGEST svc_number;
3636 /* On GNU/Linux the syscall number is passed in x8 (regnum 8). */
3637 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3638 &svc_number);
3639 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3640 svc_number);
3641 }
3642 else
3643 return AARCH64_RECORD_UNSUPPORTED;
3644 }
3645 /* System instructions. */
3646 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3647 {
3648 uint32_t reg_rt, reg_crn;
3649
3650 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3651 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3652
3653 /* Record Rt for SYSL and MRS instructions. */
3654 if (bit (aarch64_insn_r->aarch64_insn, 21))
3655 {
3656 record_buf[0] = reg_rt;
3657 aarch64_insn_r->reg_rec_count = 1;
3658 }
3659 /* Record CPSR for HINT and MSR (immediate) instructions. */
3660 else if (reg_crn == 0x02 || reg_crn == 0x04)
3661 {
3662 record_buf[0] = AARCH64_CPSR_REGNUM;
3663 aarch64_insn_r->reg_rec_count = 1;
3664 }
3665 }
3666 /* Unconditional branch (register). */
3667 else if ((insn_bits24_27 & 0x0e) == 0x06)
3668 {
3669 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3670 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3671 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3672 }
3673 else
3674 return AARCH64_RECORD_UNKNOWN;
3675 }
3676 /* Unconditional branch (immediate). */
3677 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3678 {
3679 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3680 if (bit (aarch64_insn_r->aarch64_insn, 31))
3681 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3682 }
3683 else
3684 /* Compare & branch (immediate), Test & branch (immediate) and
3685 Conditional branch (immediate). */
3686 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3687
3688 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3689 record_buf);
3690 return AARCH64_RECORD_SUCCESS;
3691 }
3692
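/* Worked example for the handler above (illustrative):

     00 00 00 94	bl .

   Bits [31:28] are 0x9 and bits [27:24] are 0x4, so the unconditional
   branch (immediate) arm records PC, and since bit 31 is set (BL
   rather than B) it records LR too.  */
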
3693 /* Record handler for advanced SIMD load and store instructions. */
3694
3695 static unsigned int
3696 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3697 {
3698 CORE_ADDR address;
3699 uint64_t addr_offset = 0;
3700 uint32_t record_buf[24];
3701 uint64_t record_buf_mem[24];
3702 uint32_t reg_rn, reg_rt;
3703 uint32_t reg_index = 0, mem_index = 0;
3704 uint8_t opcode_bits, size_bits;
3705
3706 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3707 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3708 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3709 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3710 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3711
3712 if (record_debug)
3713 debug_printf ("Process record: Advanced SIMD load/store\n");
3714
3715 /* Load/store single structure. */
3716 if (bit (aarch64_insn_r->aarch64_insn, 24))
3717 {
3718 uint8_t sindex, scale, selem, esize, replicate = 0;
3719 scale = opcode_bits >> 2;
3720 selem = ((opcode_bits & 0x02)
3721 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3722 switch (scale)
3723 {
3724 case 1:
3725 if (size_bits & 0x01)
3726 return AARCH64_RECORD_UNKNOWN;
3727 break;
3728 case 2:
3729 if ((size_bits >> 1) & 0x01)
3730 return AARCH64_RECORD_UNKNOWN;
3731 if (size_bits & 0x01)
3732 {
3733 if (!((opcode_bits >> 1) & 0x01))
3734 scale = 3;
3735 else
3736 return AARCH64_RECORD_UNKNOWN;
3737 }
3738 break;
3739 case 3:
3740 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3741 {
3742 scale = size_bits;
3743 replicate = 1;
3744 break;
3745 }
3746 else
3747 return AARCH64_RECORD_UNKNOWN;
3748 default:
3749 break;
3750 }
3751 esize = 8 << scale;
3752 if (replicate)
3753 for (sindex = 0; sindex < selem; sindex++)
3754 {
3755 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3756 reg_rt = (reg_rt + 1) % 32;
3757 }
3758 else
3759 {
3760 for (sindex = 0; sindex < selem; sindex++)
3761 {
3762 if (bit (aarch64_insn_r->aarch64_insn, 22))
3763 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3764 else
3765 {
3766 record_buf_mem[mem_index++] = esize / 8;
3767 record_buf_mem[mem_index++] = address + addr_offset;
3768 }
3769 addr_offset = addr_offset + (esize / 8);
3770 reg_rt = (reg_rt + 1) % 32;
3771 }
3772 }
3773 }
3774 /* Load/store multiple structure. */
3775 else
3776 {
3777 uint8_t selem, esize, rpt, elements;
3778 uint8_t eindex, rindex;
3779
3780 esize = 8 << size_bits;
3781 if (bit (aarch64_insn_r->aarch64_insn, 30))
3782 elements = 128 / esize;
3783 else
3784 elements = 64 / esize;
3785
3786 switch (opcode_bits)
3787 {
3788 /* LD/ST4 (4 Registers). */
3789 case 0:
3790 rpt = 1;
3791 selem = 4;
3792 break;
3793 /* LD/ST1 (4 Registers). */
3794 case 2:
3795 rpt = 4;
3796 selem = 1;
3797 break;
3798 /* LD/ST3 (3 Registers). */
3799 case 4:
3800 rpt = 1;
3801 selem = 3;
3802 break;
3803 /* LD/ST1 (3 Registers). */
3804 case 6:
3805 rpt = 3;
3806 selem = 1;
3807 break;
3808 /* LD/ST1 (1 Register). */
3809 case 7:
3810 rpt = 1;
3811 selem = 1;
3812 break;
3813 /* LD/ST2 (2 Registers). */
3814 case 8:
3815 rpt = 1;
3816 selem = 2;
3817 break;
3818 /* LD/ST1 (2 Registers). */
3819 case 10:
3820 rpt = 2;
3821 selem = 1;
3822 break;
3823 default:
3824 return AARCH64_RECORD_UNSUPPORTED;
3825 break;
3826 }
3827 for (rindex = 0; rindex < rpt; rindex++)
3828 for (eindex = 0; eindex < elements; eindex++)
3829 {
3830 uint8_t reg_tt, sindex;
3831 reg_tt = (reg_rt + rindex) % 32;
3832 for (sindex = 0; sindex < selem; sindex++)
3833 {
3834 if (bit (aarch64_insn_r->aarch64_insn, 22))
3835 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3836 else
3837 {
3838 record_buf_mem[mem_index++] = esize / 8;
3839 record_buf_mem[mem_index++] = address + addr_offset;
3840 }
3841 addr_offset = addr_offset + (esize / 8);
3842 reg_tt = (reg_tt + 1) % 32;
3843 }
3844 }
3845 }
3846
/* Bit 23 selects writeback: the base register Rn is also modified. */
3847 if (bit (aarch64_insn_r->aarch64_insn, 23))
3848 record_buf[reg_index++] = reg_rn;
3849
3850 aarch64_insn_r->reg_rec_count = reg_index;
3851 aarch64_insn_r->mem_rec_count = mem_index / 2;
3852 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3853 record_buf_mem);
3854 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3855 record_buf);
3856 return AARCH64_RECORD_SUCCESS;
3857 }
3858
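/* Worked example for the handler above (illustrative; the encoding
   was hand-assembled from the A64 reference, so treat it as an
   assumption):

     00 70 40 4c	ld1 {v0.16b}, [x0]

   Bit 24 is clear, so this is a load/store multiple structure; opcode
   bits [15:12] are 0x7 (LD/ST1, one register), Q (bit 30) is set and
   size is 0, giving sixteen one-byte elements.  The load path records
   V0 once per element, and bit 23 is clear so the base register X0 is
   not recorded for writeback.  */
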
3859 /* Record handler for load and store instructions. */
3860
3861 static unsigned int
3862 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3863 {
3864 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3865 uint8_t insn_bit23, insn_bit21;
3866 uint8_t opc, size_bits, ld_flag, vector_flag;
3867 uint32_t reg_rn, reg_rt, reg_rt2;
3868 uint64_t datasize, offset;
3869 uint32_t record_buf[8];
3870 uint64_t record_buf_mem[8];
3871 CORE_ADDR address;
3872
3873 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3874 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3875 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3876 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3877 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3878 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3879 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3880 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3881 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3882 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3883 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3884
3885 /* Load/store exclusive. */
3886 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3887 {
3888 if (record_debug)
3889 debug_printf ("Process record: load/store exclusive\n");
3890
3891 if (ld_flag)
3892 {
3893 record_buf[0] = reg_rt;
3894 aarch64_insn_r->reg_rec_count = 1;
3895 if (insn_bit21)
3896 {
3897 record_buf[1] = reg_rt2;
3898 aarch64_insn_r->reg_rec_count = 2;
3899 }
3900 }
3901 else
3902 {
3903 if (insn_bit21)
3904 datasize = (8 << size_bits) * 2;
3905 else
3906 datasize = (8 << size_bits);
3907 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3908 &address);
3909 record_buf_mem[0] = datasize / 8;
3910 record_buf_mem[1] = address;
3911 aarch64_insn_r->mem_rec_count = 1;
3912 if (!insn_bit23)
3913 {
3914 /* Save register Rs. */
3915 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3916 aarch64_insn_r->reg_rec_count = 1;
3917 }
3918 }
3919 }
3920 /* Load register (literal) instructions. */
3921 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3922 {
3923 if (record_debug)
3924 debug_printf ("Process record: load register (literal)\n");
3925 if (vector_flag)
3926 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3927 else
3928 record_buf[0] = reg_rt;
3929 aarch64_insn_r->reg_rec_count = 1;
3930 }
3931 /* All types of load/store pair instructions. */
3932 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3933 {
3934 if (record_debug)
3935 debug_printf ("Process record: load/store pair\n");
3936
3937 if (ld_flag)
3938 {
3939 if (vector_flag)
3940 {
3941 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3942 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3943 }
3944 else
3945 {
3946 record_buf[0] = reg_rt;
3947 record_buf[1] = reg_rt2;
3948 }
3949 aarch64_insn_r->reg_rec_count = 2;
3950 }
3951 else
3952 {
3953 uint16_t imm7_off;
3954 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3955 if (!vector_flag)
3956 size_bits = size_bits >> 1;
3957 datasize = 8 << (2 + size_bits);
/* imm7 is signed; when bit 6 is set, take the two's-complement
   magnitude so that OFFSET stays non-negative. */
3958 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3959 offset = offset << (2 + size_bits);
3960 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3961 &address);
3962 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3963 {
3964 if (imm7_off & 0x40)
3965 address = address - offset;
3966 else
3967 address = address + offset;
3968 }
3969
3970 record_buf_mem[0] = datasize / 8;
3971 record_buf_mem[1] = address;
3972 record_buf_mem[2] = datasize / 8;
3973 record_buf_mem[3] = address + (datasize / 8);
3974 aarch64_insn_r->mem_rec_count = 2;
3975 }
3976 if (bit (aarch64_insn_r->aarch64_insn, 23))
3977 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3978 }
3979 /* Load/store register (unsigned immediate) instructions. */
3980 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3981 {
3982 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3983 if (!(opc >> 1))
3984 {
3985 if (opc & 0x01)
3986 ld_flag = 0x01;
3987 else
3988 ld_flag = 0x0;
3989 }
3990 else
3991 {
3992 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3993 {
3994 /* PRFM (immediate). */
3995 return AARCH64_RECORD_SUCCESS;
3996 }
3997 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3998 {
3999 /* LDRSW (immediate). */
4000 ld_flag = 0x1;
4001 }
4002 else
4003 {
4004 if (opc & 0x01)
4005 ld_flag = 0x01;
4006 else
4007 ld_flag = 0x0;
4008 }
4009 }
4010
4011 if (record_debug)
4012 {
4013 debug_printf ("Process record: load/store (unsigned immediate):"
4014 " size %x V %d opc %x\n", size_bits, vector_flag,
4015 opc);
4016 }
4017
4018 if (!ld_flag)
4019 {
4020 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4021 datasize = 8 << size_bits;
4022 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4023 &address);
4024 offset = offset << size_bits;
4025 address = address + offset;
4026
4027 record_buf_mem[0] = datasize >> 3;
4028 record_buf_mem[1] = address;
4029 aarch64_insn_r->mem_rec_count = 1;
4030 }
4031 else
4032 {
4033 if (vector_flag)
4034 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4035 else
4036 record_buf[0] = reg_rt;
4037 aarch64_insn_r->reg_rec_count = 1;
4038 }
4039 }
4040 /* Load/store register (register offset) instructions. */
4041 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4042 && insn_bits10_11 == 0x02 && insn_bit21)
4043 {
4044 if (record_debug)
4045 debug_printf ("Process record: load/store (register offset)\n");
4046 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4047 if (!(opc >> 1))
4048 if (opc & 0x01)
4049 ld_flag = 0x01;
4050 else
4051 ld_flag = 0x0;
4052 else
4053 if (size_bits != 0x03)
4054 ld_flag = 0x01;
4055 else
4056 return AARCH64_RECORD_UNKNOWN;
4057
4058 if (!ld_flag)
4059 {
4060 ULONGEST reg_rm_val;
4061
4062 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4063 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4064 if (bit (aarch64_insn_r->aarch64_insn, 12))
4065 offset = reg_rm_val << size_bits;
4066 else
4067 offset = reg_rm_val;
4068 datasize = 8 << size_bits;
4069 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4070 &address);
4071 address = address + offset;
4072 record_buf_mem[0] = datasize >> 3;
4073 record_buf_mem[1] = address;
4074 aarch64_insn_r->mem_rec_count = 1;
4075 }
4076 else
4077 {
4078 if (vector_flag)
4079 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4080 else
4081 record_buf[0] = reg_rt;
4082 aarch64_insn_r->reg_rec_count = 1;
4083 }
4084 }
4085 /* Load/store register (immediate and unprivileged) instructions. */
4086 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4087 && !insn_bit21)
4088 {
4089 if (record_debug)
4090 {
4091 debug_printf ("Process record: load/store "
4092 "(immediate and unprivileged)\n");
4093 }
4094 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4095 if (!(opc >> 1))
4096 if (opc & 0x01)
4097 ld_flag = 0x01;
4098 else
4099 ld_flag = 0x0;
4100 else
4101 if (size_bits != 0x03)
4102 ld_flag = 0x01;
4103 else
4104 return AARCH64_RECORD_UNKNOWN;
4105
4106 if (!ld_flag)
4107 {
4108 uint16_t imm9_off;
4109 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4110 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4111 datasize = 8 << size_bits;
4112 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4113 &address);
4114 if (insn_bits10_11 != 0x01)
4115 {
4116 if (imm9_off & 0x0100)
4117 address = address - offset;
4118 else
4119 address = address + offset;
4120 }
4121 record_buf_mem[0] = datasize >> 3;
4122 record_buf_mem[1] = address;
4123 aarch64_insn_r->mem_rec_count = 1;
4124 }
4125 else
4126 {
4127 if (vector_flag)
4128 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4129 else
4130 record_buf[0] = reg_rt;
4131 aarch64_insn_r->reg_rec_count = 1;
4132 }
4133 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4134 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4135 }
4136 /* Advanced SIMD load/store instructions. */
4137 else
4138 return aarch64_record_asimd_load_store (aarch64_insn_r);
4139
4140 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4141 record_buf_mem);
4142 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4143 record_buf);
4144 return AARCH64_RECORD_SUCCESS;
4145 }
4146
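/* Worked example for the handler above (illustrative; the encoding
   was hand-assembled from the A64 reference, so treat it as an
   assumption):

     41 04 00 f9	str x1, [x2, #8]

   This decodes as load/store register (unsigned immediate): opc is 0,
   so it is a store; datasize is 8 << 3 = 64 bits, and the imm12 field
   (1) scaled by size gives offset 8, so eight bytes at x2 + 8 are
   recorded as the memory about to be overwritten.  */
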
4147 /* Record handler for data processing SIMD and floating point instructions. */
4148
4149 static unsigned int
4150 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4151 {
4152 uint8_t insn_bit21, opcode, rmode, reg_rd;
4153 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4154 uint8_t insn_bits11_14;
4155 uint32_t record_buf[2];
4156
4157 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4158 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4159 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4160 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4161 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4162 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4163 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4164 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4165 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4166
4167 if (record_debug)
4168 debug_printf ("Process record: data processing SIMD/FP: ");
4169
4170 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4171 {
4172 /* Floating point - fixed point conversion instructions. */
4173 if (!insn_bit21)
4174 {
4175 if (record_debug)
4176 debug_printf ("FP - fixed point conversion");
4177
4178 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4179 record_buf[0] = reg_rd;
4180 else
4181 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4182 }
4183 /* Floating point - conditional compare instructions. */
4184 else if (insn_bits10_11 == 0x01)
4185 {
4186 if (record_debug)
4187 debug_printf ("FP - conditional compare");
4188
4189 record_buf[0] = AARCH64_CPSR_REGNUM;
4190 }
4191 /* Floating point - data processing (2-source) and
4192 conditional select instructions. */
4193 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4194 {
4195 if (record_debug)
4196 debug_printf ("FP - DP (2-source)");
4197
4198 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4199 }
4200 else if (insn_bits10_11 == 0x00)
4201 {
4202 /* Floating point - immediate instructions. */
4203 if ((insn_bits12_15 & 0x01) == 0x01
4204 || (insn_bits12_15 & 0x07) == 0x04)
4205 {
4206 if (record_debug)
4207 debug_printf ("FP - immediate");
4208 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4209 }
4210 /* Floating point - compare instructions. */
4211 else if ((insn_bits12_15 & 0x03) == 0x02)
4212 {
4213 if (record_debug)
4214 debug_printf ("FP - immediate");
4215 record_buf[0] = AARCH64_CPSR_REGNUM;
4216 }
4217 /* Floating point - integer conversions instructions. */
4218 else if (insn_bits12_15 == 0x00)
4219 {
4220 /* Convert float to integer instruction. */
4221 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4222 {
4223 if (record_debug)
4224 debug_printf ("float to int conversion");
4225
4226 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4227 }
4228 /* Convert integer to float instruction. */
4229 else if ((opcode >> 1) == 0x01 && !rmode)
4230 {
4231 if (record_debug)
4232 debug_printf ("int to float conversion");
4233
4234 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4235 }
4236 /* Move float to integer instruction. */
4237 else if ((opcode >> 1) == 0x03)
4238 {
4239 if (record_debug)
4240 debug_printf ("move float to int");
4241
4242 if (!(opcode & 0x01))
4243 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4244 else
4245 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4246 }
4247 else
4248 return AARCH64_RECORD_UNKNOWN;
4249 }
4250 else
4251 return AARCH64_RECORD_UNKNOWN;
4252 }
4253 else
4254 return AARCH64_RECORD_UNKNOWN;
4255 }
4256 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4257 {
4258 if (record_debug)
4259 debug_printf ("SIMD copy");
4260
4261 /* Advanced SIMD copy instructions. */
4262 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4263 && !bit (aarch64_insn_r->aarch64_insn, 15)
4264 && bit (aarch64_insn_r->aarch64_insn, 10))
4265 {
4266 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4267 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4268 else
4269 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4270 }
4271 else
4272 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4273 }
4274 /* All remaining floating point or advanced SIMD instructions. */
4275 else
4276 {
4277 if (record_debug)
4278 debug_printf ("all remain");
4279
4280 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4281 }
4282
4283 if (record_debug)
4284 debug_printf ("\n");
4285
4286 aarch64_insn_r->reg_rec_count++;
4287 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4288 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4289 record_buf);
4290 return AARCH64_RECORD_SUCCESS;
4291 }
4292
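/* Worked example for the handler above (illustrative; the encoding
   was hand-assembled from the A64 reference, so treat it as an
   assumption):

     20 00 62 9e	scvtf d0, x1

   Bit 21 is set and bits [11:10] and [15:12] are zero, so this is a
   floating point - integer conversion; opcode (bits [18:16]) is 0x2
   with rmode 0, the integer-to-float case, so the destination V0 is
   recorded.  */
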
4293 /* Decode the instruction type and invoke its record handler. */
4294
4295 static unsigned int
4296 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4297 {
4298 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4299
4300 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4301 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4302 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4303 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4304
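/* The checks below follow the top-level A64 encoding table, where
   op0 = insn[28:25] selects the group: 100x is data processing
   (immediate), 101x is branches/exception/system, x1x0 is loads and
   stores, x101 is data processing (register) and x111 is SIMD and
   floating point (summarised here for reference).  */
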
4305 /* Data processing - immediate instructions. */
4306 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4307 return aarch64_record_data_proc_imm (aarch64_insn_r);
4308
4309 /* Branch, exception generation and system instructions. */
4310 if (ins_bit26 && !ins_bit27 && ins_bit28)
4311 return aarch64_record_branch_except_sys (aarch64_insn_r);
4312
4313 /* Load and store instructions. */
4314 if (!ins_bit25 && ins_bit27)
4315 return aarch64_record_load_store (aarch64_insn_r);
4316
4317 /* Data processing - register instructions. */
4318 if (ins_bit25 && !ins_bit26 && ins_bit27)
4319 return aarch64_record_data_proc_reg (aarch64_insn_r);
4320
4321 /* Data processing - SIMD and floating point instructions. */
4322 if (ins_bit25 && ins_bit26 && ins_bit27)
4323 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4324
4325 return AARCH64_RECORD_UNSUPPORTED;
4326 }
4327
4328 /* Cleans up local record registers and memory allocations. */
4329
4330 static void
4331 deallocate_reg_mem (insn_decode_record *record)
4332 {
4333 xfree (record->aarch64_regs);
4334 xfree (record->aarch64_mems);
4335 }
4336
4337 #if GDB_SELF_TEST
4338 namespace selftests {
4339
4340 static void
4341 aarch64_process_record_test (void)
4342 {
4343 struct gdbarch_info info;
4344 uint32_t ret;
4345
4346 gdbarch_info_init (&info);
4347 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4348
4349 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4350 SELF_CHECK (gdbarch != NULL);
4351
4352 insn_decode_record aarch64_record;
4353
4354 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4355 aarch64_record.regcache = NULL;
4356 aarch64_record.this_addr = 0;
4357 aarch64_record.gdbarch = gdbarch;
4358
4359 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4360 aarch64_record.aarch64_insn = 0xf9800020;
4361 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4362 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4363 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4364 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4365
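/* Two further illustrative cases (a minimal sketch; the encodings
   below were hand-assembled from the A64 reference and should be
   treated as assumptions).  The decode handlers expect REG_REC_COUNT
   to be zero on entry, so reset it between calls.  */

/* 20 04 00 b1	adds x0, x1, #1 */
aarch64_record.reg_rec_count = 0;
aarch64_record.aarch64_insn = 0xb1000420;
ret = aarch64_record_decode_insn_handler (&aarch64_record);
SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
/* X0 and CPSR.  */
SELF_CHECK (aarch64_record.reg_rec_count == 2);

/* 00 00 00 94	bl . */
aarch64_record.reg_rec_count = 0;
aarch64_record.aarch64_insn = 0x94000000;
ret = aarch64_record_decode_insn_handler (&aarch64_record);
SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
/* PC and LR.  */
SELF_CHECK (aarch64_record.reg_rec_count == 2);
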
4366 deallocate_reg_mem (&aarch64_record);
4367 }
4368
4369 } // namespace selftests
4370 #endif /* GDB_SELF_TEST */
4371
4372 /* Parse the current instruction and record the registers and memory
4373 locations it will change, appending them to record_arch_list.
4374 Return -1 if something goes wrong. */
4375
4376 int
4377 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4378 CORE_ADDR insn_addr)
4379 {
4380 uint32_t rec_no = 0;
4381 uint8_t insn_size = 4;
4382 uint32_t ret = 0;
4383 gdb_byte buf[insn_size];
4384 insn_decode_record aarch64_record;
4385
4386 memset (&buf[0], 0, insn_size);
4387 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4388 target_read_memory (insn_addr, &buf[0], insn_size);
4389 aarch64_record.aarch64_insn
4390 = (uint32_t) extract_unsigned_integer (&buf[0],
4391 insn_size,
4392 gdbarch_byte_order (gdbarch));
4393 aarch64_record.regcache = regcache;
4394 aarch64_record.this_addr = insn_addr;
4395 aarch64_record.gdbarch = gdbarch;
4396
4397 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4398 if (ret == AARCH64_RECORD_UNSUPPORTED)
4399 {
4400 printf_unfiltered (_("Process record does not support instruction "
4401 "0x%0x at address %s.\n"),
4402 aarch64_record.aarch64_insn,
4403 paddress (gdbarch, insn_addr));
4404 ret = -1;
4405 }
4406
4407 if (ret == 0)
4408 {
4409 /* Record registers. */
4410 record_full_arch_list_add_reg (aarch64_record.regcache,
4411 AARCH64_PC_REGNUM);
4412 /* Always record register CPSR. */
4413 record_full_arch_list_add_reg (aarch64_record.regcache,
4414 AARCH64_CPSR_REGNUM);
4415 if (aarch64_record.aarch64_regs)
4416 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4417 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4418 aarch64_record.aarch64_regs[rec_no]))
4419 ret = -1;
4420
4421 /* Record memories. */
4422 if (aarch64_record.aarch64_mems)
4423 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4424 if (record_full_arch_list_add_mem
4425 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4426 aarch64_record.aarch64_mems[rec_no].len))
4427 ret = -1;
4428
4429 if (record_full_arch_list_add_end ())
4430 ret = -1;
4431 }
4432
4433 deallocate_reg_mem (&aarch64_record);
4434 return ret;
4435 }
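
/* aarch64_process_record is not wired up in this file: OS-specific
   code (for GNU/Linux, aarch64-linux-tdep.c) is expected to install
   it with set_gdbarch_process_record, which is how the "record full"
   target reaches the handlers above.  */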