/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
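
/* For example, bits (insn, 5, 9) extracts the field occupying bits
   5..9 (inclusive) of INSN: submask (4) builds the five-bit mask and
   the shift lines the field up at bit 0.  */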

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* All possible aarch64 target descriptors, indexed by the SVE vector
   length in 128-bit quadwords (VQ).  Index 0 holds the descriptor
   without SVE.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
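
  /* Symbolically execute the prologue: REGS tracks a prologue-value
     expression for each X and D register, while STACK records which of
     those values have been stored, and where, relative to the entry
     SP.  */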

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only the bottom 64 bits of each V register (the D
		 register) need to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), 8,
		       regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  bool is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only the bottom 64 bits of each V register (the D
		 register) need to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
						      AARCH64_SP_REGNUM),
			 get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

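/* DEF_VEC_O (from vec.h) instantiates the VEC(stack_item_t) vector type
   used below to queue outgoing stack words until the final SP value is
   known.  */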
DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	{
	  /* Use the natural alignment for vector types (the same as for
	     scalar types), but the maximum alignment is 128 bits.  */
	  if (TYPE_LENGTH (t) > 16)
	    return 16;
	  else
	    return TYPE_LENGTH (t);
	}
      else
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
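
/* Example: for a type such as
       struct { int32_t i; double d; }
   aarch64_type_align returns 8, the largest alignment among the
   fields.  */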

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);

	if (TYPE_VECTOR (ty))
	  return 0;

	if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
	    && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
		|| (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
		    && TYPE_VECTOR (target_ty))))
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	/* HFA or HVA has at most four members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
		|| (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
		    && TYPE_VECTOR (member0_type)))
	      {
		int i;

		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
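
/* Example: under AAPCS64, struct { float x, y, z; } is an HFA (at most
   four members, all of the same floating-point type), so is_hfa_or_hva
   returns 1 for it; struct { float x; double y; } mixes member types
   and is rejected.  */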

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
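
/* Example: pass_in_x splits a 16-byte struct with NGRN == 0 across x0
   and x1.  On a big-endian target, a trailing fragment shorter than a
   register is shifted into the most significant bytes of its register,
   as required for sub-word struct and union arguments.  */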

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
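
/* Example: pass_on_stack with a 12-byte argument whose natural
   alignment is 8 advances NSAA by 12 and then pushes 4 further bytes
   of padding, so the next stack slot stays 8-byte aligned.  */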

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient V
   registers are available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
		    struct type *type,
		    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
		  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp, int struct_return,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
	      || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_COMPLEX:
	  if (info.nsrn <= 6)
	    {
	      const bfd_byte *buf = value_contents (arg);
	      struct type *target_type =
		check_typedef (TYPE_TARGET_TYPE (arg_type));

	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type), buf);
	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type),
			 buf + TYPE_LENGTH (target_type));
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  break;

	case TYPE_CODE_FLT:
	  pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (is_hfa_or_hva (arg_type))
	    {
	      int elements = TYPE_NFIELDS (arg_type);

	      /* Homogeneous Aggregates */
	      if (info.nsrn + elements < 8)
		{
		  int i;

		  for (i = 0; i < elements; i++)
		    {
		      /* We know that we have sufficient registers
			 available; therefore this will never fall back
			 to the stack.  */
		      struct value *field =
			value_primitive_field (arg, 0, i, arg_type);
		      struct type *field_type =
			check_typedef (value_type (field));

		      pass_in_v_or_stack (gdbarch, regcache, &info,
					  field_type, field);
		    }
		}
	      else
		{
		  info.nsrn = 8;
		  pass_on_stack (&info, arg_type, arg);
		}
	    }
	  else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
		   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
	    {
	      /* Short vector types are passed in V registers.  */
	      pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
	write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

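/* The expected mapping follows the "DWARF for the ARM 64-bit
   Architecture (AArch64)" document: DWARF numbers 0-30 name x0-x30,
   31 names sp, and 64-95 name v0-v31.  Anything else is left
   unmapped here.  */
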
static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
\f

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

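/* BP_MANIPULATION (see arch-utils.h) instantiates the helper that
   provides the standard kind_from_pc and bp_from_kind routines for a
   single fixed-length software breakpoint instruction.  */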
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regs->cooked_read (AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straightforward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regs->cooked_read (regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regs->cooked_read (regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[V_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regs->cooked_read (AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
	 for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
1941
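/* Illustrative examples of the rule above (assuming the usual AAPCS64
   classification of each type):

     struct { double d[4]; }     -- an HFA of four doubles, 32 bytes:
                                    still returned in V0-V3.
     struct { long x; long y; }  -- 16 bytes: returned in X0/X1.
     struct { char c[17]; }      -- 17 bytes, not an HFA: returned by
                                    invisible reference (PCS B.6).  */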
1942 /* Write into appropriate registers a function return value of type
1943 TYPE, given in virtual format. */
1944
1945 static void
1946 aarch64_store_return_value (struct type *type, struct regcache *regs,
1947 const gdb_byte *valbuf)
1948 {
1949 struct gdbarch *gdbarch = regs->arch ();
1950 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1951
1952 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1953 {
1954 bfd_byte buf[V_REGISTER_SIZE];
1955 int len = TYPE_LENGTH (type);
1956
1957 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1958 regs->cooked_write (AARCH64_V0_REGNUM, buf);
1959 }
1960 else if (TYPE_CODE (type) == TYPE_CODE_INT
1961 || TYPE_CODE (type) == TYPE_CODE_CHAR
1962 || TYPE_CODE (type) == TYPE_CODE_BOOL
1963 || TYPE_CODE (type) == TYPE_CODE_PTR
1964 || TYPE_IS_REFERENCE (type)
1965 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1966 {
1967 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1968 {
1969 /* Values of one word or less are zero/sign-extended and
1970 returned in X0. */
1971 bfd_byte tmpbuf[X_REGISTER_SIZE];
1972 LONGEST val = unpack_long (type, valbuf);
1973
1974 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1975 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
1976 }
1977 else
1978 {
1979 /* Integral values greater than one word are stored in
1980 consecutive registers starting with X0. This will always
1981 be a multiple of the register size. */
1982 int len = TYPE_LENGTH (type);
1983 int regno = AARCH64_X0_REGNUM;
1984
1985 while (len > 0)
1986 {
1987 regs->cooked_write (regno++, valbuf);
1988 len -= X_REGISTER_SIZE;
1989 valbuf += X_REGISTER_SIZE;
1990 }
1991 }
1992 }
1993 else if (is_hfa_or_hva (type))
1994 {
1995 int elements = TYPE_NFIELDS (type);
1996 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1997 int len = TYPE_LENGTH (member_type);
1998 int i;
1999
2000 for (i = 0; i < elements; i++)
2001 {
2002 int regno = AARCH64_V0_REGNUM + i;
2003 bfd_byte tmpbuf[V_REGISTER_SIZE];
2004
2005 if (aarch64_debug)
2006 {
2007 debug_printf ("write HFA or HVA return value element %d to %s\n",
2008 i + 1,
2009 gdbarch_register_name (gdbarch, regno));
2010 }
2011
2012 memcpy (tmpbuf, valbuf, len);
2013 regs->cooked_write (regno, tmpbuf);
2014 valbuf += len;
2015 }
2016 }
2017 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2018 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2019 {
2020 /* Short vector. */
2021 gdb_byte buf[V_REGISTER_SIZE];
2022
2023 memcpy (buf, valbuf, TYPE_LENGTH (type));
2024 regs->cooked_write (AARCH64_V0_REGNUM, buf);
2025 }
2026 else
2027 {
2028 /* For a structure or union the behaviour is as if the value had
2029 been stored to word-aligned memory and then loaded into
2030 registers with 64-bit load instruction(s). */
2031 int len = TYPE_LENGTH (type);
2032 int regno = AARCH64_X0_REGNUM;
2033 bfd_byte tmpbuf[X_REGISTER_SIZE];
2034
2035 while (len > 0)
2036 {
2037 memcpy (tmpbuf, valbuf,
2038 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2039 regs->cooked_write (regno++, tmpbuf);
2040 len -= X_REGISTER_SIZE;
2041 valbuf += X_REGISTER_SIZE;
2042 }
2043 }
2044 }
2045
2046 /* Implement the "return_value" gdbarch method. */
2047
2048 static enum return_value_convention
2049 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2050 struct type *valtype, struct regcache *regcache,
2051 gdb_byte *readbuf, const gdb_byte *writebuf)
2052 {
2053
2054 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2055 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2056 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2057 {
2058 if (aarch64_return_in_memory (gdbarch, valtype))
2059 {
2060 if (aarch64_debug)
2061 debug_printf ("return value in memory\n");
2062 return RETURN_VALUE_STRUCT_CONVENTION;
2063 }
2064 }
2065
2066 if (writebuf)
2067 aarch64_store_return_value (valtype, regcache, writebuf);
2068
2069 if (readbuf)
2070 aarch64_extract_return_value (valtype, regcache, readbuf);
2071
2072 if (aarch64_debug)
2073 debug_printf ("return value in registers\n");
2074
2075 return RETURN_VALUE_REGISTER_CONVENTION;
2076 }
2077
2078 /* Implement the "get_longjmp_target" gdbarch method. */
2079
2080 static int
2081 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2082 {
2083 CORE_ADDR jb_addr;
2084 gdb_byte buf[X_REGISTER_SIZE];
2085 struct gdbarch *gdbarch = get_frame_arch (frame);
2086 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2087 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2088
2089 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2090
2091 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2092 X_REGISTER_SIZE))
2093 return 0;
2094
2095 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2096 return 1;
2097 }
2098
2099 /* Implement the "gen_return_address" gdbarch method. */
2100
2101 static void
2102 aarch64_gen_return_address (struct gdbarch *gdbarch,
2103 struct agent_expr *ax, struct axs_value *value,
2104 CORE_ADDR scope)
2105 {
2106 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2107 value->kind = axs_lvalue_register;
2108 value->u.reg = AARCH64_LR_REGNUM;
2109 }
2110 \f
2111
2112 /* Return the pseudo register name corresponding to register regnum. */
2113
2114 static const char *
2115 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2116 {
2117 static const char *const q_name[] =
2118 {
2119 "q0", "q1", "q2", "q3",
2120 "q4", "q5", "q6", "q7",
2121 "q8", "q9", "q10", "q11",
2122 "q12", "q13", "q14", "q15",
2123 "q16", "q17", "q18", "q19",
2124 "q20", "q21", "q22", "q23",
2125 "q24", "q25", "q26", "q27",
2126 "q28", "q29", "q30", "q31",
2127 };
2128
2129 static const char *const d_name[] =
2130 {
2131 "d0", "d1", "d2", "d3",
2132 "d4", "d5", "d6", "d7",
2133 "d8", "d9", "d10", "d11",
2134 "d12", "d13", "d14", "d15",
2135 "d16", "d17", "d18", "d19",
2136 "d20", "d21", "d22", "d23",
2137 "d24", "d25", "d26", "d27",
2138 "d28", "d29", "d30", "d31",
2139 };
2140
2141 static const char *const s_name[] =
2142 {
2143 "s0", "s1", "s2", "s3",
2144 "s4", "s5", "s6", "s7",
2145 "s8", "s9", "s10", "s11",
2146 "s12", "s13", "s14", "s15",
2147 "s16", "s17", "s18", "s19",
2148 "s20", "s21", "s22", "s23",
2149 "s24", "s25", "s26", "s27",
2150 "s28", "s29", "s30", "s31",
2151 };
2152
2153 static const char *const h_name[] =
2154 {
2155 "h0", "h1", "h2", "h3",
2156 "h4", "h5", "h6", "h7",
2157 "h8", "h9", "h10", "h11",
2158 "h12", "h13", "h14", "h15",
2159 "h16", "h17", "h18", "h19",
2160 "h20", "h21", "h22", "h23",
2161 "h24", "h25", "h26", "h27",
2162 "h28", "h29", "h30", "h31",
2163 };
2164
2165 static const char *const b_name[] =
2166 {
2167 "b0", "b1", "b2", "b3",
2168 "b4", "b5", "b6", "b7",
2169 "b8", "b9", "b10", "b11",
2170 "b12", "b13", "b14", "b15",
2171 "b16", "b17", "b18", "b19",
2172 "b20", "b21", "b22", "b23",
2173 "b24", "b25", "b26", "b27",
2174 "b28", "b29", "b30", "b31",
2175 };
2176
2177 regnum -= gdbarch_num_regs (gdbarch);
2178
2179 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2180 return q_name[regnum - AARCH64_Q0_REGNUM];
2181
2182 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2183 return d_name[regnum - AARCH64_D0_REGNUM];
2184
2185 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2186 return s_name[regnum - AARCH64_S0_REGNUM];
2187
2188 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2189 return h_name[regnum - AARCH64_H0_REGNUM];
2190
2191 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2192 return b_name[regnum - AARCH64_B0_REGNUM];
2193
2194 internal_error (__FILE__, __LINE__,
2195 _("aarch64_pseudo_register_name: bad register number %d"),
2196 regnum);
2197 }
2198
2199 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2200
2201 static struct type *
2202 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2203 {
2204 regnum -= gdbarch_num_regs (gdbarch);
2205
2206 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2207 return aarch64_vnq_type (gdbarch);
2208
2209 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2210 return aarch64_vnd_type (gdbarch);
2211
2212 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2213 return aarch64_vns_type (gdbarch);
2214
2215 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2216 return aarch64_vnh_type (gdbarch);
2217
2218 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2219 return aarch64_vnb_type (gdbarch);
2220
2221 internal_error (__FILE__, __LINE__,
2222 _("aarch64_pseudo_register_type: bad register number %d"),
2223 regnum);
2224 }
2225
2226 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2227
2228 static int
2229 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2230 struct reggroup *group)
2231 {
2232 regnum -= gdbarch_num_regs (gdbarch);
2233
2234 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2235 return group == all_reggroup || group == vector_reggroup;
2236 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2237 return (group == all_reggroup || group == vector_reggroup
2238 || group == float_reggroup);
2239 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2240 return (group == all_reggroup || group == vector_reggroup
2241 || group == float_reggroup);
2242 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2243 return group == all_reggroup || group == vector_reggroup;
2244 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2245 return group == all_reggroup || group == vector_reggroup;
2246
2247 return group == all_reggroup;
2248 }
2249
2250 /* Implement the "pseudo_register_read_value" gdbarch method. */
2251
2252 static struct value *
2253 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2254 readable_regcache *regcache,
2255 int regnum)
2256 {
2257 gdb_byte reg_buf[V_REGISTER_SIZE];
2258 struct value *result_value;
2259 gdb_byte *buf;
2260
2261 result_value = allocate_value (register_type (gdbarch, regnum));
2262 VALUE_LVAL (result_value) = lval_register;
2263 VALUE_REGNUM (result_value) = regnum;
2264 buf = value_contents_raw (result_value);
2265
2266 regnum -= gdbarch_num_regs (gdbarch);
2267
2268 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2269 {
2270 enum register_status status;
2271 unsigned v_regnum;
2272
2273 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2274 status = regcache->raw_read (v_regnum, reg_buf);
2275 if (status != REG_VALID)
2276 mark_value_bytes_unavailable (result_value, 0,
2277 TYPE_LENGTH (value_type (result_value)));
2278 else
2279 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2280 return result_value;
2281 }
2282
2283 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2284 {
2285 enum register_status status;
2286 unsigned v_regnum;
2287
2288 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2289 status = regcache->raw_read (v_regnum, reg_buf);
2290 if (status != REG_VALID)
2291 mark_value_bytes_unavailable (result_value, 0,
2292 TYPE_LENGTH (value_type (result_value)));
2293 else
2294 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2295 return result_value;
2296 }
2297
2298 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2299 {
2300 enum register_status status;
2301 unsigned v_regnum;
2302
2303 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2304 status = regcache->raw_read (v_regnum, reg_buf);
2305 if (status != REG_VALID)
2306 mark_value_bytes_unavailable (result_value, 0,
2307 TYPE_LENGTH (value_type (result_value)));
2308 else
2309 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2310 return result_value;
2311 }
2312
2313 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2314 {
2315 enum register_status status;
2316 unsigned v_regnum;
2317
2318 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2319 status = regcache->raw_read (v_regnum, reg_buf);
2320 if (status != REG_VALID)
2321 mark_value_bytes_unavailable (result_value, 0,
2322 TYPE_LENGTH (value_type (result_value)));
2323 else
2324 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2325 return result_value;
2326 }
2327
2328 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2329 {
2330 enum register_status status;
2331 unsigned v_regnum;
2332
2333 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2334 status = regcache->raw_read (v_regnum, reg_buf);
2335 if (status != REG_VALID)
2336 mark_value_bytes_unavailable (result_value, 0,
2337 TYPE_LENGTH (value_type (result_value)));
2338 else
2339 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2340 return result_value;
2341 }
2342
2343 gdb_assert_not_reached ("regnum out of bounds");
2344 }
2345
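/* Sketch of the pseudo register layout handled above and in the
   write path below (illustrative): each Qn/Dn/Sn/Hn/Bn pseudo
   aliases the low bytes of the raw Vn register:

     B5 = byte  [0]      of V5   (B_REGISTER_SIZE = 1)
     S5 = bytes [0..3]   of V5   (S_REGISTER_SIZE = 4)
     Q5 = bytes [0..15]  of V5   (Q_REGISTER_SIZE = 16)

   so a read of S5 is a raw read of V5 plus a 4-byte copy, and a
   write of S5 zeroes the upper bytes of V5.  */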
2346 /* Implement the "pseudo_register_write" gdbarch method. */
2347
2348 static void
2349 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2350 int regnum, const gdb_byte *buf)
2351 {
2352 gdb_byte reg_buf[V_REGISTER_SIZE];
2353
2354 /* Ensure the register buffer is zero. We want GDB writes of the
2355 various 'scalar' pseudo registers to behave like architectural
2356 writes: register-width bytes are written and the remainder is
2357 set to zero. */
2358 memset (reg_buf, 0, sizeof (reg_buf));
2359
2360 regnum -= gdbarch_num_regs (gdbarch);
2361
2362 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2363 {
2364 /* pseudo Q registers */
2365 unsigned v_regnum;
2366
2367 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2368 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2369 regcache->raw_write (v_regnum, reg_buf);
2370 return;
2371 }
2372
2373 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2374 {
2375 /* pseudo D registers */
2376 unsigned v_regnum;
2377
2378 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2379 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2380 regcache->raw_write (v_regnum, reg_buf);
2381 return;
2382 }
2383
2384 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2385 {
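/* pseudo S registers */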
2386 unsigned v_regnum;
2387
2388 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2389 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2390 regcache->raw_write (v_regnum, reg_buf);
2391 return;
2392 }
2393
2394 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2395 {
2396 /* pseudo H registers */
2397 unsigned v_regnum;
2398
2399 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2400 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2401 regcache->raw_write (v_regnum, reg_buf);
2402 return;
2403 }
2404
2405 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2406 {
2407 /* pseudo B registers */
2408 unsigned v_regnum;
2409
2410 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2411 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2412 regcache->raw_write (v_regnum, reg_buf);
2413 return;
2414 }
2415
2416 gdb_assert_not_reached ("regnum out of bounds");
2417 }
2418
2419 /* Callback function for user_reg_add. */
2420
2421 static struct value *
2422 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2423 {
2424 const int *reg_p = (const int *) baton;
2425
2426 return value_of_register (*reg_p, frame);
2427 }
2428 \f
2429
2430 /* Implement the "software_single_step" gdbarch method, needed to
2431 single step through atomic sequences on AArch64. */
2432
2433 static std::vector<CORE_ADDR>
2434 aarch64_software_single_step (struct regcache *regcache)
2435 {
2436 struct gdbarch *gdbarch = regcache->arch ();
2437 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2438 const int insn_size = 4;
2439 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2440 CORE_ADDR pc = regcache_read_pc (regcache);
2441 CORE_ADDR breaks[2] = { -1, -1 };
2442 CORE_ADDR loc = pc;
2443 CORE_ADDR closing_insn = 0;
2444 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2445 byte_order_for_code);
2446 int index;
2447 int insn_count;
2448 int bc_insn_count = 0; /* Conditional branch instruction count. */
2449 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2450 aarch64_inst inst;
2451
2452 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2453 return {};
2454
2455 /* Look for a Load Exclusive instruction which begins the sequence. */
2456 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2457 return {};
2458
2459 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2460 {
2461 loc += insn_size;
2462 insn = read_memory_unsigned_integer (loc, insn_size,
2463 byte_order_for_code);
2464
2465 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2466 return {};
2467 /* Check if the instruction is a conditional branch. */
2468 if (inst.opcode->iclass == condbranch)
2469 {
2470 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2471
2472 if (bc_insn_count >= 1)
2473 return {};
2474
2475 /* It is, so we'll try to set a breakpoint at the destination. */
2476 breaks[1] = loc + inst.operands[0].imm.value;
2477
2478 bc_insn_count++;
2479 last_breakpoint++;
2480 }
2481
2482 /* Look for the Store Exclusive which closes the atomic sequence. */
2483 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2484 {
2485 closing_insn = loc;
2486 break;
2487 }
2488 }
2489
2490 /* We didn't find a closing Store Exclusive instruction, fall back. */
2491 if (!closing_insn)
2492 return {};
2493
2494 /* Insert breakpoint after the end of the atomic sequence. */
2495 breaks[0] = loc + insn_size;
2496
2497 /* Check for duplicated breakpoints, and also check that the second
2498 breakpoint is not within the atomic sequence. */
2499 if (last_breakpoint
2500 && (breaks[1] == breaks[0]
2501 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2502 last_breakpoint = 0;
2503
2504 std::vector<CORE_ADDR> next_pcs;
2505
2506 /* Insert the breakpoint at the end of the sequence, and one at the
2507 destination of the conditional branch, if it exists. */
2508 for (index = 0; index <= last_breakpoint; index++)
2509 next_pcs.push_back (breaks[index]);
2510
2511 return next_pcs;
2512 }
2513
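/* Example sequence handled above (illustrative; hypothetical
   addresses):

     0x400620  ldaxr  w2, [x0]       <- Load Exclusive opens it
     0x400624  cmp    w2, w1
     0x400628  b.ne   0x400640       <- breaks[1] = 0x400640
     0x40062c  stlxr  w3, w4, [x0]   <- Store Exclusive closes it
     0x400630  cbnz   w3, 0x400620

   breaks[0] lands on the instruction after the STLXR (0x400630
   here); breaks[1] survives only because the branch target lies
   outside the sequence and differs from breaks[0].  */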
2514 struct aarch64_displaced_step_closure : public displaced_step_closure
2515 {
2516 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2517 is being displaced stepped. */
2518 int cond = 0;
2519
2520 /* PC adjustment offset after displaced stepping. */
2521 int32_t pc_adjust = 0;
2522 };
2523
2524 /* Data when visiting instructions for displaced stepping. */
2525
2526 struct aarch64_displaced_step_data
2527 {
2528 struct aarch64_insn_data base;
2529
2530 /* The address at which the instruction will be executed. */
2531 CORE_ADDR new_addr;
2532 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2533 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2534 /* Number of instructions in INSN_BUF. */
2535 unsigned insn_count;
2536 /* Registers when doing displaced stepping. */
2537 struct regcache *regs;
2538
2539 aarch64_displaced_step_closure *dsc;
2540 };
2541
2542 /* Implementation of aarch64_insn_visitor method "b". */
2543
2544 static void
2545 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2546 struct aarch64_insn_data *data)
2547 {
2548 struct aarch64_displaced_step_data *dsd
2549 = (struct aarch64_displaced_step_data *) data;
2550 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2551
2552 if (can_encode_int32 (new_offset, 28))
2553 {
2554 /* Emit B rather than BL, because executing BL on a new address
2555 will get the wrong address into LR. In order to avoid this,
2556 we emit B, and update LR if the instruction is BL. */
2557 emit_b (dsd->insn_buf, 0, new_offset);
2558 dsd->insn_count++;
2559 }
2560 else
2561 {
2562 /* Write NOP. */
2563 emit_nop (dsd->insn_buf);
2564 dsd->insn_count++;
2565 dsd->dsc->pc_adjust = offset;
2566 }
2567
2568 if (is_bl)
2569 {
2570 /* Update LR. */
2571 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2572 data->insn_addr + 4);
2573 }
2574 }
2575
2576 /* Implementation of aarch64_insn_visitor method "b_cond". */
2577
2578 static void
2579 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2580 struct aarch64_insn_data *data)
2581 {
2582 struct aarch64_displaced_step_data *dsd
2583 = (struct aarch64_displaced_step_data *) data;
2584
2585 /* GDB has to fix up the PC after displaced stepping this instruction
2586 differently depending on whether the condition is true or false.
2587 Instead of checking COND against the condition flags, we can emit
2588 the following instructions, and then GDB can tell how to fix up
2589 the PC from the PC value alone.
2590
2591 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2592 INSN1 ;
2593 TAKEN:
2594 INSN2
2595 */
2596
2597 emit_bcond (dsd->insn_buf, cond, 8);
2598 dsd->dsc->cond = 1;
2599 dsd->dsc->pc_adjust = offset;
2600 dsd->insn_count = 1;
2601 }
2602
2603 /* Construct an aarch64_register value dynamically. If the register
2604 is known statically, it should be made a global constant instead of
2605 being built by this helper function. */
2606
2607 static struct aarch64_register
2608 aarch64_register (unsigned num, int is64)
2609 {
2610 return (struct aarch64_register) { num, is64 };
2611 }
2612
2613 /* Implementation of aarch64_insn_visitor method "cb". */
2614
2615 static void
2616 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2617 const unsigned rn, int is64,
2618 struct aarch64_insn_data *data)
2619 {
2620 struct aarch64_displaced_step_data *dsd
2621 = (struct aarch64_displaced_step_data *) data;
2622
2623 /* The offset is out of range for a compare and branch
2624 instruction. We can use the following instructions instead:
2625
2626 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2627 INSN1 ;
2628 TAKEN:
2629 INSN2
2630 */
2631 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2632 dsd->insn_count = 1;
2633 dsd->dsc->cond = 1;
2634 dsd->dsc->pc_adjust = offset;
2635 }
2636
2637 /* Implementation of aarch64_insn_visitor method "tb". */
2638
2639 static void
2640 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2641 const unsigned rt, unsigned bit,
2642 struct aarch64_insn_data *data)
2643 {
2644 struct aarch64_displaced_step_data *dsd
2645 = (struct aarch64_displaced_step_data *) data;
2646
2647 /* The offset is out of range for a test bit and branch
2648 instruction. We can use the following instructions instead:
2649
2650 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2651 INSN1 ;
2652 TAKEN:
2653 INSN2
2654
2655 */
2656 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2657 dsd->insn_count = 1;
2658 dsd->dsc->cond = 1;
2659 dsd->dsc->pc_adjust = offset;
2660 }
2661
2662 /* Implementation of aarch64_insn_visitor method "adr". */
2663
2664 static void
2665 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2666 const int is_adrp, struct aarch64_insn_data *data)
2667 {
2668 struct aarch64_displaced_step_data *dsd
2669 = (struct aarch64_displaced_step_data *) data;
2670 /* We know exactly the address the ADR{P,} instruction will compute.
2671 We can just write it to the destination register. */
2672 CORE_ADDR address = data->insn_addr + offset;
2673
2674 if (is_adrp)
2675 {
2676 /* Clear the lower 12 bits of the offset to get the 4K page. */
2677 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2678 address & ~0xfff);
2679 }
2680 else
2681 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2682 address);
2683
2684 dsd->dsc->pc_adjust = 4;
2685 emit_nop (dsd->insn_buf);
2686 dsd->insn_count = 1;
2687 }
2688
2689 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2690
2691 static void
2692 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2693 const unsigned rt, const int is64,
2694 struct aarch64_insn_data *data)
2695 {
2696 struct aarch64_displaced_step_data *dsd
2697 = (struct aarch64_displaced_step_data *) data;
2698 CORE_ADDR address = data->insn_addr + offset;
2699 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2700
2701 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2702 address);
2703
2704 if (is_sw)
2705 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2706 aarch64_register (rt, 1), zero);
2707 else
2708 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2709 aarch64_register (rt, 1), zero);
2710
2711 dsd->dsc->pc_adjust = 4;
2712 }
2713
2714 /* Implementation of aarch64_insn_visitor method "others". */
2715
2716 static void
2717 aarch64_displaced_step_others (const uint32_t insn,
2718 struct aarch64_insn_data *data)
2719 {
2720 struct aarch64_displaced_step_data *dsd
2721 = (struct aarch64_displaced_step_data *) data;
2722
2723 aarch64_emit_insn (dsd->insn_buf, insn);
2724 dsd->insn_count = 1;
2725
2726 if ((insn & 0xfffffc1f) == 0xd65f0000)
2727 {
2728 /* RET */
2729 dsd->dsc->pc_adjust = 0;
2730 }
2731 else
2732 dsd->dsc->pc_adjust = 4;
2733 }
2734
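/* Note on the RET test above: the A64 encoding of RET <Xn> is

     1101 0110 0101 1111 0000 00nn nnn0 0000  =  0xd65f0000 | (Rn << 5)

   so masking with 0xfffffc1f ignores only the register field.  PC is
   left alone after a RET because the relocated copy has already
   transferred control; any other "plain" instruction falls through
   and needs PC set to FROM + 4.  */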
2735 static const struct aarch64_insn_visitor visitor =
2736 {
2737 aarch64_displaced_step_b,
2738 aarch64_displaced_step_b_cond,
2739 aarch64_displaced_step_cb,
2740 aarch64_displaced_step_tb,
2741 aarch64_displaced_step_adr,
2742 aarch64_displaced_step_ldr_literal,
2743 aarch64_displaced_step_others,
2744 };
2745
2746 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2747
2748 struct displaced_step_closure *
2749 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2750 CORE_ADDR from, CORE_ADDR to,
2751 struct regcache *regs)
2752 {
2753 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2754 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2755 struct aarch64_displaced_step_data dsd;
2756 aarch64_inst inst;
2757
2758 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2759 return NULL;
2760
2761 /* Look for a Load Exclusive instruction which begins the sequence. */
2762 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2763 {
2764 /* We can't displaced step atomic sequences. */
2765 return NULL;
2766 }
2767
2768 std::unique_ptr<aarch64_displaced_step_closure> dsc
2769 (new aarch64_displaced_step_closure);
2770 dsd.base.insn_addr = from;
2771 dsd.new_addr = to;
2772 dsd.regs = regs;
2773 dsd.dsc = dsc.get ();
2774 dsd.insn_count = 0;
2775 aarch64_relocate_instruction (insn, &visitor,
2776 (struct aarch64_insn_data *) &dsd);
2777 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2778
2779 if (dsd.insn_count != 0)
2780 {
2781 int i;
2782
2783 /* The instruction can be relocated to the scratch pad. Copy the
2784 relocated instruction(s) there. */
2785 for (i = 0; i < dsd.insn_count; i++)
2786 {
2787 if (debug_displaced)
2788 {
2789 debug_printf ("displaced: writing insn ");
2790 debug_printf ("%.8x", dsd.insn_buf[i]);
2791 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2792 }
2793 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2794 (ULONGEST) dsd.insn_buf[i]);
2795 }
2796 }
2797 else
2798 {
2799 dsc = NULL;
2800 }
2801
2802 return dsc.release ();
2803 }
2804
2805 /* Implement the "displaced_step_fixup" gdbarch method. */
2806
2807 void
2808 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2809 struct displaced_step_closure *dsc_,
2810 CORE_ADDR from, CORE_ADDR to,
2811 struct regcache *regs)
2812 {
2813 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2814
2815 if (dsc->cond)
2816 {
2817 ULONGEST pc;
2818
2819 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2820 if (pc - to == 8)
2821 {
2822 /* Condition is true. */
2823 }
2824 else if (pc - to == 4)
2825 {
2826 /* Condition is false. */
2827 dsc->pc_adjust = 4;
2828 }
2829 else
2830 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2831 }
2832
2833 if (dsc->pc_adjust != 0)
2834 {
2835 if (debug_displaced)
2836 {
2837 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2838 paddress (gdbarch, from), dsc->pc_adjust);
2839 }
2840 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2841 from + dsc->pc_adjust);
2842 }
2843 }
2844
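/* Worked example of the fixup above (illustrative): a B.EQ at FROM
   is copied to the scratch pad at TO as

     TO + 0:  b.eq  TO + 8    (emitted by aarch64_displaced_step_b_cond)

   After the single step, PC == TO + 8 means the condition held and
   PC becomes FROM + the original branch offset (PC_ADJUST);
   PC == TO + 4 means it did not, and PC becomes FROM + 4.  */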
2845 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2846
2847 int
2848 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2849 struct displaced_step_closure *closure)
2850 {
2851 return 1;
2852 }
2853
2854 /* Get the correct target description for the given VQ value.
2855 If VQ is zero then it is assumed SVE is not supported.
2856 (It is not possible to set VQ to zero on an SVE system.) */
2857
2858 const target_desc *
2859 aarch64_read_description (uint64_t vq)
2860 {
2861 if (vq > AARCH64_MAX_SVE_VQ)
2862 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2863 AARCH64_MAX_SVE_VQ);
2864
2865 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2866
2867 if (tdesc == NULL)
2868 {
2869 tdesc = aarch64_create_target_description (vq);
2870 tdesc_aarch64_list[vq] = tdesc;
2871 }
2872
2873 return tdesc;
2874 }
2875
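/* Example of the VQ <-> vector length mapping used here
   (illustrative): VQ counts 128-bit quadwords in a Z register, so

     vector length  16 bytes (128 bits)   ->  VQ 1
     vector length  32 bytes (256 bits)   ->  VQ 2
     vector length 256 bytes (2048 bits)  ->  VQ 16

   which is the same arithmetic sve_vq_from_vl performs below.  */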
2876 /* Return the VQ used when creating the target description TDESC. */
2877
2878 static long
2879 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2880 {
2881 const struct tdesc_feature *feature_sve;
2882
2883 if (!tdesc_has_registers (tdesc))
2884 return 0;
2885
2886 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2887
2888 if (feature_sve == nullptr)
2889 return 0;
2890
2891 long vl = tdesc_register_size (feature_sve, aarch64_sve_register_names[0]);
2892 return sve_vq_from_vl (vl);
2893 }
2894
2895
2896 /* Initialize the current architecture based on INFO. If possible,
2897 re-use an architecture from ARCHES, which is a list of
2898 architectures already created during this debugging session.
2899
2900 Called e.g. at program startup, when reading a core file, and when
2901 reading a binary file. */
2902
2903 static struct gdbarch *
2904 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2905 {
2906 struct gdbarch_tdep *tdep;
2907 struct gdbarch *gdbarch;
2908 struct gdbarch_list *best_arch;
2909 struct tdesc_arch_data *tdesc_data = NULL;
2910 const struct target_desc *tdesc = info.target_desc;
2911 int i;
2912 int valid_p = 1;
2913 const struct tdesc_feature *feature_core;
2914 const struct tdesc_feature *feature_fpu;
2915 const struct tdesc_feature *feature_sve;
2916 int num_regs = 0;
2917 int num_pseudo_regs = 0;
2918
2919 /* Ensure we always have a target description. */
2920 if (!tdesc_has_registers (tdesc))
2921 tdesc = aarch64_read_description (0);
2922 gdb_assert (tdesc);
2923
2924 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2925 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2926 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2927
2928 if (feature_core == NULL)
2929 return NULL;
2930
2931 tdesc_data = tdesc_data_alloc ();
2932
2933 /* Validate that the description provides the mandatory core R
2934 registers and allocate their numbers. */
2935 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2936 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
2937 AARCH64_X0_REGNUM + i,
2938 aarch64_r_register_names[i]);
2939
2940 num_regs = AARCH64_X0_REGNUM + i;
2941
2942 /* Add the V registers. */
2943 if (feature_fpu != NULL)
2944 {
2945 if (feature_sve != NULL)
2946 error (_("Program contains both fpu and SVE features."));
2947
2948 /* Validate that the description provides the mandatory V registers
2949 and allocate their numbers. */
2950 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2951 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
2952 AARCH64_V0_REGNUM + i,
2953 aarch64_v_register_names[i]);
2954
2955 num_regs = AARCH64_V0_REGNUM + i;
2956 }
2957
2958 /* Add the SVE registers. */
2959 if (feature_sve != NULL)
2960 {
2961 /* Validate that the description provides the mandatory SVE registers
2962 and allocate their numbers. */
2963 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
2964 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
2965 AARCH64_SVE_Z0_REGNUM + i,
2966 aarch64_sve_register_names[i]);
2967
2968 num_regs = AARCH64_SVE_Z0_REGNUM + i;
2969 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
2970 }
2971
2972 if (feature_fpu != NULL || feature_sve != NULL)
2973 {
2974 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
2975 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
2976 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
2977 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
2978 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
2979 }
2980
2981 if (!valid_p)
2982 {
2983 tdesc_data_cleanup (tdesc_data);
2984 return NULL;
2985 }
2986
2987 /* AArch64 code is always little-endian. */
2988 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2989
2990 /* If there is already a candidate, use it. */
2991 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2992 best_arch != NULL;
2993 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2994 {
2995 /* Found a match. */
2996 break;
2997 }
2998
2999 if (best_arch != NULL)
3000 {
3001 if (tdesc_data != NULL)
3002 tdesc_data_cleanup (tdesc_data);
3003 return best_arch->gdbarch;
3004 }
3005
3006 tdep = XCNEW (struct gdbarch_tdep);
3007 gdbarch = gdbarch_alloc (&info, tdep);
3008
3009 /* This should be low enough for everything. */
3010 tdep->lowest_pc = 0x20;
3011 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3012 tdep->jb_elt_size = 8;
3013 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3014
3015 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3016 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3017
3018 /* Frame handling. */
3019 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3020 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3021 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3022
3023 /* Advance PC across function entry code. */
3024 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3025
3026 /* The stack grows downward. */
3027 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3028
3029 /* Breakpoint manipulation. */
3030 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3031 aarch64_breakpoint::kind_from_pc);
3032 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3033 aarch64_breakpoint::bp_from_kind);
3034 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3035 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3036
3037 /* Information about registers, etc. */
3038 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3039 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3040 set_gdbarch_num_regs (gdbarch, num_regs);
3041
3042 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3043 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3044 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3045 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3046 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3047 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3048 aarch64_pseudo_register_reggroup_p);
3049
3050 /* ABI */
3051 set_gdbarch_short_bit (gdbarch, 16);
3052 set_gdbarch_int_bit (gdbarch, 32);
3053 set_gdbarch_float_bit (gdbarch, 32);
3054 set_gdbarch_double_bit (gdbarch, 64);
3055 set_gdbarch_long_double_bit (gdbarch, 128);
3056 set_gdbarch_long_bit (gdbarch, 64);
3057 set_gdbarch_long_long_bit (gdbarch, 64);
3058 set_gdbarch_ptr_bit (gdbarch, 64);
3059 set_gdbarch_char_signed (gdbarch, 0);
3060 set_gdbarch_wchar_signed (gdbarch, 0);
3061 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3062 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3063 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3064
3065 /* Internal <-> external register number maps. */
3066 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3067
3068 /* Returning results. */
3069 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3070
3071 /* Disassembly. */
3072 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3073
3074 /* Virtual tables. */
3075 set_gdbarch_vbit_in_delta (gdbarch, 1);
3076
3077 /* Hook in the ABI-specific overrides, if they have been registered. */
3078 info.target_desc = tdesc;
3079 info.tdesc_data = tdesc_data;
3080 gdbarch_init_osabi (info, gdbarch);
3081
3082 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3083
3084 /* Add some default predicates. */
3085 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3086 dwarf2_append_unwinders (gdbarch);
3087 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3088
3089 frame_base_set_default (gdbarch, &aarch64_normal_base);
3090
3091 /* Now that we have tuned the configuration, set a few final things
3092 based on what the OS ABI has told us. */
3093
3094 if (tdep->jb_pc >= 0)
3095 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3096
3097 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3098
3099 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3100
3101 /* Add standard register aliases. */
3102 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3103 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3104 value_of_aarch64_user_reg,
3105 &aarch64_register_aliases[i].regnum);
3106
3107 return gdbarch;
3108 }
3109
3110 static void
3111 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3112 {
3113 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3114
3115 if (tdep == NULL)
3116 return;
3117
3118 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3119 paddress (gdbarch, tdep->lowest_pc));
3120 }
3121
3122 #if GDB_SELF_TEST
3123 namespace selftests
3124 {
3125 static void aarch64_process_record_test (void);
3126 }
3127 #endif
3128
3129 void
3130 _initialize_aarch64_tdep (void)
3131 {
3132 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3133 aarch64_dump_tdep);
3134
3135 /* Debug this file's internals. */
3136 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3137 Set AArch64 debugging."), _("\
3138 Show AArch64 debugging."), _("\
3139 When on, AArch64 specific debugging is enabled."),
3140 NULL,
3141 show_aarch64_debug,
3142 &setdebuglist, &showdebuglist);
3143
3144 #if GDB_SELF_TEST
3145 selftests::register_test ("aarch64-analyze-prologue",
3146 selftests::aarch64_analyze_prologue_test);
3147 selftests::register_test ("aarch64-process-record",
3148 selftests::aarch64_process_record_test);
3149 selftests::record_xml_tdesc ("aarch64.xml",
3150 aarch64_create_target_description (0));
3151 #endif
3152 }
3153
3154 /* AArch64 process record-replay related structures, defines etc. */
3155
3156 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3157 do \
3158 { \
3159 unsigned int reg_len = LENGTH; \
3160 if (reg_len) \
3161 { \
3162 REGS = XNEWVEC (uint32_t, reg_len); \
3163 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3164 } \
3165 } \
3166 while (0)
3167
3168 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3169 do \
3170 { \
3171 unsigned int mem_len = LENGTH; \
3172 if (mem_len) \
3173 { \
3174 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3175 memcpy (&MEMS->len, &RECORD_BUF[0], \
3176 sizeof (struct aarch64_mem_r) * LENGTH); \
3177 } \
3178 } \
3179 while (0)
3180
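/* Typical use of the macros above in a record handler (sketch with
   hypothetical values):

     uint32_t record_buf[2];

     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   REG_ALLOC copies the register numbers into a freshly XNEWVEC'd
   array; MEM_ALLOC does the same for {len, addr} memory records.  */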
3181 /* AArch64 record/replay structures and enumerations. */
3182
3183 struct aarch64_mem_r
3184 {
3185 uint64_t len; /* Record length. */
3186 uint64_t addr; /* Memory address. */
3187 };
3188
3189 enum aarch64_record_result
3190 {
3191 AARCH64_RECORD_SUCCESS,
3192 AARCH64_RECORD_UNSUPPORTED,
3193 AARCH64_RECORD_UNKNOWN
3194 };
3195
3196 typedef struct insn_decode_record_t
3197 {
3198 struct gdbarch *gdbarch;
3199 struct regcache *regcache;
3200 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3201 uint32_t aarch64_insn; /* Insn to be recorded. */
3202 uint32_t mem_rec_count; /* Count of memory records. */
3203 uint32_t reg_rec_count; /* Count of register records. */
3204 uint32_t *aarch64_regs; /* Registers to be recorded. */
3205 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3206 } insn_decode_record;
3207
3208 /* Record handler for data processing - register instructions. */
3209
3210 static unsigned int
3211 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3212 {
3213 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3214 uint32_t record_buf[4];
3215
3216 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3217 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3218 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3219
3220 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3221 {
3222 uint8_t setflags;
3223
3224 /* Logical (shifted register). */
3225 if (insn_bits24_27 == 0x0a)
3226 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3227 /* Add/subtract. */
3228 else if (insn_bits24_27 == 0x0b)
3229 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3230 else
3231 return AARCH64_RECORD_UNKNOWN;
3232
3233 record_buf[0] = reg_rd;
3234 aarch64_insn_r->reg_rec_count = 1;
3235 if (setflags)
3236 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3237 }
3238 else
3239 {
3240 if (insn_bits24_27 == 0x0b)
3241 {
3242 /* Data-processing (3 source). */
3243 record_buf[0] = reg_rd;
3244 aarch64_insn_r->reg_rec_count = 1;
3245 }
3246 else if (insn_bits24_27 == 0x0a)
3247 {
3248 if (insn_bits21_23 == 0x00)
3249 {
3250 /* Add/subtract (with carry). */
3251 record_buf[0] = reg_rd;
3252 aarch64_insn_r->reg_rec_count = 1;
3253 if (bit (aarch64_insn_r->aarch64_insn, 29))
3254 {
3255 record_buf[1] = AARCH64_CPSR_REGNUM;
3256 aarch64_insn_r->reg_rec_count = 2;
3257 }
3258 }
3259 else if (insn_bits21_23 == 0x02)
3260 {
3261 /* Conditional compare (register) and conditional compare
3262 (immediate) instructions. */
3263 record_buf[0] = AARCH64_CPSR_REGNUM;
3264 aarch64_insn_r->reg_rec_count = 1;
3265 }
3266 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3267 {
3268 /* Conditional select. */
3269 /* Data-processing (2 source). */
3270 /* Data-processing (1 source). */
3271 record_buf[0] = reg_rd;
3272 aarch64_insn_r->reg_rec_count = 1;
3273 }
3274 else
3275 return AARCH64_RECORD_UNKNOWN;
3276 }
3277 }
3278
3279 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3280 record_buf);
3281 return AARCH64_RECORD_SUCCESS;
3282 }
3283
3284 /* Record handler for data processing - immediate instructions. */
3285
3286 static unsigned int
3287 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3288 {
3289 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3290 uint32_t record_buf[4];
3291
3292 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3293 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3294 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3295
3296 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3297 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3298 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3299 {
3300 record_buf[0] = reg_rd;
3301 aarch64_insn_r->reg_rec_count = 1;
3302 }
3303 else if (insn_bits24_27 == 0x01)
3304 {
3305 /* Add/Subtract (immediate). */
3306 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3307 record_buf[0] = reg_rd;
3308 aarch64_insn_r->reg_rec_count = 1;
3309 if (setflags)
3310 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3311 }
3312 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3313 {
3314 /* Logical (immediate). */
3315 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3316 record_buf[0] = reg_rd;
3317 aarch64_insn_r->reg_rec_count = 1;
3318 if (setflags)
3319 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3320 }
3321 else
3322 return AARCH64_RECORD_UNKNOWN;
3323
3324 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3325 record_buf);
3326 return AARCH64_RECORD_SUCCESS;
3327 }
3328
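/* Worked example for the handler above (illustrative):
   ADDS X1, X2, #4 has bits 24-27 == 0x01 (add/subtract immediate)
   and the S bit (bit 29) set, so it records both X1 and
   AARCH64_CPSR_REGNUM; plain ADD X1, X2, #4 records only X1.  */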
3329 /* Record handler for branch, exception generation and system instructions. */
3330
3331 static unsigned int
3332 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3333 {
3334 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3335 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3336 uint32_t record_buf[4];
3337
3338 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3339 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3340 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3341
3342 if (insn_bits28_31 == 0x0d)
3343 {
3344 /* Exception generation instructions. */
3345 if (insn_bits24_27 == 0x04)
3346 {
3347 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3348 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3349 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3350 {
3351 ULONGEST svc_number;
3352
3353 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3354 &svc_number);
3355 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3356 svc_number);
3357 }
3358 else
3359 return AARCH64_RECORD_UNSUPPORTED;
3360 }
3361 /* System instructions. */
3362 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3363 {
3364 uint32_t reg_rt, reg_crn;
3365
3366 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3367 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3368
3369 /* Record rt in case of sysl and mrs instructions. */
3370 if (bit (aarch64_insn_r->aarch64_insn, 21))
3371 {
3372 record_buf[0] = reg_rt;
3373 aarch64_insn_r->reg_rec_count = 1;
3374 }
3375 /* Record cpsr for hint and msr(immediate) instructions. */
3376 else if (reg_crn == 0x02 || reg_crn == 0x04)
3377 {
3378 record_buf[0] = AARCH64_CPSR_REGNUM;
3379 aarch64_insn_r->reg_rec_count = 1;
3380 }
3381 }
3382 /* Unconditional branch (register). */
3383 else if ((insn_bits24_27 & 0x0e) == 0x06)
3384 {
3385 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3386 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3387 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3388 }
3389 else
3390 return AARCH64_RECORD_UNKNOWN;
3391 }
3392 /* Unconditional branch (immediate). */
3393 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3394 {
3395 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3396 if (bit (aarch64_insn_r->aarch64_insn, 31))
3397 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3398 }
3399 else
3400 /* Compare & branch (immediate), Test & branch (immediate) and
3401 Conditional branch (immediate). */
3402 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3403
3404 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3405 record_buf);
3406 return AARCH64_RECORD_SUCCESS;
3407 }
3408
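/* Worked example for the handler above (illustrative): BL records
   both AARCH64_PC_REGNUM and AARCH64_LR_REGNUM because bit 31 is
   set, while a plain B (or B.cond, CBZ, TBZ) records only the
   PC.  */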
3409 /* Record handler for advanced SIMD load and store instructions. */
3410
3411 static unsigned int
3412 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3413 {
3414 CORE_ADDR address;
3415 uint64_t addr_offset = 0;
3416 uint32_t record_buf[24];
3417 uint64_t record_buf_mem[24];
3418 uint32_t reg_rn, reg_rt;
3419 uint32_t reg_index = 0, mem_index = 0;
3420 uint8_t opcode_bits, size_bits;
3421
3422 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3423 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3424 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3425 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3426 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3427
3428 if (record_debug)
3429 debug_printf ("Process record: Advanced SIMD load/store\n");
3430
3431 /* Load/store single structure. */
3432 if (bit (aarch64_insn_r->aarch64_insn, 24))
3433 {
3434 uint8_t sindex, scale, selem, esize, replicate = 0;
3435 scale = opcode_bits >> 2;
3436 selem = ((opcode_bits & 0x02) |
3437 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3438 switch (scale)
3439 {
3440 case 1:
3441 if (size_bits & 0x01)
3442 return AARCH64_RECORD_UNKNOWN;
3443 break;
3444 case 2:
3445 if ((size_bits >> 1) & 0x01)
3446 return AARCH64_RECORD_UNKNOWN;
3447 if (size_bits & 0x01)
3448 {
3449 if (!((opcode_bits >> 1) & 0x01))
3450 scale = 3;
3451 else
3452 return AARCH64_RECORD_UNKNOWN;
3453 }
3454 break;
3455 case 3:
3456 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3457 {
3458 scale = size_bits;
3459 replicate = 1;
3460 break;
3461 }
3462 else
3463 return AARCH64_RECORD_UNKNOWN;
3464 default:
3465 break;
3466 }
3467 esize = 8 << scale;
3468 if (replicate)
3469 for (sindex = 0; sindex < selem; sindex++)
3470 {
3471 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3472 reg_rt = (reg_rt + 1) % 32;
3473 }
3474 else
3475 {
3476 for (sindex = 0; sindex < selem; sindex++)
3477 {
3478 if (bit (aarch64_insn_r->aarch64_insn, 22))
3479 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3480 else
3481 {
3482 record_buf_mem[mem_index++] = esize / 8;
3483 record_buf_mem[mem_index++] = address + addr_offset;
3484 }
3485 addr_offset = addr_offset + (esize / 8);
3486 reg_rt = (reg_rt + 1) % 32;
3487 }
3488 }
3489 }
3490 /* Load/store multiple structure. */
3491 else
3492 {
3493 uint8_t selem, esize, rpt, elements;
3494 uint8_t eindex, rindex;
3495
3496 esize = 8 << size_bits;
3497 if (bit (aarch64_insn_r->aarch64_insn, 30))
3498 elements = 128 / esize;
3499 else
3500 elements = 64 / esize;
3501
3502 switch (opcode_bits)
3503 {
3504 /* LD/ST4 (4 Registers). */
3505 case 0:
3506 rpt = 1;
3507 selem = 4;
3508 break;
3509 /* LD/ST1 (4 Registers). */
3510 case 2:
3511 rpt = 4;
3512 selem = 1;
3513 break;
3514 /* LD/ST3 (3 Registers). */
3515 case 4:
3516 rpt = 1;
3517 selem = 3;
3518 break;
3519 /* LD/ST1 (3 Registers). */
3520 case 6:
3521 rpt = 3;
3522 selem = 1;
3523 break;
3524 /* LD/ST1 (1 Register). */
3525 case 7:
3526 rpt = 1;
3527 selem = 1;
3528 break;
3529 /* LD/ST2 (2 Registers). */
3530 case 8:
3531 rpt = 1;
3532 selem = 2;
3533 break;
3534 /* LD/ST1 (2 Registers). */
3535 case 10:
3536 rpt = 2;
3537 selem = 1;
3538 break;
3539 default:
3540 return AARCH64_RECORD_UNSUPPORTED;
3541 break;
3542 }
3543 for (rindex = 0; rindex < rpt; rindex++)
3544 for (eindex = 0; eindex < elements; eindex++)
3545 {
3546 uint8_t reg_tt, sindex;
3547 reg_tt = (reg_rt + rindex) % 32;
3548 for (sindex = 0; sindex < selem; sindex++)
3549 {
3550 if (bit (aarch64_insn_r->aarch64_insn, 22))
3551 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3552 else
3553 {
3554 record_buf_mem[mem_index++] = esize / 8;
3555 record_buf_mem[mem_index++] = address + addr_offset;
3556 }
3557 addr_offset = addr_offset + (esize / 8);
3558 reg_tt = (reg_tt + 1) % 32;
3559 }
3560 }
3561 }
3562
3563 if (bit (aarch64_insn_r->aarch64_insn, 23))
3564 record_buf[reg_index++] = reg_rn;
3565
3566 aarch64_insn_r->reg_rec_count = reg_index;
3567 aarch64_insn_r->mem_rec_count = mem_index / 2;
3568 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3569 record_buf_mem);
3570 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3571 record_buf);
3572 return AARCH64_RECORD_SUCCESS;
3573 }
3574
3575 /* Record handler for load and store instructions. */
3576
3577 static unsigned int
3578 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3579 {
3580 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3581 uint8_t insn_bit23, insn_bit21;
3582 uint8_t opc, size_bits, ld_flag, vector_flag;
3583 uint32_t reg_rn, reg_rt, reg_rt2;
3584 uint64_t datasize, offset;
3585 uint32_t record_buf[8];
3586 uint64_t record_buf_mem[8];
3587 CORE_ADDR address;
3588
3589 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3590 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3591 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3592 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3593 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3594 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3595 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3596 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3597 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3598 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3599 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3600
3601 /* Load/store exclusive. */
3602 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3603 {
3604 if (record_debug)
3605 debug_printf ("Process record: load/store exclusive\n");
3606
3607 if (ld_flag)
3608 {
3609 record_buf[0] = reg_rt;
3610 aarch64_insn_r->reg_rec_count = 1;
3611 if (insn_bit21)
3612 {
3613 record_buf[1] = reg_rt2;
3614 aarch64_insn_r->reg_rec_count = 2;
3615 }
3616 }
3617 else
3618 {
3619 if (insn_bit21)
3620 datasize = (8 << size_bits) * 2;
3621 else
3622 datasize = (8 << size_bits);
3623 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3624 &address);
3625 record_buf_mem[0] = datasize / 8;
3626 record_buf_mem[1] = address;
3627 aarch64_insn_r->mem_rec_count = 1;
3628 if (!insn_bit23)
3629 {
3630 /* Save register rs. */
3631 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3632 aarch64_insn_r->reg_rec_count = 1;
3633 }
3634 }
3635 }
3636 /* Load register (literal) instructions decoding. */
3637 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3638 {
3639 if (record_debug)
3640 debug_printf ("Process record: load register (literal)\n");
3641 if (vector_flag)
3642 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3643 else
3644 record_buf[0] = reg_rt;
3645 aarch64_insn_r->reg_rec_count = 1;
3646 }
3647 /* All types of load/store pair instructions decoding. */
3648 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3649 {
3650 if (record_debug)
3651 debug_printf ("Process record: load/store pair\n");
3652
3653 if (ld_flag)
3654 {
3655 if (vector_flag)
3656 {
3657 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3658 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3659 }
3660 else
3661 {
3662 record_buf[0] = reg_rt;
3663 record_buf[1] = reg_rt2;
3664 }
3665 aarch64_insn_r->reg_rec_count = 2;
3666 }
3667 else
3668 {
3669 uint16_t imm7_off;
3670 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3671 if (!vector_flag)
3672 size_bits = size_bits >> 1;
3673 datasize = 8 << (2 + size_bits);
3674 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3675 offset = offset << (2 + size_bits);
3676 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3677 &address);
3678 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3679 {
3680 if (imm7_off & 0x40)
3681 address = address - offset;
3682 else
3683 address = address + offset;
3684 }
3685
3686 record_buf_mem[0] = datasize / 8;
3687 record_buf_mem[1] = address;
3688 record_buf_mem[2] = datasize / 8;
3689 record_buf_mem[3] = address + (datasize / 8);
3690 aarch64_insn_r->mem_rec_count = 2;
3691 }
3692 if (bit (aarch64_insn_r->aarch64_insn, 23))
3693 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3694 }
3695 /* Load/store register (unsigned immediate) instructions. */
3696 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3697 {
3698 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3699 if (!(opc >> 1))
3700 {
3701 if (opc & 0x01)
3702 ld_flag = 0x01;
3703 else
3704 ld_flag = 0x0;
3705 }
3706 else
3707 {
3708 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3709 {
3710 /* PRFM (immediate) */
3711 return AARCH64_RECORD_SUCCESS;
3712 }
3713 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3714 {
3715 /* LDRSW (immediate) */
3716 ld_flag = 0x1;
3717 }
3718 else
3719 {
3720 if (opc & 0x01)
3721 ld_flag = 0x01;
3722 else
3723 ld_flag = 0x0;
3724 }
3725 }
3726
3727 if (record_debug)
3728 {
3729 debug_printf ("Process record: load/store (unsigned immediate):"
3730 " size %x V %d opc %x\n", size_bits, vector_flag,
3731 opc);
3732 }
3733
3734 if (!ld_flag)
3735 {
3736 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3737 datasize = 8 << size_bits;
3738 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3739 &address);
3740 offset = offset << size_bits;
3741 address = address + offset;
3742
3743 record_buf_mem[0] = datasize >> 3;
3744 record_buf_mem[1] = address;
3745 aarch64_insn_r->mem_rec_count = 1;
3746 }
3747 else
3748 {
3749 if (vector_flag)
3750 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3751 else
3752 record_buf[0] = reg_rt;
3753 aarch64_insn_r->reg_rec_count = 1;
3754 }
3755 }
3756 /* Load/store register (register offset) instructions. */
3757 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3758 && insn_bits10_11 == 0x02 && insn_bit21)
3759 {
3760 if (record_debug)
3761 debug_printf ("Process record: load/store (register offset)\n");
3762 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3763 if (!(opc >> 1))
3764 if (opc & 0x01)
3765 ld_flag = 0x01;
3766 else
3767 ld_flag = 0x0;
3768 else
3769 if (size_bits != 0x03)
3770 ld_flag = 0x01;
3771 else
3772 return AARCH64_RECORD_UNKNOWN;
3773
3774 if (!ld_flag)
3775 {
3776 ULONGEST reg_rm_val;
3777
3778 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3779 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3780 if (bit (aarch64_insn_r->aarch64_insn, 12))
3781 offset = reg_rm_val << size_bits;
3782 else
3783 offset = reg_rm_val;
3784 datasize = 8 << size_bits;
3785 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3786 &address);
3787 address = address + offset;
3788 record_buf_mem[0] = datasize >> 3;
3789 record_buf_mem[1] = address;
3790 aarch64_insn_r->mem_rec_count = 1;
3791 }
3792 else
3793 {
3794 if (vector_flag)
3795 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3796 else
3797 record_buf[0] = reg_rt;
3798 aarch64_insn_r->reg_rec_count = 1;
3799 }
3800 }
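/* Worked example (illustrative): for "str x0, [x1, x2, lsl #3]" bit 12
   is set, so the recorded address is x1 + (x2 << 3) and size_bits of
   0x3 gives an 8-byte span.  Loads take the other branch, where only
   the destination register is recorded.  */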
3801 /* Load/store register (immediate and unprivileged) instructions. */
3802 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3803 && !insn_bit21)
3804 {
3805 if (record_debug)
3806 {
3807 debug_printf ("Process record: load/store "
3808 "(immediate and unprivileged)\n");
3809 }
3810 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3811 if (!(opc >> 1))
3812 if (opc & 0x01)
3813 ld_flag = 0x01;
3814 else
3815 ld_flag = 0x0;
3816 else
3817 if (size_bits != 0x03)
3818 ld_flag = 0x01;
3819 else
3820 return AARCH64_RECORD_UNKNOWN;
3821
3822 if (!ld_flag)
3823 {
3824 uint16_t imm9_off;
3825 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3826 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3827 datasize = 8 << size_bits;
3828 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3829 &address);
3830 if (insn_bits10_11 != 0x01)
3831 {
3832 if (imm9_off & 0x0100)
3833 address = address - offset;
3834 else
3835 address = address + offset;
3836 }
3837 record_buf_mem[0] = datasize >> 3;
3838 record_buf_mem[1] = address;
3839 aarch64_insn_r->mem_rec_count = 1;
3840 }
3841 else
3842 {
3843 if (vector_flag)
3844 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3845 else
3846 record_buf[0] = reg_rt;
3847 aarch64_insn_r->reg_rec_count = 1;
3848 }
3849 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3850 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3851 }
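/* Worked example (illustrative): for "stur x0, [x1, #-8]" the imm9
   field is 0x1f8, so (~0x1f8 & 0x1ff) + 1 = 8 and, bit 8 being set,
   the recorded address is x1 - 8.  The pre/post-indexed forms
   (insn_bits10_11 of 0x03 or 0x01) additionally record the updated
   base register, as done just above.  */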
3852 /* Advanced SIMD load/store instructions. */
3853 else
3854 return aarch64_record_asimd_load_store (aarch64_insn_r);
3855
3856 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3857 record_buf_mem);
3858 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3859 record_buf);
3860 return AARCH64_RECORD_SUCCESS;
3861 }
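
/* Illustrative sketch, not used by the recorder: the imm7/imm9 handling
   in aarch64_record_load_store open-codes a two's-complement sign
   extension as a magnitude plus a conditional subtract.  A generic
   equivalent for a WIDTH-bit field would be the hypothetical helper
   below.  */

static inline int64_t
aarch64_example_sign_extend (uint64_t value, unsigned int width)
{
  uint64_t sign = (uint64_t) 1 << (width - 1);

  /* XOR then subtract maps e.g. the 7-bit value 0x7e to -2, matching
     the (~imm7 & 0x7f) + 1 magnitude subtracted from the base above.  */
  return (int64_t) ((value ^ sign) - sign);
}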
3862
3863 /* Record handler for data processing SIMD and floating point instructions. */
3864
3865 static unsigned int
3866 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3867 {
3868 uint8_t insn_bit21, opcode, rmode, reg_rd;
3869 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3870 uint8_t insn_bits11_14;
3871 uint32_t record_buf[2];
3872
3873 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3874 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3875 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3876 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3877 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3878 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3879 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3880 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3881 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3882
3883 if (record_debug)
3884 debug_printf ("Process record: data processing SIMD/FP: ");
3885
3886 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3887 {
3888 /* Floating point - fixed point conversion instructions. */
3889 if (!insn_bit21)
3890 {
3891 if (record_debug)
3892 debug_printf ("FP - fixed point conversion");
3893
3894 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3895 record_buf[0] = reg_rd;
3896 else
3897 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3898 }
3899 /* Floating point - conditional compare instructions. */
3900 else if (insn_bits10_11 == 0x01)
3901 {
3902 if (record_debug)
3903 debug_printf ("FP - conditional compare");
3904
3905 record_buf[0] = AARCH64_CPSR_REGNUM;
3906 }
3907 /* Floating point - data processing (2-source) and
3908 conditional select instructions. */
3909 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3910 {
3911 if (record_debug)
3912 debug_printf ("FP - DP (2-source)");
3913
3914 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3915 }
3916 else if (insn_bits10_11 == 0x00)
3917 {
3918 /* Floating point - immediate instructions. */
3919 if ((insn_bits12_15 & 0x01) == 0x01
3920 || (insn_bits12_15 & 0x07) == 0x04)
3921 {
3922 if (record_debug)
3923 debug_printf ("FP - immediate");
3924 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3925 }
3926 /* Floating point - compare instructions. */
3927 else if ((insn_bits12_15 & 0x03) == 0x02)
3928 {
3929 if (record_debug)
3930 debug_printf ("FP - compare");
3931 record_buf[0] = AARCH64_CPSR_REGNUM;
3932 }
3933 /* Floating point - integer conversion instructions. */
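/* Illustrative summary of the cases below, keyed by the opcode
   (bits 16-18) and rmode (bits 19-20) fields:

     opcode >> 1   rmode   operation          destination recorded
     0x0           any     FCVT[NPMZ]{S,U}    X register (reg_rd)
     0x2           0x0     FCVTA{S,U}         X register (reg_rd)
     0x1           0x0     {S,U}CVTF          V register (reg_rd)
     0x3           any     FMOV (general)     X or V, by opcode bit 0  */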
3934 else if (insn_bits12_15 == 0x00)
3935 {
3936 /* Convert float to integer instruction. */
3937 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3938 {
3939 if (record_debug)
3940 debug_printf ("float to int conversion");
3941
3942 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3943 }
3944 /* Convert integer to float instruction. */
3945 else if ((opcode >> 1) == 0x01 && !rmode)
3946 {
3947 if (record_debug)
3948 debug_printf ("int to float conversion");
3949
3950 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3951 }
3952 /* Move float to integer instruction. */
3953 else if ((opcode >> 1) == 0x03)
3954 {
3955 if (record_debug)
3956 debug_printf ("move float to int");
3957
3958 if (!(opcode & 0x01))
3959 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3960 else
3961 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3962 }
3963 else
3964 return AARCH64_RECORD_UNKNOWN;
3965 }
3966 else
3967 return AARCH64_RECORD_UNKNOWN;
3968 }
3969 else
3970 return AARCH64_RECORD_UNKNOWN;
3971 }
3972 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3973 {
3974 if (record_debug)
3975 debug_printf ("SIMD copy");
3976
3977 /* Advanced SIMD copy instructions. */
3978 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3979 && !bit (aarch64_insn_r->aarch64_insn, 15)
3980 && bit (aarch64_insn_r->aarch64_insn, 10))
3981 {
3982 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3983 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3984 else
3985 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3986 }
3987 else
3988 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3989 }
3990 /* All remaining floating point or advanced SIMD instructions. */
3991 else
3992 {
3993 if (record_debug)
3994 debug_printf ("all remaining");
3995
3996 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3997 }
3998
3999 if (record_debug)
4000 debug_printf ("\n");
4001
4002 aarch64_insn_r->reg_rec_count++;
4003 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4004 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4005 record_buf);
4006 return AARCH64_RECORD_SUCCESS;
4007 }
4008
4009 /* Decodes the instruction type and invokes its record handler. */
4010
4011 static unsigned int
4012 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4013 {
4014 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4015
4016 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4017 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4018 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4019 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4020
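/* Illustrative map of the group bits tested below, written as bits
   28..25; this mirrors the AArch64 top-level encoding table:

     1 0 0 x   data processing - immediate
     1 0 1 x   branch, exception generation and system
     x 1 x 0   loads and stores
     x 1 0 1   data processing - register
     x 1 1 1   data processing - SIMD and floating point  */
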
4021 /* Data processing - immediate instructions. */
4022 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4023 return aarch64_record_data_proc_imm (aarch64_insn_r);
4024
4025 /* Branch, exception generation and system instructions. */
4026 if (ins_bit26 && !ins_bit27 && ins_bit28)
4027 return aarch64_record_branch_except_sys (aarch64_insn_r);
4028
4029 /* Load and store instructions. */
4030 if (!ins_bit25 && ins_bit27)
4031 return aarch64_record_load_store (aarch64_insn_r);
4032
4033 /* Data processing - register instructions. */
4034 if (ins_bit25 && !ins_bit26 && ins_bit27)
4035 return aarch64_record_data_proc_reg (aarch64_insn_r);
4036
4037 /* Data processing - SIMD and floating point instructions. */
4038 if (ins_bit25 && ins_bit26 && ins_bit27)
4039 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4040
4041 return AARCH64_RECORD_UNSUPPORTED;
4042 }
4043
4044 /* Cleans up local record registers and memory allocations. */
4045
4046 static void
4047 deallocate_reg_mem (insn_decode_record *record)
4048 {
4049 xfree (record->aarch64_regs);
4050 xfree (record->aarch64_mems);
4051 }
4052
4053 #if GDB_SELF_TEST
4054 namespace selftests {
4055
4056 static void
4057 aarch64_process_record_test (void)
4058 {
4059 struct gdbarch_info info;
4060 uint32_t ret;
4061
4062 gdbarch_info_init (&info);
4063 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4064
4065 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4066 SELF_CHECK (gdbarch != NULL);
4067
4068 insn_decode_record aarch64_record;
4069
4070 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4071 aarch64_record.regcache = NULL;
4072 aarch64_record.this_addr = 0;
4073 aarch64_record.gdbarch = gdbarch;
4074
4075 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4076 aarch64_record.aarch64_insn = 0xf9800020;
4077 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4078 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4079 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4080 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4081
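/* A second, hedged check under the same assumptions as above:
   0xf9400020 is taken to be "ldr x0, [x1]" (LDR immediate, unsigned
   offset), a plain load that should record exactly one register and
   no memory; this path never consults the regcache, so NULL stays
   safe.  */
/* 20 00 40 f9 ldr x0, [x1] */
aarch64_record.aarch64_insn = 0xf9400020;
ret = aarch64_record_decode_insn_handler (&aarch64_record);
SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
SELF_CHECK (aarch64_record.reg_rec_count == 1);
SELF_CHECK (aarch64_record.mem_rec_count == 0);
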
4082 deallocate_reg_mem (&aarch64_record);
4083 }
4084
4085 } // namespace selftests
4086 #endif /* GDB_SELF_TEST */
4087
4088 /* Parse the current instruction, and record to record_arch_list the
4089 values of the registers and memory that the instruction will change.
4090 Return -1 if something goes wrong. */
4091
4092 int
4093 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4094 CORE_ADDR insn_addr)
4095 {
4096 uint32_t rec_no = 0;
4097 uint8_t insn_size = 4;
4098 uint32_t ret = 0;
4099 gdb_byte buf[insn_size];
4100 insn_decode_record aarch64_record;
4101
4102 memset (&buf[0], 0, insn_size);
4103 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4104 target_read_memory (insn_addr, &buf[0], insn_size);
4105 aarch64_record.aarch64_insn
4106 = (uint32_t) extract_unsigned_integer (&buf[0],
4107 insn_size,
4108 gdbarch_byte_order (gdbarch));
4109 aarch64_record.regcache = regcache;
4110 aarch64_record.this_addr = insn_addr;
4111 aarch64_record.gdbarch = gdbarch;
4112
4113 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4114 if (ret == AARCH64_RECORD_UNSUPPORTED)
4115 {
4116 printf_unfiltered (_("Process record does not support instruction "
4117 "0x%0x at address %s.\n"),
4118 aarch64_record.aarch64_insn,
4119 paddress (gdbarch, insn_addr));
4120 ret = -1;
4121 }
4122
4123 if (0 == ret)
4124 {
4125 /* Record registers. */
4126 record_full_arch_list_add_reg (aarch64_record.regcache,
4127 AARCH64_PC_REGNUM);
4128 /* Always record register CPSR. */
4129 record_full_arch_list_add_reg (aarch64_record.regcache,
4130 AARCH64_CPSR_REGNUM);
4131 if (aarch64_record.aarch64_regs)
4132 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4133 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4134 aarch64_record.aarch64_regs[rec_no]))
4135 ret = -1;
4136
4137 /* Record memories. */
4138 if (aarch64_record.aarch64_mems)
4139 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4140 if (record_full_arch_list_add_mem
4141 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4142 aarch64_record.aarch64_mems[rec_no].len))
4143 ret = -1;
4144
4145 if (record_full_arch_list_add_end ())
4146 ret = -1;
4147 }
4148
4149 deallocate_reg_mem (&aarch64_record);
4150 return ret;
4151 }
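
/* Usage sketch (illustrative; the call shape is an assumption, not code
   from this file): the record-full target reaches aarch64_process_record
   through the process_record gdbarch hook, conceptually:

     if (gdbarch_process_record (gdbarch, regcache, addr) != 0)
       error (...);   // recording failed; forward replay must stop

   The hook itself is installed elsewhere, e.g. by the Linux OS-ABI code
   via set_gdbarch_process_record.  */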