]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/aarch64-tdep.c
Update copyright year range in all GDB files
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2-frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "language.h"
43 #include "infcall.h"
44 #include "ax.h"
45 #include "ax-gdb.h"
46 #include "selftest.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57 #include "arch/aarch64-insn.h"
58
59 #include "opcode/aarch64.h"
60 #include <algorithm>
61
/* Bit-field extraction helpers used when decoding 32-bit A64
   instruction words.

   submask (X) yields a mask covering the X+1 low-order bits.
   NOTE(review): the shift uses 1L, so the mask is only well defined
   for X < 31 on hosts where long is 32 bits — adequate for 32-bit
   instruction fields, but confirm before reusing on wider values.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Extract bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Extract bits ST..FN (inclusive) of OBJ, right-justified.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
65
/* Pseudo register base numbers.  These index the pseudo-register
   space (offsets from gdbarch_num_regs): the Q registers come first,
   followed by 32 D, 32 S, 32 H and 32 B views of the vector
   registers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
72
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alternate user-visible name onto an existing raw
   register number (presumably registered via user-regs; confirm at the
   point where this table is consumed).  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  Note each wN alias maps onto the raw xN
     register number.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials: the AAPCS64 intra-procedure-call scratch registers,
     which are simply x16 and x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
122
/* The required core 'R' registers: x0..x30, then SP, PC and CPSR.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
138
/* The FP/SIMD 'V' registers: v0..v31 followed by the two FP status
   and control registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
155
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consumed by the prologue/stub unwinders below.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  Zero until the cache has
     been successfully populated.  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame, or -1
     if no frame was found.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
187
/* Implement the "show" callback for the "set/show debug aarch64"
   command; VALUE is the already-formatted setting text.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
194
195 namespace {
196
197 /* Abstract instruction reader. */
198
199 class abstract_instruction_reader
200 {
201 public:
202 /* Read in one instruction. */
203 virtual ULONGEST read (CORE_ADDR memaddr, int len,
204 enum bfd_endian byte_order) = 0;
205 };
206
207 /* Instruction reader from real target. */
208
209 class instruction_reader : public abstract_instruction_reader
210 {
211 public:
212 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
213 {
214 return read_code_unsigned_integer (memaddr, len, byte_order);
215 }
216 };
217
218 } // namespace
219
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Instructions are fetched through READER, one 32-bit word at a time,
   starting at START and never past LIMIT.  If CACHE is non-NULL it is
   filled in with the frame register, frame size and saved-register
   offsets discovered (offsets are relative to the caller's SP).
   Returns the address just past the last instruction recognized as
   part of the prologue.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  Each starts as a
     symbolic "register N + 0" value.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  /* Symbolic model of the stack area addressed relative to SP.  */
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      /* Stop at the first instruction the opcode table cannot
	 decode.  */
      if (aarch64_decode_insn (insn, &inst, 1) != 0)
	break;

      /* ADD/SUB (immediate): tracks SP/FP adjustments such as
	 "sub sp, sp, #N" and "add x29, sp, #N".  */
      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  /* ADRP produces a PC-relative value we do not model.  */
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  /* ORR with zero shift can be a register move ("mov rd, rm"
	     is "orr rd, xzr, rm"); anything else defeats the
	     analysis.  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  /* STUR: record the store into the symbolic stack model.  */
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      /* D registers live after the X registers in REGS.  */
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), 8,
		       regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  bool is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record where each X register was saved, as an offset from the
     entry SP.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  /* Likewise for the D registers, whose saved_regs slots follow the
     raw registers in pseudo-register order.  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}
481
482 static CORE_ADDR
483 aarch64_analyze_prologue (struct gdbarch *gdbarch,
484 CORE_ADDR start, CORE_ADDR limit,
485 struct aarch64_prologue_cache *cache)
486 {
487 instruction_reader reader;
488
489 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
490 reader);
491 }
492
493 #if GDB_SELF_TEST
494
495 namespace selftests {
496
497 /* Instruction reader from manually cooked instruction sequences. */
498
499 class instruction_reader_test : public abstract_instruction_reader
500 {
501 public:
502 template<size_t SIZE>
503 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
504 : m_insns (insns), m_insns_size (SIZE)
505 {}
506
507 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
508 {
509 SELF_CHECK (len == 4);
510 SELF_CHECK (memaddr % 4 == 0);
511 SELF_CHECK (memaddr / 4 < m_insns_size);
512
513 return m_insns[memaddr / 4];
514 }
515
516 private:
517 const uint32_t *m_insns;
518 size_t m_insns_size;
519 };
520
/* Self test for aarch64_analyze_prologue, run through GDB's self-test
   framework.  Feeds canned instruction sequences to the analyzer and
   checks the resulting prologue cache.  Saved-register offsets in the
   cache are negative offsets from the caller's SP; -1 denotes a
   register not saved (presumably the trad-frame default — confirm in
   trad-frame.h).  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    /* Analysis must stop at the bl, i.e. after two instructions.  */
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    /* No D registers were saved by this prologue.  */
    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    /* Analysis must stop at the ldr, i.e. after five instructions.  */
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    /* Only d0 was saved (at sp+24, i.e. -24 from the entry SP).  */
    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }
}
615 } // namespace selftests
616 #endif /* GDB_SELF_TEST */
617
618 /* Implement the "skip_prologue" gdbarch method. */
619
620 static CORE_ADDR
621 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
622 {
623 CORE_ADDR func_addr, limit_pc;
624
625 /* See if we can determine the end of the prologue via the symbol
626 table. If so, then return either PC, or the PC after the
627 prologue, whichever is greater. */
628 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
629 {
630 CORE_ADDR post_prologue_pc
631 = skip_prologue_using_sal (gdbarch, func_addr);
632
633 if (post_prologue_pc != 0)
634 return std::max (pc, post_prologue_pc);
635 }
636
637 /* Can't determine prologue from the symbol table, need to examine
638 instructions. */
639
640 /* Find an upper limit on the function prologue using the debug
641 information. If the debug information could not be used to
642 provide that bound, then use an arbitrary large number as the
643 upper bound. */
644 limit_pc = skip_prologue_using_sal (gdbarch, pc);
645 if (limit_pc == 0)
646 limit_pc = pc + 128; /* Magic. */
647
648 /* Try disassembling prologue. */
649 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
650 }
651
652 /* Scan the function prologue for THIS_FRAME and populate the prologue
653 cache CACHE. */
654
655 static void
656 aarch64_scan_prologue (struct frame_info *this_frame,
657 struct aarch64_prologue_cache *cache)
658 {
659 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
660 CORE_ADDR prologue_start;
661 CORE_ADDR prologue_end;
662 CORE_ADDR prev_pc = get_frame_pc (this_frame);
663 struct gdbarch *gdbarch = get_frame_arch (this_frame);
664
665 cache->prev_pc = prev_pc;
666
667 /* Assume we do not find a frame. */
668 cache->framereg = -1;
669 cache->framesize = 0;
670
671 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
672 &prologue_end))
673 {
674 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
675
676 if (sal.line == 0)
677 {
678 /* No line info so use the current PC. */
679 prologue_end = prev_pc;
680 }
681 else if (sal.end < prologue_end)
682 {
683 /* The next line begins after the function end. */
684 prologue_end = sal.end;
685 }
686
687 prologue_end = std::min (prologue_end, prev_pc);
688 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
689 }
690 else
691 {
692 CORE_ADDR frame_loc;
693
694 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
695 if (frame_loc == 0)
696 return;
697
698 cache->framereg = AARCH64_FP_REGNUM;
699 cache->framesize = 16;
700 cache->saved_regs[29].addr = 0;
701 cache->saved_regs[30].addr = 8;
702 }
703 }
704
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* A framereg of -1 means the scan found no frame; give up without
     marking the cache available.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  /* The caller's SP is the frame base plus the frame size.  */
  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
737
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Registers or memory may be unavailable; in that case the cache
     stays allocated but is not marked available.  Any other error is
     re-thrown.  */
  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
768
769 /* Implement the "stop_reason" frame_unwind method. */
770
771 static enum unwind_stop_reason
772 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
773 void **this_cache)
774 {
775 struct aarch64_prologue_cache *cache
776 = aarch64_make_prologue_cache (this_frame, this_cache);
777
778 if (!cache->available_p)
779 return UNWIND_UNAVAILABLE;
780
781 /* Halt the backtrace at "_start". */
782 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
783 return UNWIND_OUTERMOST;
784
785 /* We've hit a wall, stop. */
786 if (cache->prev_sp == 0)
787 return UNWIND_OUTERMOST;
788
789 return UNWIND_NO_REASON;
790 }
791
792 /* Our frame ID for a normal frame is the current function's starting
793 PC and the caller's SP when we were called. */
794
795 static void
796 aarch64_prologue_this_id (struct frame_info *this_frame,
797 void **this_cache, struct frame_id *this_id)
798 {
799 struct aarch64_prologue_cache *cache
800 = aarch64_make_prologue_cache (this_frame, this_cache);
801
802 if (!cache->available_p)
803 *this_id = frame_id_build_unavailable_stack (cache->func);
804 else
805 *this_id = frame_id_build (cache->prev_sp, cache->func);
806 }
807
808 /* Implement the "prev_register" frame_unwind method. */
809
810 static struct value *
811 aarch64_prologue_prev_register (struct frame_info *this_frame,
812 void **this_cache, int prev_regnum)
813 {
814 struct aarch64_prologue_cache *cache
815 = aarch64_make_prologue_cache (this_frame, this_cache);
816
817 /* If we are asked to unwind the PC, then we need to return the LR
818 instead. The prologue may save PC, but it will point into this
819 frame's prologue, not the next frame's resume location. */
820 if (prev_regnum == AARCH64_PC_REGNUM)
821 {
822 CORE_ADDR lr;
823
824 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
825 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
826 }
827
828 /* SP is generally not saved to the stack, but this frame is
829 identified by the next frame's stack pointer at the time of the
830 call. The value was already reconstructed into PREV_SP. */
831 /*
832 +----------+ ^
833 | saved lr | |
834 +->| saved fp |--+
835 | | |
836 | | | <- Previous SP
837 | +----------+
838 | | saved lr |
839 +--| saved fp |<- FP
840 | |
841 | |<- SP
842 +----------+ */
843 if (prev_regnum == AARCH64_SP_REGNUM)
844 return frame_unwind_got_constant (this_frame, prev_regnum,
845 cache->prev_sp);
846
847 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
848 prev_regnum);
849 }
850
/* AArch64 prologue unwinder.  Registered as the fallback unwinder;
   the callbacks above do the work.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,	/* unwind_data  */
  default_frame_sniffer
};
861
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  Used for stub frames, where no prologue scan is
   possible; the cache only records the current SP and PC.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Registers may be unavailable; leave the cache unmarked in that
     case, re-throw anything else.  */
  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
895
896 /* Implement the "stop_reason" frame_unwind method. */
897
898 static enum unwind_stop_reason
899 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
900 void **this_cache)
901 {
902 struct aarch64_prologue_cache *cache
903 = aarch64_make_stub_cache (this_frame, this_cache);
904
905 if (!cache->available_p)
906 return UNWIND_UNAVAILABLE;
907
908 return UNWIND_NO_REASON;
909 }
910
911 /* Our frame ID for a stub frame is the current SP and LR. */
912
913 static void
914 aarch64_stub_this_id (struct frame_info *this_frame,
915 void **this_cache, struct frame_id *this_id)
916 {
917 struct aarch64_prologue_cache *cache
918 = aarch64_make_stub_cache (this_frame, this_cache);
919
920 if (cache->available_p)
921 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
922 else
923 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
924 }
925
926 /* Implement the "sniffer" frame_unwind method. */
927
928 static int
929 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
930 struct frame_info *this_frame,
931 void **this_prologue_cache)
932 {
933 CORE_ADDR addr_in_block;
934 gdb_byte dummy[4];
935
936 addr_in_block = get_frame_address_in_block (this_frame);
937 if (in_plt_section (addr_in_block)
938 /* We also use the stub winder if the target memory is unreadable
939 to avoid having the prologue unwinder trying to read it. */
940 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
941 return 1;
942
943 return 0;
944 }
945
/* AArch64 stub unwinder.  Handles PLT stubs and frames with
   unreadable code; shares prev_register with the prologue unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,	/* unwind_data  */
  aarch64_stub_unwind_sniffer
};
956
957 /* Return the frame base address of *THIS_FRAME. */
958
959 static CORE_ADDR
960 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
961 {
962 struct aarch64_prologue_cache *cache
963 = aarch64_make_prologue_cache (this_frame, this_cache);
964
965 return cache->prev_sp - cache->framesize;
966 }
967
/* AArch64 default frame base information.  The same address serves as
   frame base, locals base and args base.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
976
977 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
978 dummy frame. The frame ID's base needs to match the TOS value
979 saved by save_dummy_frame_tos () and returned from
980 aarch64_push_dummy_call, and the PC needs to match the dummy
981 frame's breakpoint. */
982
983 static struct frame_id
984 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
985 {
986 return frame_id_build (get_frame_register_unsigned (this_frame,
987 AARCH64_SP_REGNUM),
988 get_frame_pc (this_frame));
989 }
990
991 /* Implement the "unwind_pc" gdbarch method. */
992
993 static CORE_ADDR
994 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
995 {
996 CORE_ADDR pc
997 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
998
999 return pc;
1000 }
1001
1002 /* Implement the "unwind_sp" gdbarch method. */
1003
1004 static CORE_ADDR
1005 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1006 {
1007 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1008 }
1009
1010 /* Return the value of the REGNUM register in the previous frame of
1011 *THIS_FRAME. */
1012
1013 static struct value *
1014 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1015 void **this_cache, int regnum)
1016 {
1017 CORE_ADDR lr;
1018
1019 switch (regnum)
1020 {
1021 case AARCH64_PC_REGNUM:
1022 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1023 return frame_unwind_got_constant (this_frame, regnum, lr);
1024
1025 default:
1026 internal_error (__FILE__, __LINE__,
1027 _("Unexpected register %d"), regnum);
1028 }
1029 }
1030
1031 /* Implement the "init_reg" dwarf2_frame_ops method. */
1032
1033 static void
1034 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1035 struct dwarf2_frame_state_reg *reg,
1036 struct frame_info *this_frame)
1037 {
1038 switch (regnum)
1039 {
1040 case AARCH64_PC_REGNUM:
1041 reg->how = DWARF2_FRAME_REG_FN;
1042 reg->loc.fn = aarch64_dwarf2_prev_register;
1043 break;
1044 case AARCH64_SP_REGNUM:
1045 reg->how = DWARF2_FRAME_REG_CFA;
1046 break;
1047 }
1048 }
1049
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

/* Declare the VEC machinery for vectors of stack_item_t.  */
DEF_VEC_O (stack_item_t);
1064
1065 /* Return the alignment (in bytes) of the given type. */
1066
1067 static int
1068 aarch64_type_align (struct type *t)
1069 {
1070 int n;
1071 int align;
1072 int falign;
1073
1074 t = check_typedef (t);
1075 switch (TYPE_CODE (t))
1076 {
1077 default:
1078 /* Should never happen. */
1079 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1080 return 4;
1081
1082 case TYPE_CODE_PTR:
1083 case TYPE_CODE_ENUM:
1084 case TYPE_CODE_INT:
1085 case TYPE_CODE_FLT:
1086 case TYPE_CODE_SET:
1087 case TYPE_CODE_RANGE:
1088 case TYPE_CODE_BITSTRING:
1089 case TYPE_CODE_REF:
1090 case TYPE_CODE_RVALUE_REF:
1091 case TYPE_CODE_CHAR:
1092 case TYPE_CODE_BOOL:
1093 return TYPE_LENGTH (t);
1094
1095 case TYPE_CODE_ARRAY:
1096 if (TYPE_VECTOR (t))
1097 {
1098 /* Use the natural alignment for vector types (the same for
1099 scalar type), but the maximum alignment is 128-bit. */
1100 if (TYPE_LENGTH (t) > 16)
1101 return 16;
1102 else
1103 return TYPE_LENGTH (t);
1104 }
1105 else
1106 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1107 case TYPE_CODE_COMPLEX:
1108 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1109
1110 case TYPE_CODE_STRUCT:
1111 case TYPE_CODE_UNION:
1112 align = 1;
1113 for (n = 0; n < TYPE_NFIELDS (t); n++)
1114 {
1115 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1116 if (falign > align)
1117 align = falign;
1118 }
1119 return align;
1120 }
1121 }
1122
/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);

	/* A vector is itself a fundamental data type, not an
	   aggregate of anything.  */
	if (TYPE_VECTOR (ty))
	  return 0;

	/* NOTE(review): this compares the array's size in BYTES with
	   4, yet the trailing comment (and the AAPCS64 HFA/HVA rules)
	   speak of at most 4 MEMBERS -- e.g. "float[4]" is 16 bytes
	   and would be rejected here.  Looks like a latent bug;
	   confirm against the AAPCS64 before relying on array HFAs.  */
	if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
	    && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
		|| (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
		    && TYPE_VECTOR (target_ty))))
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	/* HFA or HVA has at most four members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
		|| (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
		    && TYPE_VECTOR (member0_type)))
	      {
		int i;

		/* Homogeneous: every member must match the first
		   member's type code and length.  */
		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
1184
/* AArch64 function call information structure.  Tracks the AAPCS64
   allocation state (next free registers and stack offset) while
   marshalling arguments for an inferior function call.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector; items are pushed here first and written out to
     memory in reverse order by the caller.  */
  VEC(stack_item_t) *si;
};
1206
1207 /* Pass a value in a sequence of consecutive X registers. The caller
1208 is responsbile for ensuring sufficient registers are available. */
1209
1210 static void
1211 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1212 struct aarch64_call_info *info, struct type *type,
1213 struct value *arg)
1214 {
1215 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1216 int len = TYPE_LENGTH (type);
1217 enum type_code typecode = TYPE_CODE (type);
1218 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1219 const bfd_byte *buf = value_contents (arg);
1220
1221 info->argnum++;
1222
1223 while (len > 0)
1224 {
1225 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1226 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1227 byte_order);
1228
1229
1230 /* Adjust sub-word struct/union args when big-endian. */
1231 if (byte_order == BFD_ENDIAN_BIG
1232 && partial_len < X_REGISTER_SIZE
1233 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1234 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1235
1236 if (aarch64_debug)
1237 {
1238 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1239 gdbarch_register_name (gdbarch, regnum),
1240 phex (regval, X_REGISTER_SIZE));
1241 }
1242 regcache_cooked_write_unsigned (regcache, regnum, regval);
1243 len -= partial_len;
1244 buf += partial_len;
1245 regnum++;
1246 }
1247 }
1248
1249 /* Attempt to marshall a value in a V register. Return 1 if
1250 successful, or 0 if insufficient registers are available. This
1251 function, unlike the equivalent pass_in_x() function does not
1252 handle arguments spread across multiple registers. */
1253
1254 static int
1255 pass_in_v (struct gdbarch *gdbarch,
1256 struct regcache *regcache,
1257 struct aarch64_call_info *info,
1258 int len, const bfd_byte *buf)
1259 {
1260 if (info->nsrn < 8)
1261 {
1262 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1263 gdb_byte reg[V_REGISTER_SIZE];
1264
1265 info->argnum++;
1266 info->nsrn++;
1267
1268 memset (reg, 0, sizeof (reg));
1269 /* PCS C.1, the argument is allocated to the least significant
1270 bits of V register. */
1271 memcpy (reg, buf, len);
1272 regcache_cooked_write (regcache, regnum, reg);
1273
1274 if (aarch64_debug)
1275 {
1276 debug_printf ("arg %d in %s\n", info->argnum,
1277 gdbarch_register_name (gdbarch, regnum));
1278 }
1279 return 1;
1280 }
1281 info->nsrn = 8;
1282 return 0;
1283 }
1284
1285 /* Marshall an argument onto the stack. */
1286
1287 static void
1288 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1289 struct value *arg)
1290 {
1291 const bfd_byte *buf = value_contents (arg);
1292 int len = TYPE_LENGTH (type);
1293 int align;
1294 stack_item_t item;
1295
1296 info->argnum++;
1297
1298 align = aarch64_type_align (type);
1299
1300 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1301 Natural alignment of the argument's type. */
1302 align = align_up (align, 8);
1303
1304 /* The AArch64 PCS requires at most doubleword alignment. */
1305 if (align > 16)
1306 align = 16;
1307
1308 if (aarch64_debug)
1309 {
1310 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1311 info->nsaa);
1312 }
1313
1314 item.len = len;
1315 item.data = buf;
1316 VEC_safe_push (stack_item_t, info->si, &item);
1317
1318 info->nsaa += len;
1319 if (info->nsaa & (align - 1))
1320 {
1321 /* Push stack alignment padding. */
1322 int pad = align - (info->nsaa & (align - 1));
1323
1324 item.len = pad;
1325 item.data = NULL;
1326
1327 VEC_safe_push (stack_item_t, info->si, &item);
1328 info->nsaa += pad;
1329 }
1330 }
1331
1332 /* Marshall an argument into a sequence of one or more consecutive X
1333 registers or, if insufficient X registers are available then onto
1334 the stack. */
1335
1336 static void
1337 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1338 struct aarch64_call_info *info, struct type *type,
1339 struct value *arg)
1340 {
1341 int len = TYPE_LENGTH (type);
1342 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1343
1344 /* PCS C.13 - Pass in registers if we have enough spare */
1345 if (info->ngrn + nregs <= 8)
1346 {
1347 pass_in_x (gdbarch, regcache, info, type, arg);
1348 info->ngrn += nregs;
1349 }
1350 else
1351 {
1352 info->ngrn = 8;
1353 pass_on_stack (info, type, arg);
1354 }
1355 }
1356
1357 /* Pass a value in a V register, or on the stack if insufficient are
1358 available. */
1359
1360 static void
1361 pass_in_v_or_stack (struct gdbarch *gdbarch,
1362 struct regcache *regcache,
1363 struct aarch64_call_info *info,
1364 struct type *type,
1365 struct value *arg)
1366 {
1367 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1368 value_contents (arg)))
1369 pass_on_stack (info, type, arg);
1370 }
1371
1372 /* Implement the "push_dummy_call" gdbarch method. */
1373
1374 static CORE_ADDR
1375 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1376 struct regcache *regcache, CORE_ADDR bp_addr,
1377 int nargs,
1378 struct value **args, CORE_ADDR sp, int struct_return,
1379 CORE_ADDR struct_addr)
1380 {
1381 int argnum;
1382 struct aarch64_call_info info;
1383 struct type *func_type;
1384 struct type *return_type;
1385 int lang_struct_return;
1386
1387 memset (&info, 0, sizeof (info));
1388
1389 /* We need to know what the type of the called function is in order
1390 to determine the number of named/anonymous arguments for the
1391 actual argument placement, and the return type in order to handle
1392 return value correctly.
1393
1394 The generic code above us views the decision of return in memory
1395 or return in registers as a two stage processes. The language
1396 handler is consulted first and may decide to return in memory (eg
1397 class with copy constructor returned by value), this will cause
1398 the generic code to allocate space AND insert an initial leading
1399 argument.
1400
1401 If the language code does not decide to pass in memory then the
1402 target code is consulted.
1403
1404 If the language code decides to pass in memory we want to move
1405 the pointer inserted as the initial argument from the argument
1406 list and into X8, the conventional AArch64 struct return pointer
1407 register.
1408
1409 This is slightly awkward, ideally the flag "lang_struct_return"
1410 would be passed to the targets implementation of push_dummy_call.
1411 Rather that change the target interface we call the language code
1412 directly ourselves. */
1413
1414 func_type = check_typedef (value_type (function));
1415
1416 /* Dereference function pointer types. */
1417 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1418 func_type = TYPE_TARGET_TYPE (func_type);
1419
1420 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1421 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1422
1423 /* If language_pass_by_reference () returned true we will have been
1424 given an additional initial argument, a hidden pointer to the
1425 return slot in memory. */
1426 return_type = TYPE_TARGET_TYPE (func_type);
1427 lang_struct_return = language_pass_by_reference (return_type);
1428
1429 /* Set the return address. For the AArch64, the return breakpoint
1430 is always at BP_ADDR. */
1431 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1432
1433 /* If we were given an initial argument for the return slot because
1434 lang_struct_return was true, lose it. */
1435 if (lang_struct_return)
1436 {
1437 args++;
1438 nargs--;
1439 }
1440
1441 /* The struct_return pointer occupies X8. */
1442 if (struct_return || lang_struct_return)
1443 {
1444 if (aarch64_debug)
1445 {
1446 debug_printf ("struct return in %s = 0x%s\n",
1447 gdbarch_register_name (gdbarch,
1448 AARCH64_STRUCT_RETURN_REGNUM),
1449 paddress (gdbarch, struct_addr));
1450 }
1451 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1452 struct_addr);
1453 }
1454
1455 for (argnum = 0; argnum < nargs; argnum++)
1456 {
1457 struct value *arg = args[argnum];
1458 struct type *arg_type;
1459 int len;
1460
1461 arg_type = check_typedef (value_type (arg));
1462 len = TYPE_LENGTH (arg_type);
1463
1464 switch (TYPE_CODE (arg_type))
1465 {
1466 case TYPE_CODE_INT:
1467 case TYPE_CODE_BOOL:
1468 case TYPE_CODE_CHAR:
1469 case TYPE_CODE_RANGE:
1470 case TYPE_CODE_ENUM:
1471 if (len < 4)
1472 {
1473 /* Promote to 32 bit integer. */
1474 if (TYPE_UNSIGNED (arg_type))
1475 arg_type = builtin_type (gdbarch)->builtin_uint32;
1476 else
1477 arg_type = builtin_type (gdbarch)->builtin_int32;
1478 arg = value_cast (arg_type, arg);
1479 }
1480 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1481 break;
1482
1483 case TYPE_CODE_COMPLEX:
1484 if (info.nsrn <= 6)
1485 {
1486 const bfd_byte *buf = value_contents (arg);
1487 struct type *target_type =
1488 check_typedef (TYPE_TARGET_TYPE (arg_type));
1489
1490 pass_in_v (gdbarch, regcache, &info,
1491 TYPE_LENGTH (target_type), buf);
1492 pass_in_v (gdbarch, regcache, &info,
1493 TYPE_LENGTH (target_type),
1494 buf + TYPE_LENGTH (target_type));
1495 }
1496 else
1497 {
1498 info.nsrn = 8;
1499 pass_on_stack (&info, arg_type, arg);
1500 }
1501 break;
1502 case TYPE_CODE_FLT:
1503 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1504 break;
1505
1506 case TYPE_CODE_STRUCT:
1507 case TYPE_CODE_ARRAY:
1508 case TYPE_CODE_UNION:
1509 if (is_hfa_or_hva (arg_type))
1510 {
1511 int elements = TYPE_NFIELDS (arg_type);
1512
1513 /* Homogeneous Aggregates */
1514 if (info.nsrn + elements < 8)
1515 {
1516 int i;
1517
1518 for (i = 0; i < elements; i++)
1519 {
1520 /* We know that we have sufficient registers
1521 available therefore this will never fallback
1522 to the stack. */
1523 struct value *field =
1524 value_primitive_field (arg, 0, i, arg_type);
1525 struct type *field_type =
1526 check_typedef (value_type (field));
1527
1528 pass_in_v_or_stack (gdbarch, regcache, &info,
1529 field_type, field);
1530 }
1531 }
1532 else
1533 {
1534 info.nsrn = 8;
1535 pass_on_stack (&info, arg_type, arg);
1536 }
1537 }
1538 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1539 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1540 {
1541 /* Short vector types are passed in V registers. */
1542 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1543 }
1544 else if (len > 16)
1545 {
1546 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1547 invisible reference. */
1548
1549 /* Allocate aligned storage. */
1550 sp = align_down (sp - len, 16);
1551
1552 /* Write the real data into the stack. */
1553 write_memory (sp, value_contents (arg), len);
1554
1555 /* Construct the indirection. */
1556 arg_type = lookup_pointer_type (arg_type);
1557 arg = value_from_pointer (arg_type, sp);
1558 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1559 }
1560 else
1561 /* PCS C.15 / C.18 multiple values pass. */
1562 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1563 break;
1564
1565 default:
1566 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1567 break;
1568 }
1569 }
1570
1571 /* Make sure stack retains 16 byte alignment. */
1572 if (info.nsaa & 15)
1573 sp -= 16 - (info.nsaa & 15);
1574
1575 while (!VEC_empty (stack_item_t, info.si))
1576 {
1577 stack_item_t *si = VEC_last (stack_item_t, info.si);
1578
1579 sp -= si->len;
1580 if (si->data != NULL)
1581 write_memory (sp, si->data, si->len);
1582 VEC_pop (stack_item_t, info.si);
1583 }
1584
1585 VEC_free (stack_item_t, info.si);
1586
1587 /* Finally, update the SP register. */
1588 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1589
1590 return sp;
1591 }
1592
1593 /* Implement the "frame_align" gdbarch method. */
1594
1595 static CORE_ADDR
1596 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1597 {
1598 /* Align the stack to sixteen bytes. */
1599 return sp & ~(CORE_ADDR) 15;
1600 }
1601
1602 /* Return the type for an AdvSISD Q register. */
1603
1604 static struct type *
1605 aarch64_vnq_type (struct gdbarch *gdbarch)
1606 {
1607 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1608
1609 if (tdep->vnq_type == NULL)
1610 {
1611 struct type *t;
1612 struct type *elem;
1613
1614 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1615 TYPE_CODE_UNION);
1616
1617 elem = builtin_type (gdbarch)->builtin_uint128;
1618 append_composite_type_field (t, "u", elem);
1619
1620 elem = builtin_type (gdbarch)->builtin_int128;
1621 append_composite_type_field (t, "s", elem);
1622
1623 tdep->vnq_type = t;
1624 }
1625
1626 return tdep->vnq_type;
1627 }
1628
1629 /* Return the type for an AdvSISD D register. */
1630
1631 static struct type *
1632 aarch64_vnd_type (struct gdbarch *gdbarch)
1633 {
1634 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1635
1636 if (tdep->vnd_type == NULL)
1637 {
1638 struct type *t;
1639 struct type *elem;
1640
1641 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1642 TYPE_CODE_UNION);
1643
1644 elem = builtin_type (gdbarch)->builtin_double;
1645 append_composite_type_field (t, "f", elem);
1646
1647 elem = builtin_type (gdbarch)->builtin_uint64;
1648 append_composite_type_field (t, "u", elem);
1649
1650 elem = builtin_type (gdbarch)->builtin_int64;
1651 append_composite_type_field (t, "s", elem);
1652
1653 tdep->vnd_type = t;
1654 }
1655
1656 return tdep->vnd_type;
1657 }
1658
1659 /* Return the type for an AdvSISD S register. */
1660
1661 static struct type *
1662 aarch64_vns_type (struct gdbarch *gdbarch)
1663 {
1664 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1665
1666 if (tdep->vns_type == NULL)
1667 {
1668 struct type *t;
1669 struct type *elem;
1670
1671 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1672 TYPE_CODE_UNION);
1673
1674 elem = builtin_type (gdbarch)->builtin_float;
1675 append_composite_type_field (t, "f", elem);
1676
1677 elem = builtin_type (gdbarch)->builtin_uint32;
1678 append_composite_type_field (t, "u", elem);
1679
1680 elem = builtin_type (gdbarch)->builtin_int32;
1681 append_composite_type_field (t, "s", elem);
1682
1683 tdep->vns_type = t;
1684 }
1685
1686 return tdep->vns_type;
1687 }
1688
1689 /* Return the type for an AdvSISD H register. */
1690
1691 static struct type *
1692 aarch64_vnh_type (struct gdbarch *gdbarch)
1693 {
1694 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1695
1696 if (tdep->vnh_type == NULL)
1697 {
1698 struct type *t;
1699 struct type *elem;
1700
1701 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1702 TYPE_CODE_UNION);
1703
1704 elem = builtin_type (gdbarch)->builtin_uint16;
1705 append_composite_type_field (t, "u", elem);
1706
1707 elem = builtin_type (gdbarch)->builtin_int16;
1708 append_composite_type_field (t, "s", elem);
1709
1710 tdep->vnh_type = t;
1711 }
1712
1713 return tdep->vnh_type;
1714 }
1715
1716 /* Return the type for an AdvSISD B register. */
1717
1718 static struct type *
1719 aarch64_vnb_type (struct gdbarch *gdbarch)
1720 {
1721 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1722
1723 if (tdep->vnb_type == NULL)
1724 {
1725 struct type *t;
1726 struct type *elem;
1727
1728 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1729 TYPE_CODE_UNION);
1730
1731 elem = builtin_type (gdbarch)->builtin_uint8;
1732 append_composite_type_field (t, "u", elem);
1733
1734 elem = builtin_type (gdbarch)->builtin_int8;
1735 append_composite_type_field (t, "s", elem);
1736
1737 tdep->vnb_type = t;
1738 }
1739
1740 return tdep->vnb_type;
1741 }
1742
1743 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1744
1745 static int
1746 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1747 {
1748 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1749 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1750
1751 if (reg == AARCH64_DWARF_SP)
1752 return AARCH64_SP_REGNUM;
1753
1754 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1755 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1756
1757 return -1;
1758 }
1759 \f
1760
/* Implement the "print_insn" gdbarch method.  Disassemble the
   instruction at MEMADDR using the default printer, with symbol
   information cleared from INFO first.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}
1769
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Breakpoint kind/size helpers built from the single fixed-length BRK
   instruction above.  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1776
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  Register selection follows the AAPCS64 return
   rules: FP scalars in V0, integral values in X0 onwards, complex and
   HFA/HVA values in consecutive V registers, and other small
   aggregates in consecutive X registers.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating point values are returned in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      /* A complex value occupies two consecutive V registers, the
	 real part first.  */
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      /* Homogeneous aggregates come back one member per V register,
	 starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[V_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
1887
1888
1889 /* Will a function return an aggregate type in memory or in a
1890 register? Return 0 if an aggregate type can be returned in a
1891 register, 1 if it must be returned in memory. */
1892
1893 static int
1894 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1895 {
1896 type = check_typedef (type);
1897
1898 if (is_hfa_or_hva (type))
1899 {
1900 /* v0-v7 are used to return values and one register is allocated
1901 for one member. However, HFA or HVA has at most four members. */
1902 return 0;
1903 }
1904
1905 if (TYPE_LENGTH (type) > 16)
1906 {
1907 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1908 invisible reference. */
1909
1910 return 1;
1911 }
1912
1913 return 0;
1914 }
1915
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format in VALBUF.  This is the inverse of
   aarch64_extract_return_value and uses the same AAPCS64 register
   selection.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating point values go in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa_or_hva (type))
    {
      /* Homogeneous aggregates: one member per V register, starting
	 at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[V_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA or HVA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2019
2020 /* Implement the "return_value" gdbarch method. */
2021
2022 static enum return_value_convention
2023 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2024 struct type *valtype, struct regcache *regcache,
2025 gdb_byte *readbuf, const gdb_byte *writebuf)
2026 {
2027
2028 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2029 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2030 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2031 {
2032 if (aarch64_return_in_memory (gdbarch, valtype))
2033 {
2034 if (aarch64_debug)
2035 debug_printf ("return value in memory\n");
2036 return RETURN_VALUE_STRUCT_CONVENTION;
2037 }
2038 }
2039
2040 if (writebuf)
2041 aarch64_store_return_value (valtype, regcache, writebuf);
2042
2043 if (readbuf)
2044 aarch64_extract_return_value (valtype, regcache, readbuf);
2045
2046 if (aarch64_debug)
2047 debug_printf ("return value in registers\n");
2048
2049 return RETURN_VALUE_REGISTER_CONVENTION;
2050 }
2051
2052 /* Implement the "get_longjmp_target" gdbarch method. */
2053
2054 static int
2055 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2056 {
2057 CORE_ADDR jb_addr;
2058 gdb_byte buf[X_REGISTER_SIZE];
2059 struct gdbarch *gdbarch = get_frame_arch (frame);
2060 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2061 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2062
2063 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2064
2065 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2066 X_REGISTER_SIZE))
2067 return 0;
2068
2069 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2070 return 1;
2071 }
2072
/* Implement the "gen_return_address" gdbarch method.  For agent
   expressions, the return address is simply the live LR register; no
   bytecode needs to be emitted into AX.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
			    struct agent_expr *ax, struct axs_value *value,
			    CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
2084 \f
2085
2086 /* Return the pseudo register name corresponding to register regnum. */
2087
2088 static const char *
2089 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2090 {
2091 static const char *const q_name[] =
2092 {
2093 "q0", "q1", "q2", "q3",
2094 "q4", "q5", "q6", "q7",
2095 "q8", "q9", "q10", "q11",
2096 "q12", "q13", "q14", "q15",
2097 "q16", "q17", "q18", "q19",
2098 "q20", "q21", "q22", "q23",
2099 "q24", "q25", "q26", "q27",
2100 "q28", "q29", "q30", "q31",
2101 };
2102
2103 static const char *const d_name[] =
2104 {
2105 "d0", "d1", "d2", "d3",
2106 "d4", "d5", "d6", "d7",
2107 "d8", "d9", "d10", "d11",
2108 "d12", "d13", "d14", "d15",
2109 "d16", "d17", "d18", "d19",
2110 "d20", "d21", "d22", "d23",
2111 "d24", "d25", "d26", "d27",
2112 "d28", "d29", "d30", "d31",
2113 };
2114
2115 static const char *const s_name[] =
2116 {
2117 "s0", "s1", "s2", "s3",
2118 "s4", "s5", "s6", "s7",
2119 "s8", "s9", "s10", "s11",
2120 "s12", "s13", "s14", "s15",
2121 "s16", "s17", "s18", "s19",
2122 "s20", "s21", "s22", "s23",
2123 "s24", "s25", "s26", "s27",
2124 "s28", "s29", "s30", "s31",
2125 };
2126
2127 static const char *const h_name[] =
2128 {
2129 "h0", "h1", "h2", "h3",
2130 "h4", "h5", "h6", "h7",
2131 "h8", "h9", "h10", "h11",
2132 "h12", "h13", "h14", "h15",
2133 "h16", "h17", "h18", "h19",
2134 "h20", "h21", "h22", "h23",
2135 "h24", "h25", "h26", "h27",
2136 "h28", "h29", "h30", "h31",
2137 };
2138
2139 static const char *const b_name[] =
2140 {
2141 "b0", "b1", "b2", "b3",
2142 "b4", "b5", "b6", "b7",
2143 "b8", "b9", "b10", "b11",
2144 "b12", "b13", "b14", "b15",
2145 "b16", "b17", "b18", "b19",
2146 "b20", "b21", "b22", "b23",
2147 "b24", "b25", "b26", "b27",
2148 "b28", "b29", "b30", "b31",
2149 };
2150
2151 regnum -= gdbarch_num_regs (gdbarch);
2152
2153 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2154 return q_name[regnum - AARCH64_Q0_REGNUM];
2155
2156 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2157 return d_name[regnum - AARCH64_D0_REGNUM];
2158
2159 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2160 return s_name[regnum - AARCH64_S0_REGNUM];
2161
2162 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2163 return h_name[regnum - AARCH64_H0_REGNUM];
2164
2165 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2166 return b_name[regnum - AARCH64_B0_REGNUM];
2167
2168 internal_error (__FILE__, __LINE__,
2169 _("aarch64_pseudo_register_name: bad register number %d"),
2170 regnum);
2171 }
2172
2173 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2174
2175 static struct type *
2176 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2177 {
2178 regnum -= gdbarch_num_regs (gdbarch);
2179
2180 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2181 return aarch64_vnq_type (gdbarch);
2182
2183 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2184 return aarch64_vnd_type (gdbarch);
2185
2186 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2187 return aarch64_vns_type (gdbarch);
2188
2189 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2190 return aarch64_vnh_type (gdbarch);
2191
2192 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2193 return aarch64_vnb_type (gdbarch);
2194
2195 internal_error (__FILE__, __LINE__,
2196 _("aarch64_pseudo_register_type: bad register number %d"),
2197 regnum);
2198 }
2199
2200 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2201
2202 static int
2203 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2204 struct reggroup *group)
2205 {
2206 regnum -= gdbarch_num_regs (gdbarch);
2207
2208 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2209 return group == all_reggroup || group == vector_reggroup;
2210 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2211 return (group == all_reggroup || group == vector_reggroup
2212 || group == float_reggroup);
2213 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2214 return (group == all_reggroup || group == vector_reggroup
2215 || group == float_reggroup);
2216 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2217 return group == all_reggroup || group == vector_reggroup;
2218 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2219 return group == all_reggroup || group == vector_reggroup;
2220
2221 return group == all_reggroup;
2222 }
2223
2224 /* Implement the "pseudo_register_read_value" gdbarch method. */
2225
2226 static struct value *
2227 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2228 struct regcache *regcache,
2229 int regnum)
2230 {
2231 gdb_byte reg_buf[V_REGISTER_SIZE];
2232 struct value *result_value;
2233 gdb_byte *buf;
2234
2235 result_value = allocate_value (register_type (gdbarch, regnum));
2236 VALUE_LVAL (result_value) = lval_register;
2237 VALUE_REGNUM (result_value) = regnum;
2238 buf = value_contents_raw (result_value);
2239
2240 regnum -= gdbarch_num_regs (gdbarch);
2241
2242 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2243 {
2244 enum register_status status;
2245 unsigned v_regnum;
2246
2247 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2248 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2249 if (status != REG_VALID)
2250 mark_value_bytes_unavailable (result_value, 0,
2251 TYPE_LENGTH (value_type (result_value)));
2252 else
2253 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2254 return result_value;
2255 }
2256
2257 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2258 {
2259 enum register_status status;
2260 unsigned v_regnum;
2261
2262 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2263 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2264 if (status != REG_VALID)
2265 mark_value_bytes_unavailable (result_value, 0,
2266 TYPE_LENGTH (value_type (result_value)));
2267 else
2268 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2269 return result_value;
2270 }
2271
2272 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2273 {
2274 enum register_status status;
2275 unsigned v_regnum;
2276
2277 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2278 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2279 if (status != REG_VALID)
2280 mark_value_bytes_unavailable (result_value, 0,
2281 TYPE_LENGTH (value_type (result_value)));
2282 else
2283 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2284 return result_value;
2285 }
2286
2287 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2288 {
2289 enum register_status status;
2290 unsigned v_regnum;
2291
2292 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2293 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2294 if (status != REG_VALID)
2295 mark_value_bytes_unavailable (result_value, 0,
2296 TYPE_LENGTH (value_type (result_value)));
2297 else
2298 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2299 return result_value;
2300 }
2301
2302 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2303 {
2304 enum register_status status;
2305 unsigned v_regnum;
2306
2307 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2308 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2309 if (status != REG_VALID)
2310 mark_value_bytes_unavailable (result_value, 0,
2311 TYPE_LENGTH (value_type (result_value)));
2312 else
2313 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2314 return result_value;
2315 }
2316
2317 gdb_assert_not_reached ("regnum out of bound");
2318 }
2319
2320 /* Implement the "pseudo_register_write" gdbarch method. */
2321
2322 static void
2323 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2324 int regnum, const gdb_byte *buf)
2325 {
2326 gdb_byte reg_buf[V_REGISTER_SIZE];
2327
2328 /* Ensure the register buffer is zero, we want gdb writes of the
2329 various 'scalar' pseudo registers to behavior like architectural
2330 writes, register width bytes are written the remainder are set to
2331 zero. */
2332 memset (reg_buf, 0, sizeof (reg_buf));
2333
2334 regnum -= gdbarch_num_regs (gdbarch);
2335
2336 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2337 {
2338 /* pseudo Q registers */
2339 unsigned v_regnum;
2340
2341 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2342 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2343 regcache_raw_write (regcache, v_regnum, reg_buf);
2344 return;
2345 }
2346
2347 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2348 {
2349 /* pseudo D registers */
2350 unsigned v_regnum;
2351
2352 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2353 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2354 regcache_raw_write (regcache, v_regnum, reg_buf);
2355 return;
2356 }
2357
2358 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2359 {
2360 unsigned v_regnum;
2361
2362 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2363 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2364 regcache_raw_write (regcache, v_regnum, reg_buf);
2365 return;
2366 }
2367
2368 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2369 {
2370 /* pseudo H registers */
2371 unsigned v_regnum;
2372
2373 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2374 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2375 regcache_raw_write (regcache, v_regnum, reg_buf);
2376 return;
2377 }
2378
2379 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2380 {
2381 /* pseudo B registers */
2382 unsigned v_regnum;
2383
2384 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2385 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2386 regcache_raw_write (regcache, v_regnum, reg_buf);
2387 return;
2388 }
2389
2390 gdb_assert_not_reached ("regnum out of bound");
2391 }
2392
2393 /* Callback function for user_reg_add. */
2394
2395 static struct value *
2396 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2397 {
2398 const int *reg_p = (const int *) baton;
2399
2400 return value_of_register (*reg_p, frame);
2401 }
2402 \f
2403
2404 /* Implement the "software_single_step" gdbarch method, needed to
2405 single step through atomic sequences on AArch64. */
2406
static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  /* breaks[0] is the address after the sequence; breaks[1] (optional)
     is the target of a conditional branch inside the sequence.  */
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  /* An empty vector means "no software single step here"; the caller
     falls back to its normal stepping strategy.  */
  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.
     Bit 22 set distinguishes a load-exclusive from a store-exclusive
     within the ldstexcl class.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  /* Scan forward, at most ATOMIC_SEQUENCE_LENGTH instructions, for the
     Store Exclusive that closes the sequence, noting at most one
     conditional branch on the way.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* More than one conditional branch makes the sequence too
	     complex to step over; give up.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.
	 Bit 22 clear marks a store-exclusive.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
2487
/* Per-step state carried from displaced-step copy to fixup.  */

struct aarch64_displaced_step_closure : public displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  The fixup then decides whether the
     branch was taken from the PC the scratch-pad copy stopped at.  */
  int cond = 0;

  /* PC adjustment offset after displaced stepping.  Applied relative
     to the original (pre-copy) PC; 0 means leave the PC alone.  */
  int32_t pc_adjust = 0;
};
2497
2498 /* Data when visiting instructions for displaced stepping. */
2499
/* Data when visiting instructions for displaced stepping.  Extends
   aarch64_insn_data (which carries the original insn address) with
   the output buffer and bookkeeping the visitor callbacks fill in.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure handed back to the fixup phase.  */
  aarch64_displaced_step_closure *dsc;
};
2515
2516 /* Implementation of aarch64_insn_visitor method "b". */
2517
2518 static void
2519 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2520 struct aarch64_insn_data *data)
2521 {
2522 struct aarch64_displaced_step_data *dsd
2523 = (struct aarch64_displaced_step_data *) data;
2524 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2525
2526 if (can_encode_int32 (new_offset, 28))
2527 {
2528 /* Emit B rather than BL, because executing BL on a new address
2529 will get the wrong address into LR. In order to avoid this,
2530 we emit B, and update LR if the instruction is BL. */
2531 emit_b (dsd->insn_buf, 0, new_offset);
2532 dsd->insn_count++;
2533 }
2534 else
2535 {
2536 /* Write NOP. */
2537 emit_nop (dsd->insn_buf);
2538 dsd->insn_count++;
2539 dsd->dsc->pc_adjust = offset;
2540 }
2541
2542 if (is_bl)
2543 {
2544 /* Update LR. */
2545 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2546 data->insn_addr + 4);
2547 }
2548 }
2549
2550 /* Implementation of aarch64_insn_visitor method "b_cond". */
2551
2552 static void
2553 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2554 struct aarch64_insn_data *data)
2555 {
2556 struct aarch64_displaced_step_data *dsd
2557 = (struct aarch64_displaced_step_data *) data;
2558
2559 /* GDB has to fix up PC after displaced step this instruction
2560 differently according to the condition is true or false. Instead
2561 of checking COND against conditional flags, we can use
2562 the following instructions, and GDB can tell how to fix up PC
2563 according to the PC value.
2564
2565 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2566 INSN1 ;
2567 TAKEN:
2568 INSN2
2569 */
2570
2571 emit_bcond (dsd->insn_buf, cond, 8);
2572 dsd->dsc->cond = 1;
2573 dsd->dsc->pc_adjust = offset;
2574 dsd->insn_count = 1;
2575 }
2576
2577 /* Dynamically allocate a new register. If we know the register
2578 statically, we should make it a global as above instead of using this
2579 helper function. */
2580
2581 static struct aarch64_register
2582 aarch64_register (unsigned num, int is64)
2583 {
2584 return (struct aarch64_register) { num, is64 };
2585 }
2586
2587 /* Implementation of aarch64_insn_visitor method "cb". */
2588
2589 static void
2590 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2591 const unsigned rn, int is64,
2592 struct aarch64_insn_data *data)
2593 {
2594 struct aarch64_displaced_step_data *dsd
2595 = (struct aarch64_displaced_step_data *) data;
2596
2597 /* The offset is out of range for a compare and branch
2598 instruction. We can use the following instructions instead:
2599
2600 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2601 INSN1 ;
2602 TAKEN:
2603 INSN2
2604 */
2605 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2606 dsd->insn_count = 1;
2607 dsd->dsc->cond = 1;
2608 dsd->dsc->pc_adjust = offset;
2609 }
2610
2611 /* Implementation of aarch64_insn_visitor method "tb". */
2612
2613 static void
2614 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2615 const unsigned rt, unsigned bit,
2616 struct aarch64_insn_data *data)
2617 {
2618 struct aarch64_displaced_step_data *dsd
2619 = (struct aarch64_displaced_step_data *) data;
2620
2621 /* The offset is out of range for a test bit and branch
2622 instruction We can use the following instructions instead:
2623
2624 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2625 INSN1 ;
2626 TAKEN:
2627 INSN2
2628
2629 */
2630 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2631 dsd->insn_count = 1;
2632 dsd->dsc->cond = 1;
2633 dsd->dsc->pc_adjust = offset;
2634 }
2635
2636 /* Implementation of aarch64_insn_visitor method "adr". */
2637
2638 static void
2639 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2640 const int is_adrp, struct aarch64_insn_data *data)
2641 {
2642 struct aarch64_displaced_step_data *dsd
2643 = (struct aarch64_displaced_step_data *) data;
2644 /* We know exactly the address the ADR{P,} instruction will compute.
2645 We can just write it to the destination register. */
2646 CORE_ADDR address = data->insn_addr + offset;
2647
2648 if (is_adrp)
2649 {
2650 /* Clear the lower 12 bits of the offset to get the 4K page. */
2651 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2652 address & ~0xfff);
2653 }
2654 else
2655 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2656 address);
2657
2658 dsd->dsc->pc_adjust = 4;
2659 emit_nop (dsd->insn_buf);
2660 dsd->insn_count = 1;
2661 }
2662
2663 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2664
2665 static void
2666 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2667 const unsigned rt, const int is64,
2668 struct aarch64_insn_data *data)
2669 {
2670 struct aarch64_displaced_step_data *dsd
2671 = (struct aarch64_displaced_step_data *) data;
2672 CORE_ADDR address = data->insn_addr + offset;
2673 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2674
2675 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2676 address);
2677
2678 if (is_sw)
2679 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2680 aarch64_register (rt, 1), zero);
2681 else
2682 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2683 aarch64_register (rt, 1), zero);
2684
2685 dsd->dsc->pc_adjust = 4;
2686 }
2687
2688 /* Implementation of aarch64_insn_visitor method "others". */
2689
2690 static void
2691 aarch64_displaced_step_others (const uint32_t insn,
2692 struct aarch64_insn_data *data)
2693 {
2694 struct aarch64_displaced_step_data *dsd
2695 = (struct aarch64_displaced_step_data *) data;
2696
2697 aarch64_emit_insn (dsd->insn_buf, insn);
2698 dsd->insn_count = 1;
2699
2700 if ((insn & 0xfffffc1f) == 0xd65f0000)
2701 {
2702 /* RET */
2703 dsd->dsc->pc_adjust = 0;
2704 }
2705 else
2706 dsd->dsc->pc_adjust = 4;
2707 }
2708
/* Visitor callbacks used by aarch64_relocate_instruction to rewrite
   each class of PC-relative instruction for the scratch pad.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
2719
2720 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2721
/* Implement the "displaced_step_copy_insn" gdbarch method.  Relocate
   the instruction at FROM into the scratch pad at TO, returning a
   closure for the fixup phase, or NULL if displaced stepping is not
   possible for this instruction (caller falls back to another
   stepping strategy).  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence
     (bit 22 set distinguishes load- from store-exclusive).  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_closure> dsc
    (new aarch64_displaced_step_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  /* Dispatch to the per-instruction-class visitor, which fills
     dsd.insn_buf and dsd.dsc.  */
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  if (debug_displaced)
	    {
	      debug_printf ("displaced: writing insn ");
	      debug_printf ("%.8x", dsd.insn_buf[i]);
	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
	    }
	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* Nothing was emitted: report failure and free the closure.  */
      dsc = NULL;
    }

  /* Ownership of the closure transfers to the caller.  */
  return dsc.release ();
}
2778
2779 /* Implement the "displaced_step_fixup" gdbarch method. */
2780
2781 void
2782 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2783 struct displaced_step_closure *dsc_,
2784 CORE_ADDR from, CORE_ADDR to,
2785 struct regcache *regs)
2786 {
2787 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2788
2789 if (dsc->cond)
2790 {
2791 ULONGEST pc;
2792
2793 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2794 if (pc - to == 8)
2795 {
2796 /* Condition is true. */
2797 }
2798 else if (pc - to == 4)
2799 {
2800 /* Condition is false. */
2801 dsc->pc_adjust = 4;
2802 }
2803 else
2804 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2805 }
2806
2807 if (dsc->pc_adjust != 0)
2808 {
2809 if (debug_displaced)
2810 {
2811 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2812 paddress (gdbarch, from), dsc->pc_adjust);
2813 }
2814 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2815 from + dsc->pc_adjust);
2816 }
2817 }
2818
2819 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2820
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   Always returns 1: the scratch-pad copy is stepped with a hardware
   single-step rather than by planting breakpoints.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2827
2828 /* Get the correct target description. */
2829
2830 const target_desc *
2831 aarch64_read_description ()
2832 {
2833 static target_desc *aarch64_tdesc = NULL;
2834 target_desc **tdesc = &aarch64_tdesc;
2835
2836 if (*tdesc == NULL)
2837 *tdesc = aarch64_create_target_description ();
2838
2839 return *tdesc;
2840 }
2841
2842 /* Initialize the current architecture based on INFO. If possible,
2843 re-use an architecture from ARCHES, which is a list of
2844 architectures already created during this debugging session.
2845
2846 Called e.g. at program startup, when reading a core file, and when
2847 reading a binary file. */
2848
static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int i;
  int valid_p = 1;
  const struct tdesc_feature *feature;
  int num_regs = 0;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target descriptor.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = aarch64_read_description ();

  gdb_assert (tdesc);

  /* The core feature is mandatory; without it this is not a usable
     AArch64 description.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  if (feature == NULL)
    return NULL;

  tdesc_data = tdesc_data_alloc ();

  /* Validate the descriptor provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &=
      tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
			       aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Look for the V registers.  The FPU feature is optional; when
     present it brings the 32 V registers and the scalar pseudos.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  if (feature)
    {
      /* Validate the descriptor provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &=
	  tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
				   aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;

      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return NULL;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = XCNEW (struct gdbarch_tdep);
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);

  /* The top byte of an address is known as the "tag" and is
     ignored by the kernel, the hardware, etc. and can be regarded
     as additional data associated with the address.  */
  set_gdbarch_significant_addr_bit (gdbarch, 56);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_wchar_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdesc_data = tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  Order matters: the stub unwinder is
     tried first, then DWARF, then prologue analysis as a fallback.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  return gdbarch;
}
3037
3038 static void
3039 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3040 {
3041 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3042
3043 if (tdep == NULL)
3044 return;
3045
3046 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3047 paddress (gdbarch, tdep->lowest_pc));
3048 }
3049
3050 #if GDB_SELF_TEST
3051 namespace selftests
3052 {
3053 static void aarch64_process_record_test (void);
3054 }
3055 #endif
3056
/* Module initializer: register the architecture, the debug knob, and
   (in self-test builds) the unit tests.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
  selftests::record_xml_tdesc ("aarch64.xml",
			       aarch64_create_target_description ());
#endif
}
3081
3082 /* AArch64 process record-replay related structures, defines etc. */
3083
/* Allocate REGS (an array of uint32_t register numbers) and copy
   LENGTH entries into it from RECORD_BUF.  No-op when LENGTH is 0;
   the caller owns (and must free) the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)
3095
/* Allocate MEMS (an array of struct aarch64_mem_r) and copy LENGTH
   records into it from RECORD_BUF.  No-op when LENGTH is 0; the
   caller owns the allocation.  NOTE(review): the copy destination is
   &MEMS->len — the first member of the first element, so equivalent
   to MEMS itself, but it presumes RECORD_BUF is laid out as
   aarch64_mem_r records; confirm at call sites.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
3108
3109 /* AArch64 record/replay structures and enumerations. */
3110
/* One recorded memory access: the length and address of a write the
   replay machinery must be able to undo.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
3116
/* Outcome of decoding one instruction for record/replay.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Instruction recorded.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Recognized but cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN	/* Not recognized by the decoder.  */
};
3123
/* Working state threaded through the per-class record handlers while
   decoding a single instruction.  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
3135
3136 /* Record handler for data processing - register instructions. */
3137
static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  /* Destination register and the two opcode fields that classify the
     data-processing (register) encodings.  */
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      /* Rd always changes; CPSR changes too for the flag-setting
	 variants (ANDS/BICS, ADDS/SUBS).  */
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3211
3212 /* Record handler for data processing - immediate instructions. */
3213
3214 static unsigned int
3215 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3216 {
3217 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3218 uint32_t record_buf[4];
3219
3220 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3221 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3222 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3223
3224 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3225 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3226 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3227 {
3228 record_buf[0] = reg_rd;
3229 aarch64_insn_r->reg_rec_count = 1;
3230 }
3231 else if (insn_bits24_27 == 0x01)
3232 {
3233 /* Add/Subtract (immediate). */
3234 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3235 record_buf[0] = reg_rd;
3236 aarch64_insn_r->reg_rec_count = 1;
3237 if (setflags)
3238 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3239 }
3240 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3241 {
3242 /* Logical (immediate). */
3243 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3244 record_buf[0] = reg_rd;
3245 aarch64_insn_r->reg_rec_count = 1;
3246 if (setflags)
3247 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3248 }
3249 else
3250 return AARCH64_RECORD_UNKNOWN;
3251
3252 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3253 record_buf);
3254 return AARCH64_RECORD_SUCCESS;
3255 }
3256
/* Record handler for branch, exception generation and system instructions.
   Records PC for branches (plus LR for the linking forms), CPSR or Rt for
   system instructions, and delegates SVC to the OS-specific syscall
   recorder supplied through gdbarch_tdep.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  /* tdep provides aarch64_syscall_record, the OSABI-specific hook.  */
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
        {
          /* Only the SVC encoding (op2 == 0, opc == 0, LL == 0b01) is
             recordable; its effects depend on the syscall invoked.  */
          if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
              && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
              && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
            {
              ULONGEST svc_number;

              /* Register 8 carries the syscall number; let the
                 OS-specific handler record the syscall's side effects.  */
              regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
                                          &svc_number);
              return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
                                                   svc_number);
            }
          else
            /* Other exception-generating insns (e.g. HVC/SMC/BRK) cannot
               be replayed.  */
            return AARCH64_RECORD_UNSUPPORTED;
        }
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
        {
          uint32_t reg_rt, reg_crn;

          reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
          reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

          /* Record rt in case of sysl and mrs instructions. */
          if (bit (aarch64_insn_r->aarch64_insn, 21))
            {
              record_buf[0] = reg_rt;
              aarch64_insn_r->reg_rec_count = 1;
            }
          /* Record cpsr for hint and msr(immediate) instructions. */
          else if (reg_crn == 0x02 || reg_crn == 0x04)
            {
              record_buf[0] = AARCH64_CPSR_REGNUM;
              aarch64_insn_r->reg_rec_count = 1;
            }
          /* Remaining system insns fall through with zero records.  */
        }
      /* Unconditional branch (register). */
      else if((insn_bits24_27 & 0x0e) == 0x06)
        {
          record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
          /* The linking form (BLR encoding, op bits 21-22 == 0b01) also
             writes LR.  */
          if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
            record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate). */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 set is the linking form (BL): LR is written too.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate). */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3336
/* Record handler for advanced SIMD load and store instructions.
   Walks the structure load/store encodings (single-structure and
   multiple-structure forms): for loads it pushes each written vector
   register into record_buf, for stores it pushes each written memory
   chunk's (size, address) pair into record_buf_mem.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* The transfer base address comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure. */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;

      /* scale determines the element size below; selem is the number of
         registers in the structure (1-4).  */
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
        {
        case 1:
          if (size_bits & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          break;
        case 2:
          if ((size_bits >> 1) & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if (size_bits & 0x01)
            {
              /* 64-bit element variant of the word-sized encoding.  */
              if (!((opcode_bits >> 1) & 0x01))
                scale = 3;
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          break;
        case 3:
          /* Load-and-replicate form: only valid as a load (bit 22 set)
             with opcode bit 0 clear; element size comes from size_bits.  */
          if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
            {
              scale = size_bits;
              replicate = 1;
              break;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        default:
          break;
        }
      /* Element size in bits.  */
      esize = 8 << scale;
      if (replicate)
        /* Replicating load: only registers change, no memory.  */
        for (sindex = 0; sindex < selem; sindex++)
          {
            record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            /* Register numbers wrap around past V31.  */
            reg_rt = (reg_rt + 1) % 32;
          }
      else
        {
          for (sindex = 0; sindex < selem; sindex++)
            {
              /* Bit 22: load (record the vector register) vs store
                 (record the written memory chunk).  */
              if (bit (aarch64_insn_r->aarch64_insn, 22))
                record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
              else
                {
                  record_buf_mem[mem_index++] = esize / 8;
                  record_buf_mem[mem_index++] = address + addr_offset;
                }
              addr_offset = addr_offset + (esize / 8);
              reg_rt = (reg_rt + 1) % 32;
            }
        }
    }
  /* Load/store multiple structure. */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 (Q) selects 128-bit or 64-bit vector registers.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
      else
        elements = 64 / esize;

      /* rpt is how many consecutive registers the pattern repeats over,
         selem how many structure elements each iteration touches.  */
      switch (opcode_bits)
        {
        /*LD/ST4 (4 Registers). */
        case 0:
          rpt = 1;
          selem = 4;
          break;
        /*LD/ST1 (4 Registers). */
        case 2:
          rpt = 4;
          selem = 1;
          break;
        /*LD/ST3 (3 Registers). */
        case 4:
          rpt = 1;
          selem = 3;
          break;
        /*LD/ST1 (3 Registers). */
        case 6:
          rpt = 3;
          selem = 1;
          break;
        /*LD/ST1 (1 Register). */
        case 7:
          rpt = 1;
          selem = 1;
          break;
        /*LD/ST2 (2 Registers). */
        case 8:
          rpt = 1;
          selem = 2;
          break;
        /*LD/ST1 (2 Registers). */
        case 10:
          rpt = 2;
          selem = 1;
          break;
        default:
          return AARCH64_RECORD_UNSUPPORTED;
          break;
        }
      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
          {
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
              {
                /* Bit 22: load (record register) vs store (record
                   memory), as in the single-structure form above.  */
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                else
                  {
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                  }
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;
              }
          }
    }

  /* Bit 23 set means the addressing mode writes back to Rn, so record
     the base register as well.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  /* record_buf_mem holds (len, addr) pairs, hence half as many records.  */
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3502
/* Record handler for load and store instructions.
   Dispatches on the load/store encoding group: loads record the
   destination register(s), stores record the (size, address) of each
   memory write, and the pre/post-indexed addressing modes additionally
   record the written-back base register.  Advanced SIMD structure forms
   are delegated to aarch64_record_asimd_load_store.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  /* Bit 22 is the load bit and bit 26 the SIMD/FP register bit in most
     of the groups decoded below.  */
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive. */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          /* The pair form (bit 21) also loads Rt2.  */
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
          /* The pair form stores twice the element size.  */
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs. */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding. */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
        {
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          uint16_t imm7_off;
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          /* imm7 is a signed, scaled offset: take the two's-complement
             magnitude here and apply the sign below.  */
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* Unless this is the post-indexed form, the offset is applied
             to the base before the access.  */
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          /* Two adjacent chunks of DATASIZE/8 bytes are written.  */
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      /* Pre/post-indexed forms (bit 23) write the base register back.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
            {
              /* PRFM (immediate): a prefetch hint, nothing to record.  */
              return AARCH64_RECORD_SUCCESS;
            }
          else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
            {
              /* LDRSW (immediate) */
              ld_flag = 0x1;
            }
          else
            {
              if (opc & 0x01)
                ld_flag = 0x01;
              else
                ld_flag = 0x0;
            }
        }

      if (record_debug)
        {
          debug_printf ("Process record: load/store (unsigned immediate):"
                        " size %x V %d opc %x\n", size_bits, vector_flag,
                        opc);
        }

      if (!ld_flag)
        {
          /* Store: record the written chunk at base + scaled imm12.  */
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          /* The offset comes from register Rm (bits 16-20).  */
          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
          /* Bit 12 (S) scales the index by the access size.  */
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          /* imm9 is signed: take its two's-complement magnitude.  */
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* Post-indexed form (bits 10-11 == 0x01) applies the offset
             after the access, so the store address is just the base.  */
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      /* Pre/post-indexed forms write the base register back.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions. */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3790
3791 /* Record handler for data processing SIMD and floating point instructions. */
3792
3793 static unsigned int
3794 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3795 {
3796 uint8_t insn_bit21, opcode, rmode, reg_rd;
3797 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3798 uint8_t insn_bits11_14;
3799 uint32_t record_buf[2];
3800
3801 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3802 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3803 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3804 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3805 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3806 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3807 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3808 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3809 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3810
3811 if (record_debug)
3812 debug_printf ("Process record: data processing SIMD/FP: ");
3813
3814 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3815 {
3816 /* Floating point - fixed point conversion instructions. */
3817 if (!insn_bit21)
3818 {
3819 if (record_debug)
3820 debug_printf ("FP - fixed point conversion");
3821
3822 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3823 record_buf[0] = reg_rd;
3824 else
3825 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3826 }
3827 /* Floating point - conditional compare instructions. */
3828 else if (insn_bits10_11 == 0x01)
3829 {
3830 if (record_debug)
3831 debug_printf ("FP - conditional compare");
3832
3833 record_buf[0] = AARCH64_CPSR_REGNUM;
3834 }
3835 /* Floating point - data processing (2-source) and
3836 conditional select instructions. */
3837 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3838 {
3839 if (record_debug)
3840 debug_printf ("FP - DP (2-source)");
3841
3842 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3843 }
3844 else if (insn_bits10_11 == 0x00)
3845 {
3846 /* Floating point - immediate instructions. */
3847 if ((insn_bits12_15 & 0x01) == 0x01
3848 || (insn_bits12_15 & 0x07) == 0x04)
3849 {
3850 if (record_debug)
3851 debug_printf ("FP - immediate");
3852 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3853 }
3854 /* Floating point - compare instructions. */
3855 else if ((insn_bits12_15 & 0x03) == 0x02)
3856 {
3857 if (record_debug)
3858 debug_printf ("FP - immediate");
3859 record_buf[0] = AARCH64_CPSR_REGNUM;
3860 }
3861 /* Floating point - integer conversions instructions. */
3862 else if (insn_bits12_15 == 0x00)
3863 {
3864 /* Convert float to integer instruction. */
3865 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3866 {
3867 if (record_debug)
3868 debug_printf ("float to int conversion");
3869
3870 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3871 }
3872 /* Convert integer to float instruction. */
3873 else if ((opcode >> 1) == 0x01 && !rmode)
3874 {
3875 if (record_debug)
3876 debug_printf ("int to float conversion");
3877
3878 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3879 }
3880 /* Move float to integer instruction. */
3881 else if ((opcode >> 1) == 0x03)
3882 {
3883 if (record_debug)
3884 debug_printf ("move float to int");
3885
3886 if (!(opcode & 0x01))
3887 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3888 else
3889 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3890 }
3891 else
3892 return AARCH64_RECORD_UNKNOWN;
3893 }
3894 else
3895 return AARCH64_RECORD_UNKNOWN;
3896 }
3897 else
3898 return AARCH64_RECORD_UNKNOWN;
3899 }
3900 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3901 {
3902 if (record_debug)
3903 debug_printf ("SIMD copy");
3904
3905 /* Advanced SIMD copy instructions. */
3906 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3907 && !bit (aarch64_insn_r->aarch64_insn, 15)
3908 && bit (aarch64_insn_r->aarch64_insn, 10))
3909 {
3910 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3911 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3912 else
3913 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3914 }
3915 else
3916 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3917 }
3918 /* All remaining floating point or advanced SIMD instructions. */
3919 else
3920 {
3921 if (record_debug)
3922 debug_printf ("all remain");
3923
3924 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3925 }
3926
3927 if (record_debug)
3928 debug_printf ("\n");
3929
3930 aarch64_insn_r->reg_rec_count++;
3931 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3932 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3933 record_buf);
3934 return AARCH64_RECORD_SUCCESS;
3935 }
3936
3937 /* Decodes insns type and invokes its record handler. */
3938
3939 static unsigned int
3940 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3941 {
3942 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3943
3944 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3945 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3946 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3947 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3948
3949 /* Data processing - immediate instructions. */
3950 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3951 return aarch64_record_data_proc_imm (aarch64_insn_r);
3952
3953 /* Branch, exception generation and system instructions. */
3954 if (ins_bit26 && !ins_bit27 && ins_bit28)
3955 return aarch64_record_branch_except_sys (aarch64_insn_r);
3956
3957 /* Load and store instructions. */
3958 if (!ins_bit25 && ins_bit27)
3959 return aarch64_record_load_store (aarch64_insn_r);
3960
3961 /* Data processing - register instructions. */
3962 if (ins_bit25 && !ins_bit26 && ins_bit27)
3963 return aarch64_record_data_proc_reg (aarch64_insn_r);
3964
3965 /* Data processing - SIMD and floating point instructions. */
3966 if (ins_bit25 && ins_bit26 && ins_bit27)
3967 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3968
3969 return AARCH64_RECORD_UNSUPPORTED;
3970 }
3971
3972 /* Cleans up local record registers and memory allocations. */
3973
3974 static void
3975 deallocate_reg_mem (insn_decode_record *record)
3976 {
3977 xfree (record->aarch64_regs);
3978 xfree (record->aarch64_mems);
3979 }
3980
#if GDB_SELF_TEST
namespace selftests {

/* Self test for the record decoder: PRFM is a prefetch hint that changes
   neither registers nor memory, so decoding it must succeed with zero
   register and zero memory records.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  /* regcache is left NULL: the PRFM path must not touch registers.  */
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9        prfm    pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4015
4016 /* Parse the current instruction and record the values of the registers and
4017 memory that will be changed in current instruction to record_arch_list
4018 return -1 if something is wrong. */
4019
4020 int
4021 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4022 CORE_ADDR insn_addr)
4023 {
4024 uint32_t rec_no = 0;
4025 uint8_t insn_size = 4;
4026 uint32_t ret = 0;
4027 gdb_byte buf[insn_size];
4028 insn_decode_record aarch64_record;
4029
4030 memset (&buf[0], 0, insn_size);
4031 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4032 target_read_memory (insn_addr, &buf[0], insn_size);
4033 aarch64_record.aarch64_insn
4034 = (uint32_t) extract_unsigned_integer (&buf[0],
4035 insn_size,
4036 gdbarch_byte_order (gdbarch));
4037 aarch64_record.regcache = regcache;
4038 aarch64_record.this_addr = insn_addr;
4039 aarch64_record.gdbarch = gdbarch;
4040
4041 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4042 if (ret == AARCH64_RECORD_UNSUPPORTED)
4043 {
4044 printf_unfiltered (_("Process record does not support instruction "
4045 "0x%0x at address %s.\n"),
4046 aarch64_record.aarch64_insn,
4047 paddress (gdbarch, insn_addr));
4048 ret = -1;
4049 }
4050
4051 if (0 == ret)
4052 {
4053 /* Record registers. */
4054 record_full_arch_list_add_reg (aarch64_record.regcache,
4055 AARCH64_PC_REGNUM);
4056 /* Always record register CPSR. */
4057 record_full_arch_list_add_reg (aarch64_record.regcache,
4058 AARCH64_CPSR_REGNUM);
4059 if (aarch64_record.aarch64_regs)
4060 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4061 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4062 aarch64_record.aarch64_regs[rec_no]))
4063 ret = -1;
4064
4065 /* Record memories. */
4066 if (aarch64_record.aarch64_mems)
4067 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4068 if (record_full_arch_list_add_mem
4069 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4070 aarch64_record.aarch64_mems[rec_no].len))
4071 ret = -1;
4072
4073 if (record_full_arch_list_add_end ())
4074 ret = -1;
4075 }
4076
4077 deallocate_reg_mem (&aarch64_record);
4078 return ret;
4079 }