gdb/aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2-frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "language.h"
43 #include "infcall.h"
44 #include "ax.h"
45 #include "ax-gdb.h"
46 #include "selftest.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57 #include "arch/aarch64-insn.h"
58
59 #include "opcode/aarch64.h"
60 #include <algorithm>
61
62 #define submask(x) ((1L << ((x) + 1)) - 1)
63 #define bit(obj,st) (((obj) >> (st)) & 1)
64 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
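/* Worked example (illustration only): bits (insn, 5, 9) extracts the
   five-bit Rn field occupying bits [9:5] of an instruction word.  For
   the encoding 0x910003fd ("mov x29, sp", an alias of
   "add x29, sp, #0"), bits (0x910003fd, 5, 9) == 0x1f, i.e. register
   number 31, which is SP in this position. */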
65
66 /* Pseudo register base numbers. */
67 #define AARCH64_Q0_REGNUM 0
68 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
69 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
70 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
71 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
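/* Note these constants are offsets within the pseudo register space,
   not raw register numbers: the register number GDB actually uses for,
   say, Q0 is gdbarch_num_regs (gdbarch) + AARCH64_Q0_REGNUM.  The
   layout is 32 Q registers, then 32 D, 32 S, 32 H and 32 B views of
   the same underlying V registers. */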
72
73 /* The standard register names, and all the valid aliases for them. */
74 static const struct
75 {
76 const char *const name;
77 int regnum;
78 } aarch64_register_aliases[] =
79 {
80 /* 64-bit register names. */
81 {"fp", AARCH64_FP_REGNUM},
82 {"lr", AARCH64_LR_REGNUM},
83 {"sp", AARCH64_SP_REGNUM},
84
85 /* 32-bit register names. */
86 {"w0", AARCH64_X0_REGNUM + 0},
87 {"w1", AARCH64_X0_REGNUM + 1},
88 {"w2", AARCH64_X0_REGNUM + 2},
89 {"w3", AARCH64_X0_REGNUM + 3},
90 {"w4", AARCH64_X0_REGNUM + 4},
91 {"w5", AARCH64_X0_REGNUM + 5},
92 {"w6", AARCH64_X0_REGNUM + 6},
93 {"w7", AARCH64_X0_REGNUM + 7},
94 {"w8", AARCH64_X0_REGNUM + 8},
95 {"w9", AARCH64_X0_REGNUM + 9},
96 {"w10", AARCH64_X0_REGNUM + 10},
97 {"w11", AARCH64_X0_REGNUM + 11},
98 {"w12", AARCH64_X0_REGNUM + 12},
99 {"w13", AARCH64_X0_REGNUM + 13},
100 {"w14", AARCH64_X0_REGNUM + 14},
101 {"w15", AARCH64_X0_REGNUM + 15},
102 {"w16", AARCH64_X0_REGNUM + 16},
103 {"w17", AARCH64_X0_REGNUM + 17},
104 {"w18", AARCH64_X0_REGNUM + 18},
105 {"w19", AARCH64_X0_REGNUM + 19},
106 {"w20", AARCH64_X0_REGNUM + 20},
107 {"w21", AARCH64_X0_REGNUM + 21},
108 {"w22", AARCH64_X0_REGNUM + 22},
109 {"w23", AARCH64_X0_REGNUM + 23},
110 {"w24", AARCH64_X0_REGNUM + 24},
111 {"w25", AARCH64_X0_REGNUM + 25},
112 {"w26", AARCH64_X0_REGNUM + 26},
113 {"w27", AARCH64_X0_REGNUM + 27},
114 {"w28", AARCH64_X0_REGNUM + 28},
115 {"w29", AARCH64_X0_REGNUM + 29},
116 {"w30", AARCH64_X0_REGNUM + 30},
117
118 /* specials */
119 {"ip0", AARCH64_X0_REGNUM + 16},
120 {"ip1", AARCH64_X0_REGNUM + 17}
121 };
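/* Note that "w0".."w30" are aliased here to the X registers rather
   than defined as separate 32-bit registers; presumably this table is
   wired up with user_reg_add () during gdbarch initialisation, so
   "print $w0" resolves to the value held in X0. */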
122
123 /* The required core 'R' registers. */
124 static const char *const aarch64_r_register_names[] =
125 {
126 /* These registers must appear in consecutive RAW register number
127 order and they must begin with AARCH64_X0_REGNUM! */
128 "x0", "x1", "x2", "x3",
129 "x4", "x5", "x6", "x7",
130 "x8", "x9", "x10", "x11",
131 "x12", "x13", "x14", "x15",
132 "x16", "x17", "x18", "x19",
133 "x20", "x21", "x22", "x23",
134 "x24", "x25", "x26", "x27",
135 "x28", "x29", "x30", "sp",
136 "pc", "cpsr"
137 };
138
139 /* The FP/SIMD 'V' registers. */
140 static const char *const aarch64_v_register_names[] =
141 {
142 /* These registers must appear in consecutive RAW register number
143 order and they must begin with AARCH64_V0_REGNUM! */
144 "v0", "v1", "v2", "v3",
145 "v4", "v5", "v6", "v7",
146 "v8", "v9", "v10", "v11",
147 "v12", "v13", "v14", "v15",
148 "v16", "v17", "v18", "v19",
149 "v20", "v21", "v22", "v23",
150 "v24", "v25", "v26", "v27",
151 "v28", "v29", "v30", "v31",
152 "fpsr",
153 "fpcr"
154 };
155
156 /* AArch64 prologue cache structure. */
157 struct aarch64_prologue_cache
158 {
159 /* The program counter at the start of the function. It is used to
160 identify this frame as a prologue frame. */
161 CORE_ADDR func;
162
163 /* The program counter at the time this frame was created; i.e. where
164 this function was called from. It is used to identify this frame as a
165 stub frame. */
166 CORE_ADDR prev_pc;
167
168 /* The stack pointer at the time this frame was created; i.e. the
169 caller's stack pointer when this function was called. It is used
170 to identify this frame. */
171 CORE_ADDR prev_sp;
172
173 /* Is the target available to read from? */
174 int available_p;
175
176 /* The frame base for this frame is just prev_sp - frame size.
177 FRAMESIZE is the distance from the frame pointer to the
178 initial stack pointer. */
179 int framesize;
180
181 /* The register used to hold the frame pointer for this frame. */
182 int framereg;
183
184 /* Saved register offsets. */
185 struct trad_frame_saved_reg *saved_regs;
186 };
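/* The fields above relate as follows: once the prologue has been
   analysed, the caller's stack pointer is recovered as

     prev_sp = <value of framereg> + framesize

   and each saved_regs[i].addr initially holds an offset from prev_sp,
   until aarch64_make_prologue_cache_1 rebases it to an absolute
   address. */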
187
188 static void
189 show_aarch64_debug (struct ui_file *file, int from_tty,
190 struct cmd_list_element *c, const char *value)
191 {
192 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
193 }
194
195 namespace {
196
197 /* Abstract instruction reader. */
198
199 class abstract_instruction_reader
200 {
201 public:
202 /* Read in one instruction. */
203 virtual ULONGEST read (CORE_ADDR memaddr, int len,
204 enum bfd_endian byte_order) = 0;
205 };
206
207 /* Instruction reader from real target. */
208
209 class instruction_reader : public abstract_instruction_reader
210 {
211 public:
212 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
213 {
214 return read_code_unsigned_integer (memaddr, len, byte_order);
215 }
216 };
217
218 } // namespace
219
220 /* Analyze a prologue, looking for a recognizable stack frame
221 and frame pointer. Scan until we encounter a store that could
222 clobber the stack frame unexpectedly, or an unknown instruction. */
223
224 static CORE_ADDR
225 aarch64_analyze_prologue (struct gdbarch *gdbarch,
226 CORE_ADDR start, CORE_ADDR limit,
227 struct aarch64_prologue_cache *cache,
228 abstract_instruction_reader& reader)
229 {
230 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
231 int i;
232 /* Track X registers and D registers in prologue. */
233 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
234
235 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
236 regs[i] = pv_register (i, 0);
237 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
238
239 for (; start < limit; start += 4)
240 {
241 uint32_t insn;
242 aarch64_inst inst;
243
244 insn = reader.read (start, 4, byte_order_for_code);
245
246 if (aarch64_decode_insn (insn, &inst, 1) != 0)
247 break;
248
249 if (inst.opcode->iclass == addsub_imm
250 && (inst.opcode->op == OP_ADD
251 || strcmp ("sub", inst.opcode->name) == 0))
252 {
253 unsigned rd = inst.operands[0].reg.regno;
254 unsigned rn = inst.operands[1].reg.regno;
255
256 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
257 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
258 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
259 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
260
261 if (inst.opcode->op == OP_ADD)
262 {
263 regs[rd] = pv_add_constant (regs[rn],
264 inst.operands[2].imm.value);
265 }
266 else
267 {
268 regs[rd] = pv_add_constant (regs[rn],
269 -inst.operands[2].imm.value);
270 }
271 }
272 else if (inst.opcode->iclass == pcreladdr
273 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
274 {
275 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
276 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
277
278 regs[inst.operands[0].reg.regno] = pv_unknown ();
279 }
280 else if (inst.opcode->iclass == branch_imm)
281 {
282 /* Stop analysis on branch. */
283 break;
284 }
285 else if (inst.opcode->iclass == condbranch)
286 {
287 /* Stop analysis on branch. */
288 break;
289 }
290 else if (inst.opcode->iclass == branch_reg)
291 {
292 /* Stop analysis on branch. */
293 break;
294 }
295 else if (inst.opcode->iclass == compbranch)
296 {
297 /* Stop analysis on branch. */
298 break;
299 }
300 else if (inst.opcode->op == OP_MOVZ)
301 {
302 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
303 regs[inst.operands[0].reg.regno] = pv_unknown ();
304 }
305 else if (inst.opcode->iclass == log_shift
306 && strcmp (inst.opcode->name, "orr") == 0)
307 {
308 unsigned rd = inst.operands[0].reg.regno;
309 unsigned rn = inst.operands[1].reg.regno;
310 unsigned rm = inst.operands[2].reg.regno;
311
312 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
313 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
314 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
315
316 if (inst.operands[2].shifter.amount == 0
317 && rn == AARCH64_SP_REGNUM)
318 regs[rd] = regs[rm];
319 else
320 {
321 if (aarch64_debug)
322 {
323 debug_printf ("aarch64: prologue analysis gave up "
324 "addr=%s opcode=0x%x (orr x register)\n",
325 core_addr_to_string_nz (start), insn);
326 }
327 break;
328 }
329 }
330 else if (inst.opcode->op == OP_STUR)
331 {
332 unsigned rt = inst.operands[0].reg.regno;
333 unsigned rn = inst.operands[1].addr.base_regno;
334 int is64
335 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
336
337 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
338 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
339 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
340 gdb_assert (!inst.operands[1].addr.offset.is_reg);
341
342 stack.store (pv_add_constant (regs[rn],
343 inst.operands[1].addr.offset.imm),
344 is64 ? 8 : 4, regs[rt]);
345 }
346 else if ((inst.opcode->iclass == ldstpair_off
347 || (inst.opcode->iclass == ldstpair_indexed
348 && inst.operands[2].addr.preind))
349 && strcmp ("stp", inst.opcode->name) == 0)
350 {
351 /* STP with addressing mode Pre-indexed and Base register. */
352 unsigned rt1;
353 unsigned rt2;
354 unsigned rn = inst.operands[2].addr.base_regno;
355 int32_t imm = inst.operands[2].addr.offset.imm;
356
357 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
358 || inst.operands[0].type == AARCH64_OPND_Ft);
359 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
360 || inst.operands[1].type == AARCH64_OPND_Ft2);
361 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
362 gdb_assert (!inst.operands[2].addr.offset.is_reg);
363
364 /* If recording this store would invalidate the store area
365 (perhaps because rn is not known) then we should abandon
366 further prologue analysis. */
367 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
368 break;
369
370 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
371 break;
372
373 rt1 = inst.operands[0].reg.regno;
374 rt2 = inst.operands[1].reg.regno;
375 if (inst.operands[0].type == AARCH64_OPND_Ft)
376 {
377 /* Only the bottom 64 bits of each V register (the D
378 register) need to be preserved. */
379 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
380 rt1 += AARCH64_X_REGISTER_COUNT;
381 rt2 += AARCH64_X_REGISTER_COUNT;
382 }
383
384 stack.store (pv_add_constant (regs[rn], imm), 8,
385 regs[rt1]);
386 stack.store (pv_add_constant (regs[rn], imm + 8), 8,
387 regs[rt2]);
388
389 if (inst.operands[2].addr.writeback)
390 regs[rn] = pv_add_constant (regs[rn], imm);
391
392 }
393 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
394 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
395 && (inst.opcode->op == OP_STR_POS
396 || inst.opcode->op == OP_STRF_POS)))
397 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
398 && strcmp ("str", inst.opcode->name) == 0)
399 {
400 /* STR (immediate) */
401 unsigned int rt = inst.operands[0].reg.regno;
402 int32_t imm = inst.operands[1].addr.offset.imm;
403 unsigned int rn = inst.operands[1].addr.base_regno;
404 bool is64
405 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
406 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
407 || inst.operands[0].type == AARCH64_OPND_Ft);
408
409 if (inst.operands[0].type == AARCH64_OPND_Ft)
410 {
411 /* Only the bottom 64 bits of each V register (the D
412 register) need to be preserved. */
413 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
414 rt += AARCH64_X_REGISTER_COUNT;
415 }
416
417 stack.store (pv_add_constant (regs[rn], imm),
418 is64 ? 8 : 4, regs[rt]);
419 if (inst.operands[1].addr.writeback)
420 regs[rn] = pv_add_constant (regs[rn], imm);
421 }
422 else if (inst.opcode->iclass == testbranch)
423 {
424 /* Stop analysis on branch. */
425 break;
426 }
427 else
428 {
429 if (aarch64_debug)
430 {
431 debug_printf ("aarch64: prologue analysis gave up addr=%s"
432 " opcode=0x%x\n",
433 core_addr_to_string_nz (start), insn);
434 }
435 break;
436 }
437 }
438
439 if (cache == NULL)
440 return start;
441
442 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
443 {
444 /* Frame pointer is fp. Frame size is constant. */
445 cache->framereg = AARCH64_FP_REGNUM;
446 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
447 }
448 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
449 {
450 /* Try the stack pointer. */
451 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
452 cache->framereg = AARCH64_SP_REGNUM;
453 }
454 else
455 {
456 /* We're just out of luck. We don't know where the frame is. */
457 cache->framereg = -1;
458 cache->framesize = 0;
459 }
460
461 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
462 {
463 CORE_ADDR offset;
464
465 if (stack.find_reg (gdbarch, i, &offset))
466 cache->saved_regs[i].addr = offset;
467 }
468
469 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
470 {
471 int regnum = gdbarch_num_regs (gdbarch);
472 CORE_ADDR offset;
473
474 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
475 &offset))
476 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
477 }
478
479 return start;
480 }
481
482 static CORE_ADDR
483 aarch64_analyze_prologue (struct gdbarch *gdbarch,
484 CORE_ADDR start, CORE_ADDR limit,
485 struct aarch64_prologue_cache *cache)
486 {
487 instruction_reader reader;
488
489 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
490 reader);
491 }
492
493 #if GDB_SELF_TEST
494
495 namespace selftests {
496
497 /* Instruction reader from manually cooked instruction sequences. */
498
499 class instruction_reader_test : public abstract_instruction_reader
500 {
501 public:
502 template<size_t SIZE>
503 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
504 : m_insns (insns), m_insns_size (SIZE)
505 {}
506
507 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
508 {
509 SELF_CHECK (len == 4);
510 SELF_CHECK (memaddr % 4 == 0);
511 SELF_CHECK (memaddr / 4 < m_insns_size);
512
513 return m_insns[memaddr / 4];
514 }
515
516 private:
517 const uint32_t *m_insns;
518 size_t m_insns_size;
519 };
520
521 static void
522 aarch64_analyze_prologue_test (void)
523 {
524 struct gdbarch_info info;
525
526 gdbarch_info_init (&info);
527 info.bfd_arch_info = bfd_scan_arch ("aarch64");
528
529 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
530 SELF_CHECK (gdbarch != NULL);
531
532 /* Test the simple prologue in which frame pointer is used. */
533 {
534 struct aarch64_prologue_cache cache;
535 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
536
537 static const uint32_t insns[] = {
538 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
539 0x910003fd, /* mov x29, sp */
540 0x97ffffe6, /* bl 0x400580 */
541 };
542 instruction_reader_test reader (insns);
543
544 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
545 SELF_CHECK (end == 4 * 2);
546
547 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
548 SELF_CHECK (cache.framesize == 272);
549
550 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
551 {
552 if (i == AARCH64_FP_REGNUM)
553 SELF_CHECK (cache.saved_regs[i].addr == -272);
554 else if (i == AARCH64_LR_REGNUM)
555 SELF_CHECK (cache.saved_regs[i].addr == -264);
556 else
557 SELF_CHECK (cache.saved_regs[i].addr == -1);
558 }
559
560 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
561 {
562 int regnum = gdbarch_num_regs (gdbarch);
563
564 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
565 == -1);
566 }
567 }
568
569 /* Test a prologue in which STR is used and the frame pointer is
570 not used. */
571 {
572 struct aarch64_prologue_cache cache;
573 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
574
575 static const uint32_t insns[] = {
576 0xf81d0ff3, /* str x19, [sp, #-48]! */
577 0xb9002fe0, /* str w0, [sp, #44] */
578 0xf90013e1, /* str x1, [sp, #32] */
579 0xfd000fe0, /* str d0, [sp, #24] */
580 0xaa0203f3, /* mov x19, x2 */
581 0xf94013e0, /* ldr x0, [sp, #32] */
582 };
583 instruction_reader_test reader (insns);
584
585 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
586
587 SELF_CHECK (end == 4 * 5);
588
589 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
590 SELF_CHECK (cache.framesize == 48);
591
592 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
593 {
594 if (i == 1)
595 SELF_CHECK (cache.saved_regs[i].addr == -16);
596 else if (i == 19)
597 SELF_CHECK (cache.saved_regs[i].addr == -48);
598 else
599 SELF_CHECK (cache.saved_regs[i].addr == -1);
600 }
601
602 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
603 {
604 int regnum = gdbarch_num_regs (gdbarch);
605
606 if (i == 0)
607 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
608 == -24);
609 else
610 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
611 == -1);
612 }
613 }
614 }
615 } // namespace selftests
616 #endif /* GDB_SELF_TEST */
617
618 /* Implement the "skip_prologue" gdbarch method. */
619
620 static CORE_ADDR
621 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
622 {
623 CORE_ADDR func_addr, limit_pc;
624
625 /* See if we can determine the end of the prologue via the symbol
626 table. If so, then return either PC, or the PC after the
627 prologue, whichever is greater. */
628 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
629 {
630 CORE_ADDR post_prologue_pc
631 = skip_prologue_using_sal (gdbarch, func_addr);
632
633 if (post_prologue_pc != 0)
634 return std::max (pc, post_prologue_pc);
635 }
636
637 /* Can't determine prologue from the symbol table, need to examine
638 instructions. */
639
640 /* Find an upper limit on the function prologue using the debug
641 information. If the debug information could not be used to
642 provide that bound, then use an arbitrary large number as the
643 upper bound. */
644 limit_pc = skip_prologue_using_sal (gdbarch, pc);
645 if (limit_pc == 0)
646 limit_pc = pc + 128; /* Magic. */
647
648 /* Try disassembling prologue. */
649 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
650 }
651
652 /* Scan the function prologue for THIS_FRAME and populate the prologue
653 cache CACHE. */
654
655 static void
656 aarch64_scan_prologue (struct frame_info *this_frame,
657 struct aarch64_prologue_cache *cache)
658 {
659 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
660 CORE_ADDR prologue_start;
661 CORE_ADDR prologue_end;
662 CORE_ADDR prev_pc = get_frame_pc (this_frame);
663 struct gdbarch *gdbarch = get_frame_arch (this_frame);
664
665 cache->prev_pc = prev_pc;
666
667 /* Assume we do not find a frame. */
668 cache->framereg = -1;
669 cache->framesize = 0;
670
671 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
672 &prologue_end))
673 {
674 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
675
676 if (sal.line == 0)
677 {
678 /* No line info so use the current PC. */
679 prologue_end = prev_pc;
680 }
681 else if (sal.end < prologue_end)
682 {
683 /* The next line begins after the function end. */
684 prologue_end = sal.end;
685 }
686
687 prologue_end = std::min (prologue_end, prev_pc);
688 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
689 }
690 else
691 {
692 CORE_ADDR frame_loc;
693
694 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
695 if (frame_loc == 0)
696 return;
697
698 cache->framereg = AARCH64_FP_REGNUM;
699 cache->framesize = 16;
700 cache->saved_regs[29].addr = 0;
701 cache->saved_regs[30].addr = 8;
702 }
703 }
704
705 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
706 function may throw an exception if the inferior's registers or memory are
707 not available. */
708
709 static void
710 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
711 struct aarch64_prologue_cache *cache)
712 {
713 CORE_ADDR unwound_fp;
714 int reg;
715
716 aarch64_scan_prologue (this_frame, cache);
717
718 if (cache->framereg == -1)
719 return;
720
721 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
722 if (unwound_fp == 0)
723 return;
724
725 cache->prev_sp = unwound_fp + cache->framesize;
726
727 /* Calculate actual addresses of saved registers using offsets
728 determined by aarch64_analyze_prologue. */
729 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
730 if (trad_frame_addr_p (cache->saved_regs, reg))
731 cache->saved_regs[reg].addr += cache->prev_sp;
732
733 cache->func = get_frame_func (this_frame);
734
735 cache->available_p = 1;
736 }
737
738 /* Allocate and fill in *THIS_CACHE with information about the prologue of
739 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
740 Return a pointer to the current aarch64_prologue_cache in
741 *THIS_CACHE. */
742
743 static struct aarch64_prologue_cache *
744 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
745 {
746 struct aarch64_prologue_cache *cache;
747
748 if (*this_cache != NULL)
749 return (struct aarch64_prologue_cache *) *this_cache;
750
751 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
752 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
753 *this_cache = cache;
754
755 TRY
756 {
757 aarch64_make_prologue_cache_1 (this_frame, cache);
758 }
759 CATCH (ex, RETURN_MASK_ERROR)
760 {
761 if (ex.error != NOT_AVAILABLE_ERROR)
762 throw_exception (ex);
763 }
764 END_CATCH
765
766 return cache;
767 }
768
769 /* Implement the "stop_reason" frame_unwind method. */
770
771 static enum unwind_stop_reason
772 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
773 void **this_cache)
774 {
775 struct aarch64_prologue_cache *cache
776 = aarch64_make_prologue_cache (this_frame, this_cache);
777
778 if (!cache->available_p)
779 return UNWIND_UNAVAILABLE;
780
781 /* Halt the backtrace at "_start". */
782 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
783 return UNWIND_OUTERMOST;
784
785 /* We've hit a wall, stop. */
786 if (cache->prev_sp == 0)
787 return UNWIND_OUTERMOST;
788
789 return UNWIND_NO_REASON;
790 }
791
792 /* Our frame ID for a normal frame is the current function's starting
793 PC and the caller's SP when we were called. */
794
795 static void
796 aarch64_prologue_this_id (struct frame_info *this_frame,
797 void **this_cache, struct frame_id *this_id)
798 {
799 struct aarch64_prologue_cache *cache
800 = aarch64_make_prologue_cache (this_frame, this_cache);
801
802 if (!cache->available_p)
803 *this_id = frame_id_build_unavailable_stack (cache->func);
804 else
805 *this_id = frame_id_build (cache->prev_sp, cache->func);
806 }
807
808 /* Implement the "prev_register" frame_unwind method. */
809
810 static struct value *
811 aarch64_prologue_prev_register (struct frame_info *this_frame,
812 void **this_cache, int prev_regnum)
813 {
814 struct aarch64_prologue_cache *cache
815 = aarch64_make_prologue_cache (this_frame, this_cache);
816
817 /* If we are asked to unwind the PC, then we need to return the LR
818 instead. The prologue may save PC, but it will point into this
819 frame's prologue, not the next frame's resume location. */
820 if (prev_regnum == AARCH64_PC_REGNUM)
821 {
822 CORE_ADDR lr;
823
824 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
825 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
826 }
827
828 /* SP is generally not saved to the stack, but this frame is
829 identified by the next frame's stack pointer at the time of the
830 call. The value was already reconstructed into PREV_SP. */
831 /*
832 +----------+ ^
833 | saved lr | |
834 +->| saved fp |--+
835 | | |
836 | | | <- Previous SP
837 | +----------+
838 | | saved lr |
839 +--| saved fp |<- FP
840 | |
841 | |<- SP
842 +----------+ */
843 if (prev_regnum == AARCH64_SP_REGNUM)
844 return frame_unwind_got_constant (this_frame, prev_regnum,
845 cache->prev_sp);
846
847 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
848 prev_regnum);
849 }
850
851 /* AArch64 prologue unwinder. */
852 struct frame_unwind aarch64_prologue_unwind =
853 {
854 NORMAL_FRAME,
855 aarch64_prologue_frame_unwind_stop_reason,
856 aarch64_prologue_this_id,
857 aarch64_prologue_prev_register,
858 NULL,
859 default_frame_sniffer
860 };
861
862 /* Allocate and fill in *THIS_CACHE with information about the prologue of
863 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
864 Return a pointer to the current aarch64_prologue_cache in
865 *THIS_CACHE. */
866
867 static struct aarch64_prologue_cache *
868 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
869 {
870 struct aarch64_prologue_cache *cache;
871
872 if (*this_cache != NULL)
873 return (struct aarch64_prologue_cache *) *this_cache;
874
875 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
876 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
877 *this_cache = cache;
878
879 TRY
880 {
881 cache->prev_sp = get_frame_register_unsigned (this_frame,
882 AARCH64_SP_REGNUM);
883 cache->prev_pc = get_frame_pc (this_frame);
884 cache->available_p = 1;
885 }
886 CATCH (ex, RETURN_MASK_ERROR)
887 {
888 if (ex.error != NOT_AVAILABLE_ERROR)
889 throw_exception (ex);
890 }
891 END_CATCH
892
893 return cache;
894 }
895
896 /* Implement the "stop_reason" frame_unwind method. */
897
898 static enum unwind_stop_reason
899 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
900 void **this_cache)
901 {
902 struct aarch64_prologue_cache *cache
903 = aarch64_make_stub_cache (this_frame, this_cache);
904
905 if (!cache->available_p)
906 return UNWIND_UNAVAILABLE;
907
908 return UNWIND_NO_REASON;
909 }
910
911 /* Our frame ID for a stub frame is the current SP and LR. */
912
913 static void
914 aarch64_stub_this_id (struct frame_info *this_frame,
915 void **this_cache, struct frame_id *this_id)
916 {
917 struct aarch64_prologue_cache *cache
918 = aarch64_make_stub_cache (this_frame, this_cache);
919
920 if (cache->available_p)
921 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
922 else
923 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
924 }
925
926 /* Implement the "sniffer" frame_unwind method. */
927
928 static int
929 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
930 struct frame_info *this_frame,
931 void **this_prologue_cache)
932 {
933 CORE_ADDR addr_in_block;
934 gdb_byte dummy[4];
935
936 addr_in_block = get_frame_address_in_block (this_frame);
937 if (in_plt_section (addr_in_block)
938 /* We also use the stub unwinder if the target memory is unreadable
939 to avoid having the prologue unwinder trying to read it. */
940 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
941 return 1;
942
943 return 0;
944 }
945
946 /* AArch64 stub unwinder. */
947 struct frame_unwind aarch64_stub_unwind =
948 {
949 NORMAL_FRAME,
950 aarch64_stub_frame_unwind_stop_reason,
951 aarch64_stub_this_id,
952 aarch64_prologue_prev_register,
953 NULL,
954 aarch64_stub_unwind_sniffer
955 };
956
957 /* Return the frame base address of *THIS_FRAME. */
958
959 static CORE_ADDR
960 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
961 {
962 struct aarch64_prologue_cache *cache
963 = aarch64_make_prologue_cache (this_frame, this_cache);
964
965 return cache->prev_sp - cache->framesize;
966 }
967
968 /* AArch64 default frame base information. */
969 struct frame_base aarch64_normal_base =
970 {
971 &aarch64_prologue_unwind,
972 aarch64_normal_frame_base,
973 aarch64_normal_frame_base,
974 aarch64_normal_frame_base
975 };
976
977 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
978 dummy frame. The frame ID's base needs to match the TOS value
979 saved by save_dummy_frame_tos () and returned from
980 aarch64_push_dummy_call, and the PC needs to match the dummy
981 frame's breakpoint. */
982
983 static struct frame_id
984 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
985 {
986 return frame_id_build (get_frame_register_unsigned (this_frame,
987 AARCH64_SP_REGNUM),
988 get_frame_pc (this_frame));
989 }
990
991 /* Implement the "unwind_pc" gdbarch method. */
992
993 static CORE_ADDR
994 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
995 {
996 CORE_ADDR pc
997 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
998
999 return pc;
1000 }
1001
1002 /* Implement the "unwind_sp" gdbarch method. */
1003
1004 static CORE_ADDR
1005 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1006 {
1007 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1008 }
1009
1010 /* Return the value of the REGNUM register in the previous frame of
1011 *THIS_FRAME. */
1012
1013 static struct value *
1014 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1015 void **this_cache, int regnum)
1016 {
1017 CORE_ADDR lr;
1018
1019 switch (regnum)
1020 {
1021 case AARCH64_PC_REGNUM:
1022 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1023 return frame_unwind_got_constant (this_frame, regnum, lr);
1024
1025 default:
1026 internal_error (__FILE__, __LINE__,
1027 _("Unexpected register %d"), regnum);
1028 }
1029 }
1030
1031 /* Implement the "init_reg" dwarf2_frame_ops method. */
1032
1033 static void
1034 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1035 struct dwarf2_frame_state_reg *reg,
1036 struct frame_info *this_frame)
1037 {
1038 switch (regnum)
1039 {
1040 case AARCH64_PC_REGNUM:
1041 reg->how = DWARF2_FRAME_REG_FN;
1042 reg->loc.fn = aarch64_dwarf2_prev_register;
1043 break;
1044 case AARCH64_SP_REGNUM:
1045 reg->how = DWARF2_FRAME_REG_CFA;
1046 break;
1047 }
1048 }
1049
1050 /* When arguments must be pushed onto the stack, they go on in reverse
1051 order. The code below implements a FILO (stack) to do this. */
1052
1053 typedef struct
1054 {
1055 /* Value to pass on stack. It can be NULL if this item is for stack
1056 padding. */
1057 const gdb_byte *data;
1058
1059 /* Size in bytes of value to pass on stack. */
1060 int len;
1061 } stack_item_t;
1062
1063 DEF_VEC_O (stack_item_t);
1064
1065 /* Return the alignment (in bytes) of the given type. */
1066
1067 static int
1068 aarch64_type_align (struct type *t)
1069 {
1070 int n;
1071 int align;
1072 int falign;
1073
1074 t = check_typedef (t);
1075 switch (TYPE_CODE (t))
1076 {
1077 default:
1078 /* Should never happen. */
1079 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1080 return 4;
1081
1082 case TYPE_CODE_PTR:
1083 case TYPE_CODE_ENUM:
1084 case TYPE_CODE_INT:
1085 case TYPE_CODE_FLT:
1086 case TYPE_CODE_SET:
1087 case TYPE_CODE_RANGE:
1088 case TYPE_CODE_BITSTRING:
1089 case TYPE_CODE_REF:
1090 case TYPE_CODE_RVALUE_REF:
1091 case TYPE_CODE_CHAR:
1092 case TYPE_CODE_BOOL:
1093 return TYPE_LENGTH (t);
1094
1095 case TYPE_CODE_ARRAY:
1096 if (TYPE_VECTOR (t))
1097 {
1098 /* Use the natural alignment for vector types (the same as for
1099 scalar types), but the maximum alignment is 128 bits. */
1100 if (TYPE_LENGTH (t) > 16)
1101 return 16;
1102 else
1103 return TYPE_LENGTH (t);
1104 }
1105 else
1106 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1107 case TYPE_CODE_COMPLEX:
1108 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1109
1110 case TYPE_CODE_STRUCT:
1111 case TYPE_CODE_UNION:
1112 align = 1;
1113 for (n = 0; n < TYPE_NFIELDS (t); n++)
1114 {
1115 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1116 if (falign > align)
1117 align = falign;
1118 }
1119 return align;
1120 }
1121 }
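/* For example, aarch64_type_align reports 8 for
   struct { char c; double d; } (the largest member alignment), and 16
   for a 16-byte short vector, which is the AAPCS64 maximum. */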
1122
1123 /* Return 1 if *TY is a homogeneous floating-point aggregate or
1124 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
1125 document; otherwise return 0. */
1126
1127 static int
1128 is_hfa_or_hva (struct type *ty)
1129 {
1130 switch (TYPE_CODE (ty))
1131 {
1132 case TYPE_CODE_ARRAY:
1133 {
1134 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1135
1136 if (TYPE_VECTOR (ty))
1137 return 0;
1138
1139 if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
1140 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
1141 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
1142 && TYPE_VECTOR (target_ty))))
1143 return 1;
1144 break;
1145 }
1146
1147 case TYPE_CODE_UNION:
1148 case TYPE_CODE_STRUCT:
1149 {
1150 /* HFA or HVA has at most four members. */
1151 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1152 {
1153 struct type *member0_type;
1154
1155 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1156 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
1157 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
1158 && TYPE_VECTOR (member0_type)))
1159 {
1160 int i;
1161
1162 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1163 {
1164 struct type *member1_type;
1165
1166 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1167 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1168 || (TYPE_LENGTH (member0_type)
1169 != TYPE_LENGTH (member1_type)))
1170 return 0;
1171 }
1172 return 1;
1173 }
1174 }
1175 return 0;
1176 }
1177
1178 default:
1179 break;
1180 }
1181
1182 return 0;
1183 }
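/* For example, struct { float x, y, z; } is an HFA (at most four
   members, all of the same floating-point type), so it is passed and
   returned in V registers, whereas struct { float x; double y; } is
   not, because the member types differ. */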
1184
1185 /* AArch64 function call information structure. */
1186 struct aarch64_call_info
1187 {
1188 /* the current argument number. */
1189 unsigned argnum;
1190
1191 /* The next general purpose register number, equivalent to NGRN as
1192 described in the AArch64 Procedure Call Standard. */
1193 unsigned ngrn;
1194
1195 /* The next SIMD and floating point register number, equivalent to
1196 NSRN as described in the AArch64 Procedure Call Standard. */
1197 unsigned nsrn;
1198
1199 /* The next stacked argument address, equivalent to NSAA as
1200 described in the AArch64 Procedure Call Standard. */
1201 unsigned nsaa;
1202
1203 /* Stack item vector. */
1204 VEC(stack_item_t) *si;
1205 };
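/* Per stage A of the AAPCS64 marshalling algorithm, NGRN, NSRN and
   NSAA all start at zero; aarch64_push_dummy_call below achieves this
   simply by memset-ing the whole structure before placing any
   arguments. */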
1206
1207 /* Pass a value in a sequence of consecutive X registers. The caller
1208 is responsible for ensuring sufficient registers are available. */
1209
1210 static void
1211 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1212 struct aarch64_call_info *info, struct type *type,
1213 struct value *arg)
1214 {
1215 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1216 int len = TYPE_LENGTH (type);
1217 enum type_code typecode = TYPE_CODE (type);
1218 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1219 const bfd_byte *buf = value_contents (arg);
1220
1221 info->argnum++;
1222
1223 while (len > 0)
1224 {
1225 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1226 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1227 byte_order);
1228
1229
1230 /* Adjust sub-word struct/union args when big-endian. */
1231 if (byte_order == BFD_ENDIAN_BIG
1232 && partial_len < X_REGISTER_SIZE
1233 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1234 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1235
1236 if (aarch64_debug)
1237 {
1238 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1239 gdbarch_register_name (gdbarch, regnum),
1240 phex (regval, X_REGISTER_SIZE));
1241 }
1242 regcache_cooked_write_unsigned (regcache, regnum, regval);
1243 len -= partial_len;
1244 buf += partial_len;
1245 regnum++;
1246 }
1247 }
1248
1249 /* Attempt to marshall a value in a V register. Return 1 if
1250 successful, or 0 if insufficient registers are available. This
1251 function, unlike the equivalent pass_in_x() function does not
1252 handle arguments spread across multiple registers. */
1253
1254 static int
1255 pass_in_v (struct gdbarch *gdbarch,
1256 struct regcache *regcache,
1257 struct aarch64_call_info *info,
1258 int len, const bfd_byte *buf)
1259 {
1260 if (info->nsrn < 8)
1261 {
1262 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1263 gdb_byte reg[V_REGISTER_SIZE];
1264
1265 info->argnum++;
1266 info->nsrn++;
1267
1268 memset (reg, 0, sizeof (reg));
1269 /* PCS C.1, the argument is allocated to the least significant
1270 bits of the V register. */
1271 memcpy (reg, buf, len);
1272 regcache_cooked_write (regcache, regnum, reg);
1273
1274 if (aarch64_debug)
1275 {
1276 debug_printf ("arg %d in %s\n", info->argnum,
1277 gdbarch_register_name (gdbarch, regnum));
1278 }
1279 return 1;
1280 }
1281 info->nsrn = 8;
1282 return 0;
1283 }
1284
1285 /* Marshall an argument onto the stack. */
1286
1287 static void
1288 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1289 struct value *arg)
1290 {
1291 const bfd_byte *buf = value_contents (arg);
1292 int len = TYPE_LENGTH (type);
1293 int align;
1294 stack_item_t item;
1295
1296 info->argnum++;
1297
1298 align = aarch64_type_align (type);
1299
1300 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1301 natural alignment of the argument's type. */
1302 align = align_up (align, 8);
1303
1304 /* The AArch64 PCS requires at most doubleword alignment. */
1305 if (align > 16)
1306 align = 16;
1307
1308 if (aarch64_debug)
1309 {
1310 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1311 info->nsaa);
1312 }
1313
1314 item.len = len;
1315 item.data = buf;
1316 VEC_safe_push (stack_item_t, info->si, &item);
1317
1318 info->nsaa += len;
1319 if (info->nsaa & (align - 1))
1320 {
1321 /* Push stack alignment padding. */
1322 int pad = align - (info->nsaa & (align - 1));
1323
1324 item.len = pad;
1325 item.data = NULL;
1326
1327 VEC_safe_push (stack_item_t, info->si, &item);
1328 info->nsaa += pad;
1329 }
1330 }
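/* For example, pushing a 4-byte int when NSAA is 0 advances NSAA to 4
   and then pushes 4 bytes of padding (a stack_item_t with data == NULL)
   so that the next argument slot is again 8-byte aligned. */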
1331
1332 /* Marshall an argument into a sequence of one or more consecutive X
1333 registers or, if insufficient X registers are available then onto
1334 the stack. */
1335
1336 static void
1337 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1338 struct aarch64_call_info *info, struct type *type,
1339 struct value *arg)
1340 {
1341 int len = TYPE_LENGTH (type);
1342 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1343
1344 /* PCS C.13 - Pass in registers if we have enough spare. */
1345 if (info->ngrn + nregs <= 8)
1346 {
1347 pass_in_x (gdbarch, regcache, info, type, arg);
1348 info->ngrn += nregs;
1349 }
1350 else
1351 {
1352 info->ngrn = 8;
1353 pass_on_stack (info, type, arg);
1354 }
1355 }
1356
1357 /* Pass a value in a V register, or on the stack if insufficient are
1358 available. */
1359
1360 static void
1361 pass_in_v_or_stack (struct gdbarch *gdbarch,
1362 struct regcache *regcache,
1363 struct aarch64_call_info *info,
1364 struct type *type,
1365 struct value *arg)
1366 {
1367 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1368 value_contents (arg)))
1369 pass_on_stack (info, type, arg);
1370 }
1371
1372 /* Implement the "push_dummy_call" gdbarch method. */
1373
1374 static CORE_ADDR
1375 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1376 struct regcache *regcache, CORE_ADDR bp_addr,
1377 int nargs,
1378 struct value **args, CORE_ADDR sp, int struct_return,
1379 CORE_ADDR struct_addr)
1380 {
1381 int argnum;
1382 struct aarch64_call_info info;
1383 struct type *func_type;
1384 struct type *return_type;
1385 int lang_struct_return;
1386
1387 memset (&info, 0, sizeof (info));
1388
1389 /* We need to know what the type of the called function is in order
1390 to determine the number of named/anonymous arguments for the
1391 actual argument placement, and the return type in order to handle
1392 return value correctly.
1393
1394 The generic code above us views the decision of return in memory
1395 or return in registers as a two-stage process. The language
1396 handler is consulted first and may decide to return in memory (e.g.
1397 a class with a copy constructor returned by value); this will cause
1398 the generic code to allocate space AND insert an initial leading
1399 argument.
1400
1401 If the language code does not decide to pass in memory then the
1402 target code is consulted.
1403
1404 If the language code decides to pass in memory we want to move
1405 the pointer inserted as the initial argument from the argument
1406 list and into X8, the conventional AArch64 struct return pointer
1407 register.
1408
1409 This is slightly awkward; ideally the flag "lang_struct_return"
1410 would be passed to the target's implementation of push_dummy_call.
1411 Rather than change the target interface we call the language code
1412 directly ourselves. */
1413
1414 func_type = check_typedef (value_type (function));
1415
1416 /* Dereference function pointer types. */
1417 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1418 func_type = TYPE_TARGET_TYPE (func_type);
1419
1420 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1421 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1422
1423 /* If language_pass_by_reference () returned true we will have been
1424 given an additional initial argument, a hidden pointer to the
1425 return slot in memory. */
1426 return_type = TYPE_TARGET_TYPE (func_type);
1427 lang_struct_return = language_pass_by_reference (return_type);
1428
1429 /* Set the return address. For the AArch64, the return breakpoint
1430 is always at BP_ADDR. */
1431 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1432
1433 /* If we were given an initial argument for the return slot because
1434 lang_struct_return was true, lose it. */
1435 if (lang_struct_return)
1436 {
1437 args++;
1438 nargs--;
1439 }
1440
1441 /* The struct_return pointer occupies X8. */
1442 if (struct_return || lang_struct_return)
1443 {
1444 if (aarch64_debug)
1445 {
1446 debug_printf ("struct return in %s = 0x%s\n",
1447 gdbarch_register_name (gdbarch,
1448 AARCH64_STRUCT_RETURN_REGNUM),
1449 paddress (gdbarch, struct_addr));
1450 }
1451 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1452 struct_addr);
1453 }
1454
1455 for (argnum = 0; argnum < nargs; argnum++)
1456 {
1457 struct value *arg = args[argnum];
1458 struct type *arg_type;
1459 int len;
1460
1461 arg_type = check_typedef (value_type (arg));
1462 len = TYPE_LENGTH (arg_type);
1463
1464 switch (TYPE_CODE (arg_type))
1465 {
1466 case TYPE_CODE_INT:
1467 case TYPE_CODE_BOOL:
1468 case TYPE_CODE_CHAR:
1469 case TYPE_CODE_RANGE:
1470 case TYPE_CODE_ENUM:
1471 if (len < 4)
1472 {
1473 /* Promote to 32 bit integer. */
1474 if (TYPE_UNSIGNED (arg_type))
1475 arg_type = builtin_type (gdbarch)->builtin_uint32;
1476 else
1477 arg_type = builtin_type (gdbarch)->builtin_int32;
1478 arg = value_cast (arg_type, arg);
1479 }
1480 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1481 break;
1482
1483 case TYPE_CODE_COMPLEX:
1484 if (info.nsrn <= 6)
1485 {
1486 const bfd_byte *buf = value_contents (arg);
1487 struct type *target_type =
1488 check_typedef (TYPE_TARGET_TYPE (arg_type));
1489
1490 pass_in_v (gdbarch, regcache, &info,
1491 TYPE_LENGTH (target_type), buf);
1492 pass_in_v (gdbarch, regcache, &info,
1493 TYPE_LENGTH (target_type),
1494 buf + TYPE_LENGTH (target_type));
1495 }
1496 else
1497 {
1498 info.nsrn = 8;
1499 pass_on_stack (&info, arg_type, arg);
1500 }
1501 break;
1502 case TYPE_CODE_FLT:
1503 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1504 break;
1505
1506 case TYPE_CODE_STRUCT:
1507 case TYPE_CODE_ARRAY:
1508 case TYPE_CODE_UNION:
1509 if (is_hfa_or_hva (arg_type))
1510 {
1511 int elements = TYPE_NFIELDS (arg_type);
1512
1513 /* Homogeneous Aggregates */
1514 if (info.nsrn + elements < 8)
1515 {
1516 int i;
1517
1518 for (i = 0; i < elements; i++)
1519 {
1520 /* We know that we have sufficient registers
1521 available therefore this will never fallback
1522 to the stack. */
1523 struct value *field =
1524 value_primitive_field (arg, 0, i, arg_type);
1525 struct type *field_type =
1526 check_typedef (value_type (field));
1527
1528 pass_in_v_or_stack (gdbarch, regcache, &info,
1529 field_type, field);
1530 }
1531 }
1532 else
1533 {
1534 info.nsrn = 8;
1535 pass_on_stack (&info, arg_type, arg);
1536 }
1537 }
1538 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1539 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1540 {
1541 /* Short vector types are passed in V registers. */
1542 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1543 }
1544 else if (len > 16)
1545 {
1546 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1547 invisible reference. */
1548
1549 /* Allocate aligned storage. */
1550 sp = align_down (sp - len, 16);
1551
1552 /* Write the real data into the stack. */
1553 write_memory (sp, value_contents (arg), len);
1554
1555 /* Construct the indirection. */
1556 arg_type = lookup_pointer_type (arg_type);
1557 arg = value_from_pointer (arg_type, sp);
1558 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1559 }
1560 else
1561 /* PCS C.15 / C.18 multiple values pass. */
1562 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1563 break;
1564
1565 default:
1566 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1567 break;
1568 }
1569 }
1570
1571 /* Make sure stack retains 16 byte alignment. */
1572 if (info.nsaa & 15)
1573 sp -= 16 - (info.nsaa & 15);
1574
1575 while (!VEC_empty (stack_item_t, info.si))
1576 {
1577 stack_item_t *si = VEC_last (stack_item_t, info.si);
1578
1579 sp -= si->len;
1580 if (si->data != NULL)
1581 write_memory (sp, si->data, si->len);
1582 VEC_pop (stack_item_t, info.si);
1583 }
1584
1585 VEC_free (stack_item_t, info.si);
1586
1587 /* Finally, update the SP register. */
1588 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1589
1590 return sp;
1591 }
1592
1593 /* Implement the "frame_align" gdbarch method. */
1594
1595 static CORE_ADDR
1596 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1597 {
1598 /* Align the stack to sixteen bytes. */
1599 return sp & ~(CORE_ADDR) 15;
1600 }
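/* For example, aarch64_frame_align rounds 0x7ffffffff9 down to
   0x7ffffffff0; the AAPCS64 requires SP to be 16-byte aligned at any
   public interface. */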
1601
1602 /* Return the type for an AdvSIMD Q register. */
1603
1604 static struct type *
1605 aarch64_vnq_type (struct gdbarch *gdbarch)
1606 {
1607 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1608
1609 if (tdep->vnq_type == NULL)
1610 {
1611 struct type *t;
1612 struct type *elem;
1613
1614 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1615 TYPE_CODE_UNION);
1616
1617 elem = builtin_type (gdbarch)->builtin_uint128;
1618 append_composite_type_field (t, "u", elem);
1619
1620 elem = builtin_type (gdbarch)->builtin_int128;
1621 append_composite_type_field (t, "s", elem);
1622
1623 tdep->vnq_type = t;
1624 }
1625
1626 return tdep->vnq_type;
1627 }
1628
1629 /* Return the type for an AdvSIMD D register. */
1630
1631 static struct type *
1632 aarch64_vnd_type (struct gdbarch *gdbarch)
1633 {
1634 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1635
1636 if (tdep->vnd_type == NULL)
1637 {
1638 struct type *t;
1639 struct type *elem;
1640
1641 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1642 TYPE_CODE_UNION);
1643
1644 elem = builtin_type (gdbarch)->builtin_double;
1645 append_composite_type_field (t, "f", elem);
1646
1647 elem = builtin_type (gdbarch)->builtin_uint64;
1648 append_composite_type_field (t, "u", elem);
1649
1650 elem = builtin_type (gdbarch)->builtin_int64;
1651 append_composite_type_field (t, "s", elem);
1652
1653 tdep->vnd_type = t;
1654 }
1655
1656 return tdep->vnd_type;
1657 }
1658
1659 /* Return the type for an AdvSIMD S register. */
1660
1661 static struct type *
1662 aarch64_vns_type (struct gdbarch *gdbarch)
1663 {
1664 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1665
1666 if (tdep->vns_type == NULL)
1667 {
1668 struct type *t;
1669 struct type *elem;
1670
1671 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1672 TYPE_CODE_UNION);
1673
1674 elem = builtin_type (gdbarch)->builtin_float;
1675 append_composite_type_field (t, "f", elem);
1676
1677 elem = builtin_type (gdbarch)->builtin_uint32;
1678 append_composite_type_field (t, "u", elem);
1679
1680 elem = builtin_type (gdbarch)->builtin_int32;
1681 append_composite_type_field (t, "s", elem);
1682
1683 tdep->vns_type = t;
1684 }
1685
1686 return tdep->vns_type;
1687 }
1688
1689 /* Return the type for an AdvSIMD H register. */
1690
1691 static struct type *
1692 aarch64_vnh_type (struct gdbarch *gdbarch)
1693 {
1694 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1695
1696 if (tdep->vnh_type == NULL)
1697 {
1698 struct type *t;
1699 struct type *elem;
1700
1701 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1702 TYPE_CODE_UNION);
1703
1704 elem = builtin_type (gdbarch)->builtin_uint16;
1705 append_composite_type_field (t, "u", elem);
1706
1707 elem = builtin_type (gdbarch)->builtin_int16;
1708 append_composite_type_field (t, "s", elem);
1709
1710 tdep->vnh_type = t;
1711 }
1712
1713 return tdep->vnh_type;
1714 }
1715
1716 /* Return the type for an AdvSIMD B register. */
1717
1718 static struct type *
1719 aarch64_vnb_type (struct gdbarch *gdbarch)
1720 {
1721 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1722
1723 if (tdep->vnb_type == NULL)
1724 {
1725 struct type *t;
1726 struct type *elem;
1727
1728 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1729 TYPE_CODE_UNION);
1730
1731 elem = builtin_type (gdbarch)->builtin_uint8;
1732 append_composite_type_field (t, "u", elem);
1733
1734 elem = builtin_type (gdbarch)->builtin_int8;
1735 append_composite_type_field (t, "s", elem);
1736
1737 tdep->vnb_type = t;
1738 }
1739
1740 return tdep->vnb_type;
1741 }
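/* The five union types above presumably back the $qN, $dN, $sN, $hN
   and $bN pseudo registers, letting the user pick a view of each V
   register, e.g. "print $d0.f" for the low double or "print $b0.u"
   for the low unsigned byte. */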
1742
1743 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1744
1745 static int
1746 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1747 {
1748 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1749 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1750
1751 if (reg == AARCH64_DWARF_SP)
1752 return AARCH64_SP_REGNUM;
1753
1754 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1755 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1756
1757 return -1;
1758 }
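/* For example, with the standard AArch64 DWARF numbering (x0-x30 at
   0-30, SP at 31, v0-v31 at 64-95), DWARF register 1 maps to X1,
   DWARF register 31 maps to SP, and DWARF register 65 maps to V1;
   anything else (e.g. the PC, which has no mapping here) yields -1. */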
1759 \f
1760
1761 /* Implement the "print_insn" gdbarch method. */
1762
1763 static int
1764 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1765 {
1766 info->symbols = NULL;
1767 return default_print_insn (memaddr, info);
1768 }
1769
1770 /* AArch64 BRK software debug mode instruction.
1771 Note that AArch64 code is always little-endian.
1772 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1773 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1774
1775 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1776
1777 /* Extract from the register cache REGS containing the (raw)
1778 register state a function return value of type TYPE, and copy
1779 that, in virtual format, into VALBUF. */
1780
1781 static void
1782 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1783 gdb_byte *valbuf)
1784 {
1785 struct gdbarch *gdbarch = regs->arch ();
1786 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1787
1788 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1789 {
1790 bfd_byte buf[V_REGISTER_SIZE];
1791 int len = TYPE_LENGTH (type);
1792
1793 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1794 memcpy (valbuf, buf, len);
1795 }
1796 else if (TYPE_CODE (type) == TYPE_CODE_INT
1797 || TYPE_CODE (type) == TYPE_CODE_CHAR
1798 || TYPE_CODE (type) == TYPE_CODE_BOOL
1799 || TYPE_CODE (type) == TYPE_CODE_PTR
1800 || TYPE_IS_REFERENCE (type)
1801 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1802 {
1803 /* If the type is a plain integer, then the access is
1804 straightforward. Otherwise we have to play around a bit
1805 more. */
1806 int len = TYPE_LENGTH (type);
1807 int regno = AARCH64_X0_REGNUM;
1808 ULONGEST tmp;
1809
1810 while (len > 0)
1811 {
1812 /* By using store_unsigned_integer we avoid having to do
1813 anything special for small big-endian values. */
1814 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1815 store_unsigned_integer (valbuf,
1816 (len > X_REGISTER_SIZE
1817 ? X_REGISTER_SIZE : len), byte_order, tmp);
1818 len -= X_REGISTER_SIZE;
1819 valbuf += X_REGISTER_SIZE;
1820 }
1821 }
1822 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1823 {
1824 int regno = AARCH64_V0_REGNUM;
1825 bfd_byte buf[V_REGISTER_SIZE];
1826 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1827 int len = TYPE_LENGTH (target_type);
1828
1829 regcache_cooked_read (regs, regno, buf);
1830 memcpy (valbuf, buf, len);
1831 valbuf += len;
1832 regcache_cooked_read (regs, regno + 1, buf);
1833 memcpy (valbuf, buf, len);
1834 valbuf += len;
1835 }
1836 else if (is_hfa_or_hva (type))
1837 {
1838 int elements = TYPE_NFIELDS (type);
1839 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1840 int len = TYPE_LENGTH (member_type);
1841 int i;
1842
1843 for (i = 0; i < elements; i++)
1844 {
1845 int regno = AARCH64_V0_REGNUM + i;
1846 bfd_byte buf[V_REGISTER_SIZE];
1847
1848 if (aarch64_debug)
1849 {
1850 debug_printf ("read HFA or HVA return value element %d from %s\n",
1851 i + 1,
1852 gdbarch_register_name (gdbarch, regno));
1853 }
1854 regcache_cooked_read (regs, regno, buf);
1855
1856 memcpy (valbuf, buf, len);
1857 valbuf += len;
1858 }
1859 }
1860 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1861 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1862 {
1863 /* Short vector is returned in V register. */
1864 gdb_byte buf[V_REGISTER_SIZE];
1865
1866 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1867 memcpy (valbuf, buf, TYPE_LENGTH (type));
1868 }
1869 else
1870 {
1871 /* For a structure or union the behaviour is as if the value had
1872 been stored to word-aligned memory and then loaded into
1873 registers with 64-bit load instruction(s). */
1874 int len = TYPE_LENGTH (type);
1875 int regno = AARCH64_X0_REGNUM;
1876 bfd_byte buf[X_REGISTER_SIZE];
1877
1878 while (len > 0)
1879 {
1880 regcache_cooked_read (regs, regno++, buf);
1881 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1882 len -= X_REGISTER_SIZE;
1883 valbuf += X_REGISTER_SIZE;
1884 }
1885 }
1886 }
1887
1888
1889 /* Will a function return an aggregate type in memory or in a
1890 register? Return 0 if an aggregate type can be returned in a
1891 register, 1 if it must be returned in memory. */
1892
1893 static int
1894 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1895 {
1896 type = check_typedef (type);
1897
1898 if (is_hfa_or_hva (type))
1899 {
1900 /* V0-V7 are used to return values, one register per member, and
1901 an HFA or HVA has at most four members. */
1902 return 0;
1903 }
1904
1905 if (TYPE_LENGTH (type) > 16)
1906 {
1907 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1908 invisible reference. */
1909
1910 return 1;
1911 }
1912
1913 return 0;
1914 }
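/* For example, struct { int64_t a, b; } (16 bytes) comes back in
   X0/X1, while struct { int64_t a, b, c; } (24 bytes) is returned via
   the invisible reference whose address the caller places in X8. */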
1915
1916 /* Write into appropriate registers a function return value of type
1917 TYPE, given in virtual format. */
1918
1919 static void
1920 aarch64_store_return_value (struct type *type, struct regcache *regs,
1921 const gdb_byte *valbuf)
1922 {
1923 struct gdbarch *gdbarch = regs->arch ();
1924 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1925
1926 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1927 {
1928 bfd_byte buf[V_REGISTER_SIZE];
1929 int len = TYPE_LENGTH (type);
1930
1931 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1932 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1933 }
1934 else if (TYPE_CODE (type) == TYPE_CODE_INT
1935 || TYPE_CODE (type) == TYPE_CODE_CHAR
1936 || TYPE_CODE (type) == TYPE_CODE_BOOL
1937 || TYPE_CODE (type) == TYPE_CODE_PTR
1938 || TYPE_IS_REFERENCE (type)
1939 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1940 {
1941 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1942 {
1943 /* Values of one word or less are zero/sign-extended and
1944 returned in X0. */
1945 bfd_byte tmpbuf[X_REGISTER_SIZE];
1946 LONGEST val = unpack_long (type, valbuf);
1947
1948 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1949 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1950 }
1951 else
1952 {
1953 /* Integral values greater than one word are stored in
1954 consecutive registers starting with X0. The length will
1955 always be a multiple of the register size. */
1956 int len = TYPE_LENGTH (type);
1957 int regno = AARCH64_X0_REGNUM;
1958
1959 while (len > 0)
1960 {
1961 regcache_cooked_write (regs, regno++, valbuf);
1962 len -= X_REGISTER_SIZE;
1963 valbuf += X_REGISTER_SIZE;
1964 }
1965 }
1966 }
1967 else if (is_hfa_or_hva (type))
1968 {
1969 int elements = TYPE_NFIELDS (type);
1970 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1971 int len = TYPE_LENGTH (member_type);
1972 int i;
1973
1974 for (i = 0; i < elements; i++)
1975 {
1976 int regno = AARCH64_V0_REGNUM + i;
1977 bfd_byte tmpbuf[V_REGISTER_SIZE];
1978
1979 if (aarch64_debug)
1980 {
1981 debug_printf ("write HFA or HVA return value element %d to %s\n",
1982 i + 1,
1983 gdbarch_register_name (gdbarch, regno));
1984 }
1985
1986 memcpy (tmpbuf, valbuf, len);
1987 regcache_cooked_write (regs, regno, tmpbuf);
1988 valbuf += len;
1989 }
1990 }
1991 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1992 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1993 {
1994 /* Short vector. */
1995 gdb_byte buf[V_REGISTER_SIZE];
1996
1997 memcpy (buf, valbuf, TYPE_LENGTH (type));
1998 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1999 }
2000 else
2001 {
2002 /* For a structure or union the behaviour is as if the value had
2003 been stored to word-aligned memory and then loaded into
2004 registers with 64-bit load instruction(s). */
2005 int len = TYPE_LENGTH (type);
2006 int regno = AARCH64_X0_REGNUM;
2007 bfd_byte tmpbuf[X_REGISTER_SIZE];
2008
2009 while (len > 0)
2010 {
2011 memcpy (tmpbuf, valbuf,
2012 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2013 regcache_cooked_write (regs, regno++, tmpbuf);
2014 len -= X_REGISTER_SIZE;
2015 valbuf += X_REGISTER_SIZE;
2016 }
2017 }
2018 }
2019
2020 /* Implement the "return_value" gdbarch method. */
2021
2022 static enum return_value_convention
2023 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2024 struct type *valtype, struct regcache *regcache,
2025 gdb_byte *readbuf, const gdb_byte *writebuf)
2026 {
2027
2028 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2029 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2030 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2031 {
2032 if (aarch64_return_in_memory (gdbarch, valtype))
2033 {
2034 if (aarch64_debug)
2035 debug_printf ("return value in memory\n");
2036 return RETURN_VALUE_STRUCT_CONVENTION;
2037 }
2038 }
2039
2040 if (writebuf)
2041 aarch64_store_return_value (valtype, regcache, writebuf);
2042
2043 if (readbuf)
2044 aarch64_extract_return_value (valtype, regcache, readbuf);
2045
2046 if (aarch64_debug)
2047 debug_printf ("return value in registers\n");
2048
2049 return RETURN_VALUE_REGISTER_CONVENTION;
2050 }
2051
2052 /* Implement the "get_longjmp_target" gdbarch method. */
2053
2054 static int
2055 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2056 {
2057 CORE_ADDR jb_addr;
2058 gdb_byte buf[X_REGISTER_SIZE];
2059 struct gdbarch *gdbarch = get_frame_arch (frame);
2060 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2061 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2062
2063 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2064
2065 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2066 X_REGISTER_SIZE))
2067 return 0;
2068
2069 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2070 return 1;
2071 }
2072
2073 /* Implement the "gen_return_address" gdbarch method. */
2074
2075 static void
2076 aarch64_gen_return_address (struct gdbarch *gdbarch,
2077 struct agent_expr *ax, struct axs_value *value,
2078 CORE_ADDR scope)
2079 {
2080 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2081 value->kind = axs_lvalue_register;
2082 value->u.reg = AARCH64_LR_REGNUM;
2083 }
2084 \f
2085
2086 /* Return the pseudo register name corresponding to register regnum. */
2087
2088 static const char *
2089 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2090 {
2091 static const char *const q_name[] =
2092 {
2093 "q0", "q1", "q2", "q3",
2094 "q4", "q5", "q6", "q7",
2095 "q8", "q9", "q10", "q11",
2096 "q12", "q13", "q14", "q15",
2097 "q16", "q17", "q18", "q19",
2098 "q20", "q21", "q22", "q23",
2099 "q24", "q25", "q26", "q27",
2100 "q28", "q29", "q30", "q31",
2101 };
2102
2103 static const char *const d_name[] =
2104 {
2105 "d0", "d1", "d2", "d3",
2106 "d4", "d5", "d6", "d7",
2107 "d8", "d9", "d10", "d11",
2108 "d12", "d13", "d14", "d15",
2109 "d16", "d17", "d18", "d19",
2110 "d20", "d21", "d22", "d23",
2111 "d24", "d25", "d26", "d27",
2112 "d28", "d29", "d30", "d31",
2113 };
2114
2115 static const char *const s_name[] =
2116 {
2117 "s0", "s1", "s2", "s3",
2118 "s4", "s5", "s6", "s7",
2119 "s8", "s9", "s10", "s11",
2120 "s12", "s13", "s14", "s15",
2121 "s16", "s17", "s18", "s19",
2122 "s20", "s21", "s22", "s23",
2123 "s24", "s25", "s26", "s27",
2124 "s28", "s29", "s30", "s31",
2125 };
2126
2127 static const char *const h_name[] =
2128 {
2129 "h0", "h1", "h2", "h3",
2130 "h4", "h5", "h6", "h7",
2131 "h8", "h9", "h10", "h11",
2132 "h12", "h13", "h14", "h15",
2133 "h16", "h17", "h18", "h19",
2134 "h20", "h21", "h22", "h23",
2135 "h24", "h25", "h26", "h27",
2136 "h28", "h29", "h30", "h31",
2137 };
2138
2139 static const char *const b_name[] =
2140 {
2141 "b0", "b1", "b2", "b3",
2142 "b4", "b5", "b6", "b7",
2143 "b8", "b9", "b10", "b11",
2144 "b12", "b13", "b14", "b15",
2145 "b16", "b17", "b18", "b19",
2146 "b20", "b21", "b22", "b23",
2147 "b24", "b25", "b26", "b27",
2148 "b28", "b29", "b30", "b31",
2149 };
2150
2151 regnum -= gdbarch_num_regs (gdbarch);
2152
2153 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2154 return q_name[regnum - AARCH64_Q0_REGNUM];
2155
2156 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2157 return d_name[regnum - AARCH64_D0_REGNUM];
2158
2159 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2160 return s_name[regnum - AARCH64_S0_REGNUM];
2161
2162 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2163 return h_name[regnum - AARCH64_H0_REGNUM];
2164
2165 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2166 return b_name[regnum - AARCH64_B0_REGNUM];
2167
2168 internal_error (__FILE__, __LINE__,
2169 _("aarch64_pseudo_register_name: bad register number %d"),
2170 regnum);
2171 }
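/* For instance (illustrative arithmetic, assuming the stock target
   description: 34 core registers plus 32 V registers, FPSR and FPCR,
   i.e. gdbarch_num_regs == 68): pseudo register number
   68 + AARCH64_D0_REGNUM (32) + 5 = 105 resolves to "d5", the low
   eight bytes of raw register v5.  */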
2172
2173 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2174
2175 static struct type *
2176 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2177 {
2178 regnum -= gdbarch_num_regs (gdbarch);
2179
2180 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2181 return aarch64_vnq_type (gdbarch);
2182
2183 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2184 return aarch64_vnd_type (gdbarch);
2185
2186 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2187 return aarch64_vns_type (gdbarch);
2188
2189 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2190 return aarch64_vnh_type (gdbarch);
2191
2192 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2193 return aarch64_vnb_type (gdbarch);
2194
2195 internal_error (__FILE__, __LINE__,
2196 _("aarch64_pseudo_register_type: bad register number %d"),
2197 regnum);
2198 }
2199
2200 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2201
2202 static int
2203 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2204 struct reggroup *group)
2205 {
2206 regnum -= gdbarch_num_regs (gdbarch);
2207
2208 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2209 return group == all_reggroup || group == vector_reggroup;
2210 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2211 return (group == all_reggroup || group == vector_reggroup
2212 || group == float_reggroup);
2213 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2214 return (group == all_reggroup || group == vector_reggroup
2215 || group == float_reggroup);
2216 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2217 return group == all_reggroup || group == vector_reggroup;
2218 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2219 return group == all_reggroup || group == vector_reggroup;
2220
2221 return group == all_reggroup;
2222 }
2223
2224 /* Implement the "pseudo_register_read_value" gdbarch method. */
2225
2226 static struct value *
2227 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2228 struct regcache *regcache,
2229 int regnum)
2230 {
2231 gdb_byte reg_buf[V_REGISTER_SIZE];
2232 struct value *result_value;
2233 gdb_byte *buf;
2234
2235 result_value = allocate_value (register_type (gdbarch, regnum));
2236 VALUE_LVAL (result_value) = lval_register;
2237 VALUE_REGNUM (result_value) = regnum;
2238 buf = value_contents_raw (result_value);
2239
2240 regnum -= gdbarch_num_regs (gdbarch);
2241
2242 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2243 {
2244 enum register_status status;
2245 unsigned v_regnum;
2246
2247 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2248 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2249 if (status != REG_VALID)
2250 mark_value_bytes_unavailable (result_value, 0,
2251 TYPE_LENGTH (value_type (result_value)));
2252 else
2253 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2254 return result_value;
2255 }
2256
2257 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2258 {
2259 enum register_status status;
2260 unsigned v_regnum;
2261
2262 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2263 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2264 if (status != REG_VALID)
2265 mark_value_bytes_unavailable (result_value, 0,
2266 TYPE_LENGTH (value_type (result_value)));
2267 else
2268 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2269 return result_value;
2270 }
2271
2272 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2273 {
2274 enum register_status status;
2275 unsigned v_regnum;
2276
2277 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2278 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2279 if (status != REG_VALID)
2280 mark_value_bytes_unavailable (result_value, 0,
2281 TYPE_LENGTH (value_type (result_value)));
2282 else
2283 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2284 return result_value;
2285 }
2286
2287 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2288 {
2289 enum register_status status;
2290 unsigned v_regnum;
2291
2292 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2293 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2294 if (status != REG_VALID)
2295 mark_value_bytes_unavailable (result_value, 0,
2296 TYPE_LENGTH (value_type (result_value)));
2297 else
2298 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2299 return result_value;
2300 }
2301
2302 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2303 {
2304 enum register_status status;
2305 unsigned v_regnum;
2306
2307 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2308 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2309 if (status != REG_VALID)
2310 mark_value_bytes_unavailable (result_value, 0,
2311 TYPE_LENGTH (value_type (result_value)));
2312 else
2313 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2314 return result_value;
2315 }
2316
2317 gdb_assert_not_reached ("regnum out of bounds");
2318 }
2319
2320 /* Implement the "pseudo_register_write" gdbarch method. */
2321
2322 static void
2323 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2324 int regnum, const gdb_byte *buf)
2325 {
2326 gdb_byte reg_buf[V_REGISTER_SIZE];
2327
2328 /* Ensure the register buffer is zero; we want GDB writes of the
2329 various 'scalar' pseudo registers to behave like architectural
2330 writes: register-width bytes are written and the remainder is
2331 set to zero. */
2332 memset (reg_buf, 0, sizeof (reg_buf));
2333
2334 regnum -= gdbarch_num_regs (gdbarch);
2335
2336 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2337 {
2338 /* Pseudo Q registers. */
2339 unsigned v_regnum;
2340
2341 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2342 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2343 regcache_raw_write (regcache, v_regnum, reg_buf);
2344 return;
2345 }
2346
2347 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2348 {
2349 /* Pseudo D registers. */
2350 unsigned v_regnum;
2351
2352 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2353 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2354 regcache_raw_write (regcache, v_regnum, reg_buf);
2355 return;
2356 }
2357
2358 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2359 {
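/* Pseudo S registers. */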
2360 unsigned v_regnum;
2361
2362 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2363 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2364 regcache_raw_write (regcache, v_regnum, reg_buf);
2365 return;
2366 }
2367
2368 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2369 {
2370 /* Pseudo H registers. */
2371 unsigned v_regnum;
2372
2373 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2374 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2375 regcache_raw_write (regcache, v_regnum, reg_buf);
2376 return;
2377 }
2378
2379 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2380 {
2381 /* Pseudo B registers. */
2382 unsigned v_regnum;
2383
2384 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2385 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2386 regcache_raw_write (regcache, v_regnum, reg_buf);
2387 return;
2388 }
2389
2390 gdb_assert_not_reached ("regnum out of bounds");
2391 }
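/* For example (illustrative): writing the single byte 0xff to pseudo
   register "b5" stores 0x00...00ff into raw register v5; B_REGISTER_SIZE
   bytes come from the user's buffer and the remaining 15 bytes of the
   V register are cleared, matching an architectural scalar write.  */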
2392
2393 /* Callback function for user_reg_add. */
2394
2395 static struct value *
2396 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2397 {
2398 const int *reg_p = (const int *) baton;
2399
2400 return value_of_register (*reg_p, frame);
2401 }
2402 \f
2403
2404 /* Implement the "software_single_step" gdbarch method, needed to
2405 single step through atomic sequences on AArch64. */
2406
2407 static std::vector<CORE_ADDR>
2408 aarch64_software_single_step (struct regcache *regcache)
2409 {
2410 struct gdbarch *gdbarch = regcache->arch ();
2411 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2412 const int insn_size = 4;
2413 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2414 CORE_ADDR pc = regcache_read_pc (regcache);
2415 CORE_ADDR breaks[2] = { -1, -1 };
2416 CORE_ADDR loc = pc;
2417 CORE_ADDR closing_insn = 0;
2418 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2419 byte_order_for_code);
2420 int index;
2421 int insn_count;
2422 int bc_insn_count = 0; /* Conditional branch instruction count. */
2423 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2424 aarch64_inst inst;
2425
2426 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2427 return {};
2428
2429 /* Look for a Load Exclusive instruction which begins the sequence. */
2430 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2431 return {};
2432
2433 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2434 {
2435 loc += insn_size;
2436 insn = read_memory_unsigned_integer (loc, insn_size,
2437 byte_order_for_code);
2438
2439 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2440 return {};
2441 /* Check if the instruction is a conditional branch. */
2442 if (inst.opcode->iclass == condbranch)
2443 {
2444 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2445
2446 if (bc_insn_count >= 1)
2447 return {};
2448
2449 /* It is, so we'll try to set a breakpoint at the destination. */
2450 breaks[1] = loc + inst.operands[0].imm.value;
2451
2452 bc_insn_count++;
2453 last_breakpoint++;
2454 }
2455
2456 /* Look for the Store Exclusive which closes the atomic sequence. */
2457 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2458 {
2459 closing_insn = loc;
2460 break;
2461 }
2462 }
2463
2464 /* We didn't find a closing Store Exclusive instruction; fall back. */
2465 if (!closing_insn)
2466 return {};
2467
2468 /* Insert breakpoint after the end of the atomic sequence. */
2469 breaks[0] = loc + insn_size;
2470
2471 /* Check for duplicated breakpoints, and also check that the second
2472 breakpoint is not within the atomic sequence. */
2473 if (last_breakpoint
2474 && (breaks[1] == breaks[0]
2475 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2476 last_breakpoint = 0;
2477
2478 std::vector<CORE_ADDR> next_pcs;
2479
2480 /* Insert the breakpoint at the end of the sequence, and one at the
2481 destination of the conditional branch, if it exists. */
2482 for (index = 0; index <= last_breakpoint; index++)
2483 next_pcs.push_back (breaks[index]);
2484
2485 return next_pcs;
2486 }
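/* A sketch of the kind of sequence handled above (illustrative
   addresses and registers):

     0x400100  ldaxr w1, [x0]      ; Load Exclusive opens the sequence
     0x400104  cmp   w1, w2
     0x400108  b.ne  0x400114      ; breaks[1] is set to 0x400114
     0x40010c  stlxr w3, w4, [x0]  ; Store Exclusive closes the sequence
     0x400110  ...                 ; breaks[0] is set here

   Stopping only at these two addresses lets the sequence run as a
   unit, so the exclusive monitor is never disturbed mid-sequence.  */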
2487
2488 struct aarch64_displaced_step_closure : public displaced_step_closure
2489 {
2490 /* True when a conditional instruction, such as B.COND or TBZ,
2491 is being displaced stepped. */
2492 int cond = 0;
2493
2494 /* PC adjustment offset after displaced stepping. */
2495 int32_t pc_adjust = 0;
2496 };
2497
2498 /* Data when visiting instructions for displaced stepping. */
2499
2500 struct aarch64_displaced_step_data
2501 {
2502 struct aarch64_insn_data base;
2503
2504 /* The address at which the instruction will be executed. */
2505 CORE_ADDR new_addr;
2506 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2507 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2508 /* Number of instructions in INSN_BUF. */
2509 unsigned insn_count;
2510 /* Registers when doing displaced stepping. */
2511 struct regcache *regs;
2512
2513 aarch64_displaced_step_closure *dsc;
2514 };
2515
2516 /* Implementation of aarch64_insn_visitor method "b". */
2517
2518 static void
2519 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2520 struct aarch64_insn_data *data)
2521 {
2522 struct aarch64_displaced_step_data *dsd
2523 = (struct aarch64_displaced_step_data *) data;
2524 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2525
2526 if (can_encode_int32 (new_offset, 28))
2527 {
2528 /* Emit B rather than BL: executing BL from the scratch pad
2529 would put the wrong return address into LR, so we emit B
2530 and update LR by hand when the instruction is BL. */
2531 emit_b (dsd->insn_buf, 0, new_offset);
2532 dsd->insn_count++;
2533 }
2534 else
2535 {
2536 /* Write NOP. */
2537 emit_nop (dsd->insn_buf);
2538 dsd->insn_count++;
2539 dsd->dsc->pc_adjust = offset;
2540 }
2541
2542 if (is_bl)
2543 {
2544 /* Update LR. */
2545 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2546 data->insn_addr + 4);
2547 }
2548 }
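/* Worked example (illustrative addresses): relocating "BL +0x1000"
   from FROM = 0x400000 to a scratch pad at TO = 0x7f0000 gives
   new_offset = 0x400000 - 0x7f0000 + 0x1000 = -0x3ef000, well within
   the 28-bit range of B, so a plain "B -0x3ef000" is emitted at TO
   and 0x400004 (FROM + 4) is written into LR by hand.  */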
2549
2550 /* Implementation of aarch64_insn_visitor method "b_cond". */
2551
2552 static void
2553 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2554 struct aarch64_insn_data *data)
2555 {
2556 struct aarch64_displaced_step_data *dsd
2557 = (struct aarch64_displaced_step_data *) data;
2558
2559 /* GDB has to fix up the PC after displaced stepping this
2560 instruction differently according to whether the condition is
2561 true or false. Instead of checking COND against the condition
2562 flags, we emit the following instructions, and GDB can tell
2563 how to fix up the PC from the resulting PC value.
2564
2565 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2566 INSN1 ;
2567 TAKEN:
2568 INSN2
2569 */
2570
2571 emit_bcond (dsd->insn_buf, cond, 8);
2572 dsd->dsc->cond = 1;
2573 dsd->dsc->pc_adjust = offset;
2574 dsd->insn_count = 1;
2575 }
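/* After the scratch pad is single-stepped, aarch64_displaced_step_fixup
   below inspects the resulting PC: PC == TO + 8 means the B.COND was
   taken, so the PC is fixed up to FROM + OFFSET; PC == TO + 4 means it
   fell through, so PC_ADJUST is rewritten to 4 and execution resumes
   at FROM + 4.  */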
2576
2577 /* Build an aarch64_register operand on the fly. If the register
2578 is known statically, prefer defining it as a global instead of
2579 calling this helper function. */
2580
2581 static struct aarch64_register
2582 aarch64_register (unsigned num, int is64)
2583 {
2584 return (struct aarch64_register) { num, is64 };
2585 }
2586
2587 /* Implementation of aarch64_insn_visitor method "cb". */
2588
2589 static void
2590 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2591 const unsigned rn, int is64,
2592 struct aarch64_insn_data *data)
2593 {
2594 struct aarch64_displaced_step_data *dsd
2595 = (struct aarch64_displaced_step_data *) data;
2596
2597 /* The offset is out of range for a compare and branch
2598 instruction. We can use the following instructions instead:
2599
2600 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2601 INSN1 ;
2602 TAKEN:
2603 INSN2
2604 */
2605 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2606 dsd->insn_count = 1;
2607 dsd->dsc->cond = 1;
2608 dsd->dsc->pc_adjust = offset;
2609 }
2610
2611 /* Implementation of aarch64_insn_visitor method "tb". */
2612
2613 static void
2614 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2615 const unsigned rt, unsigned bit,
2616 struct aarch64_insn_data *data)
2617 {
2618 struct aarch64_displaced_step_data *dsd
2619 = (struct aarch64_displaced_step_data *) data;
2620
2621 /* The offset is out of range for a test bit and branch
2622 instruction. We can use the following instructions instead:
2623
2624 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2625 INSN1 ;
2626 TAKEN:
2627 INSN2
2628
2629 */
2630 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2631 dsd->insn_count = 1;
2632 dsd->dsc->cond = 1;
2633 dsd->dsc->pc_adjust = offset;
2634 }
2635
2636 /* Implementation of aarch64_insn_visitor method "adr". */
2637
2638 static void
2639 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2640 const int is_adrp, struct aarch64_insn_data *data)
2641 {
2642 struct aarch64_displaced_step_data *dsd
2643 = (struct aarch64_displaced_step_data *) data;
2644 /* We know exactly the address the ADR{P,} instruction will compute.
2645 We can just write it to the destination register. */
2646 CORE_ADDR address = data->insn_addr + offset;
2647
2648 if (is_adrp)
2649 {
2650 /* Clear the lower 12 bits of the address to get its 4K page. */
2651 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2652 address & ~0xfff);
2653 }
2654 else
2655 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2656 address);
2657
2658 dsd->dsc->pc_adjust = 4;
2659 emit_nop (dsd->insn_buf);
2660 dsd->insn_count = 1;
2661 }
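/* Worked example (illustrative): for "ADRP x3, #+0x2000" at
   insn_addr 0x400123, address = 0x400123 + 0x2000 = 0x402123, and
   masking with ~0xfff stores 0x402000 (the 4K page base) into x3;
   the scratch pad then only has to execute a NOP.  */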
2662
2663 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2664
2665 static void
2666 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2667 const unsigned rt, const int is64,
2668 struct aarch64_insn_data *data)
2669 {
2670 struct aarch64_displaced_step_data *dsd
2671 = (struct aarch64_displaced_step_data *) data;
2672 CORE_ADDR address = data->insn_addr + offset;
2673 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2674
2675 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2676 address);
2677
2678 if (is_sw)
2679 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2680 aarch64_register (rt, 1), zero);
2681 else
2682 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2683 aarch64_register (rt, 1), zero);
2684
2685 dsd->dsc->pc_adjust = 4;
2686 }
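/* For example (illustrative): for "LDR x2, <literal>" whose literal
   sits at insn_addr + 0x100, the absolute literal address is written
   into x2 and "ldr x2, [x2, #0]" is emitted in the scratch pad, so
   the load goes through the register rather than through the
   displaced PC.  */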
2687
2688 /* Implementation of aarch64_insn_visitor method "others". */
2689
2690 static void
2691 aarch64_displaced_step_others (const uint32_t insn,
2692 struct aarch64_insn_data *data)
2693 {
2694 struct aarch64_displaced_step_data *dsd
2695 = (struct aarch64_displaced_step_data *) data;
2696
2697 aarch64_emit_insn (dsd->insn_buf, insn);
2698 dsd->insn_count = 1;
2699
2700 if ((insn & 0xfffffc1f) == 0xd65f0000)
2701 {
2702 /* RET */
2703 dsd->dsc->pc_adjust = 0;
2704 }
2705 else
2706 dsd->dsc->pc_adjust = 4;
2707 }
2708
2709 static const struct aarch64_insn_visitor visitor =
2710 {
2711 aarch64_displaced_step_b,
2712 aarch64_displaced_step_b_cond,
2713 aarch64_displaced_step_cb,
2714 aarch64_displaced_step_tb,
2715 aarch64_displaced_step_adr,
2716 aarch64_displaced_step_ldr_literal,
2717 aarch64_displaced_step_others,
2718 };
2719
2720 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2721
2722 struct displaced_step_closure *
2723 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2724 CORE_ADDR from, CORE_ADDR to,
2725 struct regcache *regs)
2726 {
2727 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2728 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2729 struct aarch64_displaced_step_data dsd;
2730 aarch64_inst inst;
2731
2732 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2733 return NULL;
2734
2735 /* Look for a Load Exclusive instruction which begins the sequence. */
2736 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2737 {
2738 /* We can't displaced-step atomic sequences. */
2739 return NULL;
2740 }
2741
2742 std::unique_ptr<aarch64_displaced_step_closure> dsc
2743 (new aarch64_displaced_step_closure);
2744 dsd.base.insn_addr = from;
2745 dsd.new_addr = to;
2746 dsd.regs = regs;
2747 dsd.dsc = dsc.get ();
2748 dsd.insn_count = 0;
2749 aarch64_relocate_instruction (insn, &visitor,
2750 (struct aarch64_insn_data *) &dsd);
2751 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2752
2753 if (dsd.insn_count != 0)
2754 {
2755 int i;
2756
2757 /* The instruction can be relocated to the scratch pad. Copy
2758 the relocated instruction(s) there. */
2759 for (i = 0; i < dsd.insn_count; i++)
2760 {
2761 if (debug_displaced)
2762 {
2763 debug_printf ("displaced: writing insn ");
2764 debug_printf ("%.8x", dsd.insn_buf[i]);
2765 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2766 }
2767 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2768 (ULONGEST) dsd.insn_buf[i]);
2769 }
2770 }
2771 else
2772 {
2773 dsc = NULL;
2774 }
2775
2776 return dsc.release ();
2777 }
2778
2779 /* Implement the "displaced_step_fixup" gdbarch method. */
2780
2781 void
2782 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2783 struct displaced_step_closure *dsc_,
2784 CORE_ADDR from, CORE_ADDR to,
2785 struct regcache *regs)
2786 {
2787 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2788
2789 if (dsc->cond)
2790 {
2791 ULONGEST pc;
2792
2793 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2794 if (pc - to == 8)
2795 {
2796 /* Condition is true. */
2797 }
2798 else if (pc - to == 4)
2799 {
2800 /* Condition is false. */
2801 dsc->pc_adjust = 4;
2802 }
2803 else
2804 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2805 }
2806
2807 if (dsc->pc_adjust != 0)
2808 {
2809 if (debug_displaced)
2810 {
2811 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2812 paddress (gdbarch, from), dsc->pc_adjust);
2813 }
2814 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2815 from + dsc->pc_adjust);
2816 }
2817 }
2818
2819 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2820
2821 int
2822 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2823 struct displaced_step_closure *closure)
2824 {
2825 return 1;
2826 }
2827
2828 /* Get the correct target description. */
2829
2830 const target_desc *
2831 aarch64_read_description ()
2832 {
2833 static target_desc *aarch64_tdesc = NULL;
2834 target_desc **tdesc = &aarch64_tdesc;
2835
2836 if (*tdesc == NULL)
2837 *tdesc = aarch64_create_target_description ();
2838
2839 return *tdesc;
2840 }
2841
2842 /* Initialize the current architecture based on INFO. If possible,
2843 re-use an architecture from ARCHES, which is a list of
2844 architectures already created during this debugging session.
2845
2846 Called e.g. at program startup, when reading a core file, and when
2847 reading a binary file. */
2848
2849 static struct gdbarch *
2850 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2851 {
2852 struct gdbarch_tdep *tdep;
2853 struct gdbarch *gdbarch;
2854 struct gdbarch_list *best_arch;
2855 struct tdesc_arch_data *tdesc_data = NULL;
2856 const struct target_desc *tdesc = info.target_desc;
2857 int i;
2858 int valid_p = 1;
2859 const struct tdesc_feature *feature;
2860 int num_regs = 0;
2861 int num_pseudo_regs = 0;
2862
2863 /* Ensure we always have a target description with registers. */
2864 if (!tdesc_has_registers (tdesc))
2865 tdesc = aarch64_read_description ();
2866
2867 gdb_assert (tdesc);
2868
2869 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2870
2871 if (feature == NULL)
2872 return NULL;
2873
2874 tdesc_data = tdesc_data_alloc ();
2875
2876 /* Validate that the description provides the mandatory core R
2877 registers and allocate their numbers. */
2878 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2879 valid_p &=
2880 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2881 aarch64_r_register_names[i]);
2882
2883 num_regs = AARCH64_X0_REGNUM + i;
2884
2885 /* Look for the V registers. */
2886 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2887 if (feature)
2888 {
2889 /* Validate that the description provides the mandatory V
2890 registers and allocate their numbers. */
2891 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2892 valid_p &=
2893 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2894 aarch64_v_register_names[i]);
2895
2896 num_regs = AARCH64_V0_REGNUM + i;
2897
2898 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2899 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2900 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2901 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2902 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2903 }
2904
2905 if (!valid_p)
2906 {
2907 tdesc_data_cleanup (tdesc_data);
2908 return NULL;
2909 }
2910
2911 /* AArch64 code is always little-endian. */
2912 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2913
2914 /* If there is already a candidate, use it. */
2915 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2916 best_arch != NULL;
2917 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2918 {
2919 /* Found a match. */
2920 break;
2921 }
2922
2923 if (best_arch != NULL)
2924 {
2925 if (tdesc_data != NULL)
2926 tdesc_data_cleanup (tdesc_data);
2927 return best_arch->gdbarch;
2928 }
2929
2930 tdep = XCNEW (struct gdbarch_tdep);
2931 gdbarch = gdbarch_alloc (&info, tdep);
2932
2933 /* This should be low enough for everything. */
2934 tdep->lowest_pc = 0x20;
2935 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2936 tdep->jb_elt_size = 8;
2937
2938 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2939 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2940
2941 /* Frame handling. */
2942 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2943 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2944 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2945
2946 /* Advance PC across function entry code. */
2947 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2948
2949 /* The stack grows downward. */
2950 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2951
2952 /* Breakpoint manipulation. */
2953 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2954 aarch64_breakpoint::kind_from_pc);
2955 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2956 aarch64_breakpoint::bp_from_kind);
2957 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2958 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2959
2960 /* Information about registers, etc. */
2961 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2962 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2963 set_gdbarch_num_regs (gdbarch, num_regs);
2964
2965 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2966 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2967 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2968 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2969 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2970 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2971 aarch64_pseudo_register_reggroup_p);
2972
2973 /* ABI */
2974 set_gdbarch_short_bit (gdbarch, 16);
2975 set_gdbarch_int_bit (gdbarch, 32);
2976 set_gdbarch_float_bit (gdbarch, 32);
2977 set_gdbarch_double_bit (gdbarch, 64);
2978 set_gdbarch_long_double_bit (gdbarch, 128);
2979 set_gdbarch_long_bit (gdbarch, 64);
2980 set_gdbarch_long_long_bit (gdbarch, 64);
2981 set_gdbarch_ptr_bit (gdbarch, 64);
2982 set_gdbarch_char_signed (gdbarch, 0);
2983 set_gdbarch_wchar_signed (gdbarch, 0);
2984 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2985 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2986 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2987
2988 /* Internal <-> external register number maps. */
2989 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2990
2991 /* Returning results. */
2992 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2993
2994 /* Disassembly. */
2995 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2996
2997 /* Virtual tables. */
2998 set_gdbarch_vbit_in_delta (gdbarch, 1);
2999
3000 /* Hook in the ABI-specific overrides, if they have been registered. */
3001 info.target_desc = tdesc;
3002 info.tdesc_data = tdesc_data;
3003 gdbarch_init_osabi (info, gdbarch);
3004
3005 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3006
3007 /* Add some default predicates. */
3008 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3009 dwarf2_append_unwinders (gdbarch);
3010 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3011
3012 frame_base_set_default (gdbarch, &aarch64_normal_base);
3013
3014 /* Now that we have tuned the configuration, set a few final
3015 things based on what the OS ABI has told us. */
3016
3017 if (tdep->jb_pc >= 0)
3018 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3019
3020 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3021
3022 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3023
3024 /* Add standard register aliases. */
3025 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3026 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3027 value_of_aarch64_user_reg,
3028 &aarch64_register_aliases[i].regnum);
3029
3030 return gdbarch;
3031 }
3032
3033 static void
3034 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3035 {
3036 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3037
3038 if (tdep == NULL)
3039 return;
3040
3041 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3042 paddress (gdbarch, tdep->lowest_pc));
3043 }
3044
3045 #if GDB_SELF_TEST
3046 namespace selftests
3047 {
3048 static void aarch64_process_record_test (void);
3049 }
3050 #endif
3051
3052 void
3053 _initialize_aarch64_tdep (void)
3054 {
3055 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3056 aarch64_dump_tdep);
3057
3058 /* Debug this file's internals. */
3059 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3060 Set AArch64 debugging."), _("\
3061 Show AArch64 debugging."), _("\
3062 When on, AArch64 specific debugging is enabled."),
3063 NULL,
3064 show_aarch64_debug,
3065 &setdebuglist, &showdebuglist);
3066
3067 #if GDB_SELF_TEST
3068 selftests::register_test ("aarch64-analyze-prologue",
3069 selftests::aarch64_analyze_prologue_test);
3070 selftests::register_test ("aarch64-process-record",
3071 selftests::aarch64_process_record_test);
3072 #endif
3073 }
3074
3075 /* AArch64 process record-replay related structures, defines, etc. */
3076
3077 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3078 do \
3079 { \
3080 unsigned int reg_len = LENGTH; \
3081 if (reg_len) \
3082 { \
3083 REGS = XNEWVEC (uint32_t, reg_len); \
3084 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3085 } \
3086 } \
3087 while (0)
3088
3089 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3090 do \
3091 { \
3092 unsigned int mem_len = LENGTH; \
3093 if (mem_len) \
3094 { \
3095 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3096 memcpy (&MEMS->len, &RECORD_BUF[0], \
3097 sizeof (struct aarch64_mem_r) * LENGTH); \
3098 } \
3099 } \
3100 while (0)
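/* Typical usage, as in the record handlers below: a handler collects
   the register numbers an instruction writes into a local scratch
   array, sets reg_rec_count, and then calls

     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   to copy the scratch array into a heap allocation owned by the
   insn_decode_record.  */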
3101
3102 /* AArch64 record/replay structures and enumerations. */
3103
3104 struct aarch64_mem_r
3105 {
3106 uint64_t len; /* Record length. */
3107 uint64_t addr; /* Memory address. */
3108 };
3109
3110 enum aarch64_record_result
3111 {
3112 AARCH64_RECORD_SUCCESS,
3113 AARCH64_RECORD_UNSUPPORTED,
3114 AARCH64_RECORD_UNKNOWN
3115 };
3116
3117 typedef struct insn_decode_record_t
3118 {
3119 struct gdbarch *gdbarch;
3120 struct regcache *regcache;
3121 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3122 uint32_t aarch64_insn; /* Insn to be recorded. */
3123 uint32_t mem_rec_count; /* Count of memory records. */
3124 uint32_t reg_rec_count; /* Count of register records. */
3125 uint32_t *aarch64_regs; /* Registers to be recorded. */
3126 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3127 } insn_decode_record;
3128
3129 /* Record handler for data processing - register instructions. */
3130
3131 static unsigned int
3132 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3133 {
3134 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3135 uint32_t record_buf[4];
3136
3137 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3138 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3139 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3140
3141 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3142 {
3143 uint8_t setflags;
3144
3145 /* Logical (shifted register). */
3146 if (insn_bits24_27 == 0x0a)
3147 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3148 /* Add/subtract. */
3149 else if (insn_bits24_27 == 0x0b)
3150 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3151 else
3152 return AARCH64_RECORD_UNKNOWN;
3153
3154 record_buf[0] = reg_rd;
3155 aarch64_insn_r->reg_rec_count = 1;
3156 if (setflags)
3157 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3158 }
3159 else
3160 {
3161 if (insn_bits24_27 == 0x0b)
3162 {
3163 /* Data-processing (3 source). */
3164 record_buf[0] = reg_rd;
3165 aarch64_insn_r->reg_rec_count = 1;
3166 }
3167 else if (insn_bits24_27 == 0x0a)
3168 {
3169 if (insn_bits21_23 == 0x00)
3170 {
3171 /* Add/subtract (with carry). */
3172 record_buf[0] = reg_rd;
3173 aarch64_insn_r->reg_rec_count = 1;
3174 if (bit (aarch64_insn_r->aarch64_insn, 29))
3175 {
3176 record_buf[1] = AARCH64_CPSR_REGNUM;
3177 aarch64_insn_r->reg_rec_count = 2;
3178 }
3179 }
3180 else if (insn_bits21_23 == 0x02)
3181 {
3182 /* Conditional compare (register) and conditional compare
3183 (immediate) instructions. */
3184 record_buf[0] = AARCH64_CPSR_REGNUM;
3185 aarch64_insn_r->reg_rec_count = 1;
3186 }
3187 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3188 {
3189 /* Conditional select. */
3190 /* Data-processing (2 source). */
3191 /* Data-processing (1 source). */
3192 record_buf[0] = reg_rd;
3193 aarch64_insn_r->reg_rec_count = 1;
3194 }
3195 else
3196 return AARCH64_RECORD_UNKNOWN;
3197 }
3198 }
3199
3200 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3201 record_buf);
3202 return AARCH64_RECORD_SUCCESS;
3203 }
3204
3205 /* Record handler for data processing - immediate instructions. */
3206
3207 static unsigned int
3208 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3209 {
3210 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3211 uint32_t record_buf[4];
3212
3213 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3214 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3215 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3216
3217 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3218 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3219 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3220 {
3221 record_buf[0] = reg_rd;
3222 aarch64_insn_r->reg_rec_count = 1;
3223 }
3224 else if (insn_bits24_27 == 0x01)
3225 {
3226 /* Add/Subtract (immediate). */
3227 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3228 record_buf[0] = reg_rd;
3229 aarch64_insn_r->reg_rec_count = 1;
3230 if (setflags)
3231 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3232 }
3233 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3234 {
3235 /* Logical (immediate). */
3236 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3237 record_buf[0] = reg_rd;
3238 aarch64_insn_r->reg_rec_count = 1;
3239 if (setflags)
3240 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3241 }
3242 else
3243 return AARCH64_RECORD_UNKNOWN;
3244
3245 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3246 record_buf);
3247 return AARCH64_RECORD_SUCCESS;
3248 }
3249
3250 /* Record handler for branch, exception generation and system instructions. */
3251
3252 static unsigned int
3253 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3254 {
3255 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3256 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3257 uint32_t record_buf[4];
3258
3259 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3260 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3261 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3262
3263 if (insn_bits28_31 == 0x0d)
3264 {
3265 /* Exception generation instructions. */
3266 if (insn_bits24_27 == 0x04)
3267 {
3268 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3269 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3270 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3271 {
3272 ULONGEST svc_number;
3273
3274 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3275 &svc_number);
3276 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3277 svc_number);
3278 }
3279 else
3280 return AARCH64_RECORD_UNSUPPORTED;
3281 }
3282 /* System instructions. */
3283 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3284 {
3285 uint32_t reg_rt, reg_crn;
3286
3287 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3288 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3289
3290 /* Record Rt for SYSL and MRS instructions. */
3291 if (bit (aarch64_insn_r->aarch64_insn, 21))
3292 {
3293 record_buf[0] = reg_rt;
3294 aarch64_insn_r->reg_rec_count = 1;
3295 }
3296 /* Record CPSR for HINT and MSR (immediate) instructions. */
3297 else if (reg_crn == 0x02 || reg_crn == 0x04)
3298 {
3299 record_buf[0] = AARCH64_CPSR_REGNUM;
3300 aarch64_insn_r->reg_rec_count = 1;
3301 }
3302 }
3303 /* Unconditional branch (register). */
3304 else if ((insn_bits24_27 & 0x0e) == 0x06)
3305 {
3306 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3307 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3308 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3309 }
3310 else
3311 return AARCH64_RECORD_UNKNOWN;
3312 }
3313 /* Unconditional branch (immediate). */
3314 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3315 {
3316 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3317 if (bit (aarch64_insn_r->aarch64_insn, 31))
3318 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3319 }
3320 else
3321 /* Compare & branch (immediate), Test & branch (immediate) and
3322 Conditional branch (immediate). */
3323 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3324
3325 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3326 record_buf);
3327 return AARCH64_RECORD_SUCCESS;
3328 }
3329
3330 /* Record handler for advanced SIMD load and store instructions. */
3331
3332 static unsigned int
3333 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3334 {
3335 CORE_ADDR address;
3336 uint64_t addr_offset = 0;
3337 uint32_t record_buf[24];
3338 uint64_t record_buf_mem[24];
3339 uint32_t reg_rn, reg_rt;
3340 uint32_t reg_index = 0, mem_index = 0;
3341 uint8_t opcode_bits, size_bits;
3342
3343 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3344 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3345 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3346 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3347 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3348
3349 if (record_debug)
3350 debug_printf ("Process record: Advanced SIMD load/store\n");
3351
3352 /* Load/store single structure. */
3353 if (bit (aarch64_insn_r->aarch64_insn, 24))
3354 {
3355 uint8_t sindex, scale, selem, esize, replicate = 0;
3356 scale = opcode_bits >> 2;
3357 selem = ((opcode_bits & 0x02)
3358 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3359 switch (scale)
3360 {
3361 case 1:
3362 if (size_bits & 0x01)
3363 return AARCH64_RECORD_UNKNOWN;
3364 break;
3365 case 2:
3366 if ((size_bits >> 1) & 0x01)
3367 return AARCH64_RECORD_UNKNOWN;
3368 if (size_bits & 0x01)
3369 {
3370 if (!((opcode_bits >> 1) & 0x01))
3371 scale = 3;
3372 else
3373 return AARCH64_RECORD_UNKNOWN;
3374 }
3375 break;
3376 case 3:
3377 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3378 {
3379 scale = size_bits;
3380 replicate = 1;
3381 break;
3382 }
3383 else
3384 return AARCH64_RECORD_UNKNOWN;
3385 default:
3386 break;
3387 }
3388 esize = 8 << scale;
3389 if (replicate)
3390 for (sindex = 0; sindex < selem; sindex++)
3391 {
3392 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3393 reg_rt = (reg_rt + 1) % 32;
3394 }
3395 else
3396 {
3397 for (sindex = 0; sindex < selem; sindex++)
3398 {
3399 if (bit (aarch64_insn_r->aarch64_insn, 22))
3400 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3401 else
3402 {
3403 record_buf_mem[mem_index++] = esize / 8;
3404 record_buf_mem[mem_index++] = address + addr_offset;
3405 }
3406 addr_offset = addr_offset + (esize / 8);
3407 reg_rt = (reg_rt + 1) % 32;
3408 }
3409 }
3410 }
3411 /* Load/store multiple structure. */
3412 else
3413 {
3414 uint8_t selem, esize, rpt, elements;
3415 uint8_t eindex, rindex;
3416
3417 esize = 8 << size_bits;
3418 if (bit (aarch64_insn_r->aarch64_insn, 30))
3419 elements = 128 / esize;
3420 else
3421 elements = 64 / esize;
3422
3423 switch (opcode_bits)
3424 {
3425 /* LD/ST4 (4 Registers). */
3426 case 0:
3427 rpt = 1;
3428 selem = 4;
3429 break;
3430 /* LD/ST1 (4 Registers). */
3431 case 2:
3432 rpt = 4;
3433 selem = 1;
3434 break;
3435 /* LD/ST3 (3 Registers). */
3436 case 4:
3437 rpt = 1;
3438 selem = 3;
3439 break;
3440 /* LD/ST1 (3 Registers). */
3441 case 6:
3442 rpt = 3;
3443 selem = 1;
3444 break;
3445 /* LD/ST1 (1 Register). */
3446 case 7:
3447 rpt = 1;
3448 selem = 1;
3449 break;
3450 /* LD/ST2 (2 Registers). */
3451 case 8:
3452 rpt = 1;
3453 selem = 2;
3454 break;
3455 /* LD/ST1 (2 Registers). */
3456 case 10:
3457 rpt = 2;
3458 selem = 1;
3459 break;
3460 default:
3461 return AARCH64_RECORD_UNSUPPORTED;
3462 break;
3463 }
3464 for (rindex = 0; rindex < rpt; rindex++)
3465 for (eindex = 0; eindex < elements; eindex++)
3466 {
3467 uint8_t reg_tt, sindex;
3468 reg_tt = (reg_rt + rindex) % 32;
3469 for (sindex = 0; sindex < selem; sindex++)
3470 {
3471 if (bit (aarch64_insn_r->aarch64_insn, 22))
3472 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3473 else
3474 {
3475 record_buf_mem[mem_index++] = esize / 8;
3476 record_buf_mem[mem_index++] = address + addr_offset;
3477 }
3478 addr_offset = addr_offset + (esize / 8);
3479 reg_tt = (reg_tt + 1) % 32;
3480 }
3481 }
3482 }
3483
3484 if (bit (aarch64_insn_r->aarch64_insn, 23))
3485 record_buf[reg_index++] = reg_rn;
3486
3487 aarch64_insn_r->reg_rec_count = reg_index;
3488 aarch64_insn_r->mem_rec_count = mem_index / 2;
3489 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3490 record_buf_mem);
3491 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3492 record_buf);
3493 return AARCH64_RECORD_SUCCESS;
3494 }
3495
3496 /* Record handler for load and store instructions. */
3497
3498 static unsigned int
3499 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3500 {
3501 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3502 uint8_t insn_bit23, insn_bit21;
3503 uint8_t opc, size_bits, ld_flag, vector_flag;
3504 uint32_t reg_rn, reg_rt, reg_rt2;
3505 uint64_t datasize, offset;
3506 uint32_t record_buf[8];
3507 uint64_t record_buf_mem[8];
3508 CORE_ADDR address;
3509
3510 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3511 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3512 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3513 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3514 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3515 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3516 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3517 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3518 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3519 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3520 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3521
3522 /* Load/store exclusive. */
3523 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3524 {
3525 if (record_debug)
3526 debug_printf ("Process record: load/store exclusive\n");
3527
3528 if (ld_flag)
3529 {
3530 record_buf[0] = reg_rt;
3531 aarch64_insn_r->reg_rec_count = 1;
3532 if (insn_bit21)
3533 {
3534 record_buf[1] = reg_rt2;
3535 aarch64_insn_r->reg_rec_count = 2;
3536 }
3537 }
3538 else
3539 {
3540 if (insn_bit21)
3541 datasize = (8 << size_bits) * 2;
3542 else
3543 datasize = (8 << size_bits);
3544 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3545 &address);
3546 record_buf_mem[0] = datasize / 8;
3547 record_buf_mem[1] = address;
3548 aarch64_insn_r->mem_rec_count = 1;
3549 if (!insn_bit23)
3550 {
3551 /* Save register rs. */
3552 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3553 aarch64_insn_r->reg_rec_count = 1;
3554 }
3555 }
3556 }
3557 /* Decode load register (literal) instructions. */
3558 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3559 {
3560 if (record_debug)
3561 debug_printf ("Process record: load register (literal)\n");
3562 if (vector_flag)
3563 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3564 else
3565 record_buf[0] = reg_rt;
3566 aarch64_insn_r->reg_rec_count = 1;
3567 }
3568 /* Decode all types of load/store pair instructions. */
3569 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3570 {
3571 if (record_debug)
3572 debug_printf ("Process record: load/store pair\n");
3573
3574 if (ld_flag)
3575 {
3576 if (vector_flag)
3577 {
3578 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3579 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3580 }
3581 else
3582 {
3583 record_buf[0] = reg_rt;
3584 record_buf[1] = reg_rt2;
3585 }
3586 aarch64_insn_r->reg_rec_count = 2;
3587 }
3588 else
3589 {
3590 uint16_t imm7_off;
3591 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3592 if (!vector_flag)
3593 size_bits = size_bits >> 1;
3594 datasize = 8 << (2 + size_bits);
3595 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3596 offset = offset << (2 + size_bits);
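/* Worked example (illustrative): a 64-bit STP with pre-indexed
   imm7 = 0x7c encodes -4 (bit 0x40 is the sign), so the code above
   computes (~0x7c & 0x7f) + 1 = 4 and scales it to 4 << 3 = 32;
   bit 0x40 then selects subtraction below, addressing 32 bytes
   below the base register.  */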
3597 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3598 &address);
3599 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3600 {
3601 if (imm7_off & 0x40)
3602 address = address - offset;
3603 else
3604 address = address + offset;
3605 }
3606
3607 record_buf_mem[0] = datasize / 8;
3608 record_buf_mem[1] = address;
3609 record_buf_mem[2] = datasize / 8;
3610 record_buf_mem[3] = address + (datasize / 8);
3611 aarch64_insn_r->mem_rec_count = 2;
3612 }
3613 if (bit (aarch64_insn_r->aarch64_insn, 23))
3614 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3615 }
3616 /* Load/store register (unsigned immediate) instructions. */
3617 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3618 {
3619 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3620 if (!(opc >> 1))
3621 {
3622 if (opc & 0x01)
3623 ld_flag = 0x01;
3624 else
3625 ld_flag = 0x0;
3626 }
3627 else
3628 {
3629 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3630 {
3631 /* PRFM (immediate). */
3632 return AARCH64_RECORD_SUCCESS;
3633 }
3634 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3635 {
3636 /* LDRSW (immediate). */
3637 ld_flag = 0x1;
3638 }
3639 else
3640 {
3641 if (opc & 0x01)
3642 ld_flag = 0x01;
3643 else
3644 ld_flag = 0x0;
3645 }
3646 }
3647
3648 if (record_debug)
3649 {
3650 debug_printf ("Process record: load/store (unsigned immediate):"
3651 " size %x V %d opc %x\n", size_bits, vector_flag,
3652 opc);
3653 }
3654
3655 if (!ld_flag)
3656 {
3657 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3658 datasize = 8 << size_bits;
3659 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3660 &address);
3661 offset = offset << size_bits;
3662 address = address + offset;
3663
3664 record_buf_mem[0] = datasize >> 3;
3665 record_buf_mem[1] = address;
3666 aarch64_insn_r->mem_rec_count = 1;
3667 }
3668 else
3669 {
3670 if (vector_flag)
3671 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3672 else
3673 record_buf[0] = reg_rt;
3674 aarch64_insn_r->reg_rec_count = 1;
3675 }
3676 }
3677 /* Load/store register (register offset) instructions. */
3678 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3679 && insn_bits10_11 == 0x02 && insn_bit21)
3680 {
3681 if (record_debug)
3682 debug_printf ("Process record: load/store (register offset)\n");
3683 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3684 if (!(opc >> 1))
3685 if (opc & 0x01)
3686 ld_flag = 0x01;
3687 else
3688 ld_flag = 0x0;
3689 else
3690 if (size_bits != 0x03)
3691 ld_flag = 0x01;
3692 else
3693 return AARCH64_RECORD_UNKNOWN;
3694
3695 if (!ld_flag)
3696 {
3697 ULONGEST reg_rm_val;
3698
3699 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3700 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3701 if (bit (aarch64_insn_r->aarch64_insn, 12))
3702 offset = reg_rm_val << size_bits;
3703 else
3704 offset = reg_rm_val;
3705 datasize = 8 << size_bits;
3706 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3707 &address);
3708 address = address + offset;
3709 record_buf_mem[0] = datasize >> 3;
3710 record_buf_mem[1] = address;
3711 aarch64_insn_r->mem_rec_count = 1;
3712 }
3713 else
3714 {
3715 if (vector_flag)
3716 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3717 else
3718 record_buf[0] = reg_rt;
3719 aarch64_insn_r->reg_rec_count = 1;
3720 }
3721 }
3722 /* Load/store register (immediate and unprivileged) instructions. */
3723 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3724 && !insn_bit21)
3725 {
3726 if (record_debug)
3727 {
3728 debug_printf ("Process record: load/store "
3729 "(immediate and unprivileged)\n");
3730 }
3731 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3732 if (!(opc >> 1))
3733 if (opc & 0x01)
3734 ld_flag = 0x01;
3735 else
3736 ld_flag = 0x0;
3737 else
3738 if (size_bits != 0x03)
3739 ld_flag = 0x01;
3740 else
3741 return AARCH64_RECORD_UNKNOWN;
3742
3743 if (!ld_flag)
3744 {
3745 uint16_t imm9_off;
3746 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3747 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
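/* E.g. (illustrative): imm9 = 0x1fc encodes -4; the sign bit 0x0100
   is set, so offset = ((~0x1fc) & 0x1ff) + 1 = 4 and the base
   address is decremented rather than incremented below.  */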
3748 datasize = 8 << size_bits;
3749 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3750 &address);
3751 if (insn_bits10_11 != 0x01)
3752 {
3753 if (imm9_off & 0x0100)
3754 address = address - offset;
3755 else
3756 address = address + offset;
3757 }
3758 record_buf_mem[0] = datasize >> 3;
3759 record_buf_mem[1] = address;
3760 aarch64_insn_r->mem_rec_count = 1;
3761 }
3762 else
3763 {
3764 if (vector_flag)
3765 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3766 else
3767 record_buf[0] = reg_rt;
3768 aarch64_insn_r->reg_rec_count = 1;
3769 }
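/* The post- and pre-indexed forms (bits [11:10] == 01 or 11) write the
   updated address back to the base register, so record Rn as well.  */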
3770 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3771 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3772 }
3773 /* Advanced SIMD load/store instructions. */
3774 else
3775 return aarch64_record_asimd_load_store (aarch64_insn_r);
3776
3777 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3778 record_buf_mem);
3779 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3780 record_buf);
3781 return AARCH64_RECORD_SUCCESS;
3782 }
3783
3784 /* Record handler for data processing SIMD and floating point instructions. */
3785
3786 static unsigned int
3787 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3788 {
3789 uint8_t insn_bit21, opcode, rmode, reg_rd;
3790 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3791 uint8_t insn_bits11_14;
3792 uint32_t record_buf[2];
3793
3794 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3795 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3796 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3797 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3798 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3799 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3800 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3801 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3802 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3803
3804 if (record_debug)
3805 debug_printf ("Process record: data processing SIMD/FP: ");
3806
3807 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3808 {
3809 /* Floating point - fixed point conversion instructions. */
3810 if (!insn_bit21)
3811 {
3812 if (record_debug)
3813 debug_printf ("FP - fixed point conversion");
3814
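/* opcode 00x with rmode 11 is FCVTZS/FCVTZU to a general register; the
   remaining fixed-point conversions (SCVTF/UCVTF) write a SIMD&FP
   register.  */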
3815 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3816 record_buf[0] = reg_rd;
3817 else
3818 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3819 }
3820 /* Floating point - conditional compare instructions. */
3821 else if (insn_bits10_11 == 0x01)
3822 {
3823 if (record_debug)
3824 debug_printf ("FP - conditional compare");
3825
3826 record_buf[0] = AARCH64_CPSR_REGNUM;
3827 }
3828 /* Floating point - data processing (2-source) and
3829 conditional select instructions. */
3830 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3831 {
3832 if (record_debug)
3833 debug_printf ("FP - DP (2-source)/conditional select");
3834
3835 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3836 }
3837 else if (insn_bits10_11 == 0x00)
3838 {
3839 /* Floating point - immediate instructions. */
3840 if ((insn_bits12_15 & 0x01) == 0x01
3841 || (insn_bits12_15 & 0x07) == 0x04)
3842 {
3843 if (record_debug)
3844 debug_printf ("FP - immediate");
3845 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3846 }
3847 /* Floating point - compare instructions. */
3848 else if ((insn_bits12_15 & 0x03) == 0x02)
3849 {
3850 if (record_debug)
3851 debug_printf ("FP - compare");
3852 record_buf[0] = AARCH64_CPSR_REGNUM;
3853 }
3854 /* Floating point - integer conversions instructions. */
3855 else if (insn_bits12_15 == 0x00)
3856 {
3857 /* Convert float to integer instruction. */
3858 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3859 {
3860 if (record_debug)
3861 debug_printf ("float to int conversion");
3862
3863 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3864 }
3865 /* Convert integer to float instruction. */
3866 else if ((opcode >> 1) == 0x01 && !rmode)
3867 {
3868 if (record_debug)
3869 debug_printf ("int to float conversion");
3870
3871 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3872 }
3873 /* Move float to integer instruction. */
3874 else if ((opcode >> 1) == 0x03)
3875 {
3876 if (record_debug)
3877 debug_printf ("move float to int");
3878
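/* opcode 110 is FMOV to a general register, opcode 111 is FMOV to a
   SIMD&FP register.  */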
3879 if (!(opcode & 0x01))
3880 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3881 else
3882 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3883 }
3884 else
3885 return AARCH64_RECORD_UNKNOWN;
3886 }
3887 else
3888 return AARCH64_RECORD_UNKNOWN;
3889 }
3890 else
3891 return AARCH64_RECORD_UNKNOWN;
3892 }
3893 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3894 {
3895 if (record_debug)
3896 debug_printf ("SIMD copy");
3897
3898 /* Advanced SIMD copy instructions. */
3899 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3900 && !bit (aarch64_insn_r->aarch64_insn, 15)
3901 && bit (aarch64_insn_r->aarch64_insn, 10))
3902 {
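/* imm4 (bits [14:11]) values 0101 and 0111 are SMOV and UMOV, which
   write a general register; the other copy forms (DUP, INS) write a
   SIMD&FP register.  */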
3903 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3904 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3905 else
3906 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3907 }
3908 else
3909 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3910 }
3911 /* All remaining floating point or advanced SIMD instructions. */
3912 else
3913 {
3914 if (record_debug)
3915 debug_printf ("all remaining");
3916
3917 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3918 }
3919
3920 if (record_debug)
3921 debug_printf ("\n");
3922
3923 aarch64_insn_r->reg_rec_count++;
3924 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3925 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3926 record_buf);
3927 return AARCH64_RECORD_SUCCESS;
3928 }
3929
3930 /* Decode the instruction type and invoke its record handler. */
3931
3932 static unsigned int
3933 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3934 {
3935 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3936
3937 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3938 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3939 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3940 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3941
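/* Bits [28:25] (op0 in the ARM Architecture Reference Manual) select the
   major instruction class; only the bits needed to tell the classes
   apart are tested below.  */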
3942 /* Data processing - immediate instructions. */
3943 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3944 return aarch64_record_data_proc_imm (aarch64_insn_r);
3945
3946 /* Branch, exception generation and system instructions. */
3947 if (ins_bit26 && !ins_bit27 && ins_bit28)
3948 return aarch64_record_branch_except_sys (aarch64_insn_r);
3949
3950 /* Load and store instructions. */
3951 if (!ins_bit25 && ins_bit27)
3952 return aarch64_record_load_store (aarch64_insn_r);
3953
3954 /* Data processing - register instructions. */
3955 if (ins_bit25 && !ins_bit26 && ins_bit27)
3956 return aarch64_record_data_proc_reg (aarch64_insn_r);
3957
3958 /* Data processing - SIMD and floating point instructions. */
3959 if (ins_bit25 && ins_bit26 && ins_bit27)
3960 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3961
3962 return AARCH64_RECORD_UNSUPPORTED;
3963 }
3964
3965 /* Free the register and memory record buffers. */
3966
3967 static void
3968 deallocate_reg_mem (insn_decode_record *record)
3969 {
3970 xfree (record->aarch64_regs);
3971 xfree (record->aarch64_mems);
3972 }
3973
3974 #if GDB_SELF_TEST
3975 namespace selftests {
3976
3977 static void
3978 aarch64_process_record_test (void)
3979 {
3980 struct gdbarch_info info;
3981 uint32_t ret;
3982
3983 gdbarch_info_init (&info);
3984 info.bfd_arch_info = bfd_scan_arch ("aarch64");
3985
3986 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
3987 SELF_CHECK (gdbarch != NULL);
3988
3989 insn_decode_record aarch64_record;
3990
3991 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3992 aarch64_record.regcache = NULL;
3993 aarch64_record.this_addr = 0;
3994 aarch64_record.gdbarch = gdbarch;
3995
3996 /* 20 00 80 f9 prfm pldl1keep, [x1] */
3997 aarch64_record.aarch64_insn = 0xf9800020;
3998 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3999 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
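/* PRFM is a hint: it writes neither registers nor memory, so nothing
   should have been recorded.  */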
4000 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4001 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4002
4003 deallocate_reg_mem (&aarch64_record);
4004 }
4005
4006 } // namespace selftests
4007 #endif /* GDB_SELF_TEST */
4008
4009 /* Parse the current instruction, and record in record_arch_list the
4010 registers and memory locations that the instruction will change.
4011 Return -1 if something goes wrong. */
4012
4013 int
4014 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4015 CORE_ADDR insn_addr)
4016 {
4017 uint32_t rec_no = 0;
4018 uint8_t insn_size = 4;
4019 int ret = 0;
4020 gdb_byte buf[insn_size];
4021 insn_decode_record aarch64_record;
4022
4023 memset (&buf[0], 0, insn_size);
4024 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4025 target_read_memory (insn_addr, &buf[0], insn_size);
4026 aarch64_record.aarch64_insn
4027 = (uint32_t) extract_unsigned_integer (&buf[0],
4028 insn_size,
4029 gdbarch_byte_order (gdbarch));
4030 aarch64_record.regcache = regcache;
4031 aarch64_record.this_addr = insn_addr;
4032 aarch64_record.gdbarch = gdbarch;
4033
4034 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4035 if (ret == AARCH64_RECORD_UNSUPPORTED)
4036 {
4037 printf_unfiltered (_("Process record does not support instruction "
4038 "0x%0x at address %s.\n"),
4039 aarch64_record.aarch64_insn,
4040 paddress (gdbarch, insn_addr));
4041 ret = -1;
4042 }
4043
4044 if (0 == ret)
4045 {
4046 /* Record registers. */
4047 record_full_arch_list_add_reg (aarch64_record.regcache,
4048 AARCH64_PC_REGNUM);
4049 /* Always record register CPSR. */
4050 record_full_arch_list_add_reg (aarch64_record.regcache,
4051 AARCH64_CPSR_REGNUM);
4052 if (aarch64_record.aarch64_regs)
4053 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4054 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4055 aarch64_record.aarch64_regs[rec_no]))
4056 ret = -1;
4057
4058 /* Record memories. */
4059 if (aarch64_record.aarch64_mems)
4060 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4061 if (record_full_arch_list_add_mem
4062 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4063 aarch64_record.aarch64_mems[rec_no].len))
4064 ret = -1;
4065
4066 if (record_full_arch_list_add_end ())
4067 ret = -1;
4068 }
4069
4070 deallocate_reg_mem (&aarch64_record);
4071 return ret;
4072 }