/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
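
/* For example, submask (4) is 0x1f, so bits (insn, 5, 9) extracts the
   five-bit field insn[9:5], inclusive at both ends.  Applied to the RET
   encoding 0xd65f03c0, whose Rn field occupies bits [9:5],
   bits (0xd65f03c0, 5, 9) yields 30, i.e. the link register x30.  */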

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

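/* Pseudo registers are numbered relative to gdbarch_num_regs; for
   example, the D5 pseudo register is addressed as
   gdbarch_num_regs (gdbarch) + AARCH64_D0_REGNUM + 5, which is the
   convention the saved_regs bookkeeping below relies on.  */
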
/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
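      /* For example, the prologue instruction "sub sp, sp, #0x30" is an
         addsub_imm with rd == rn == SP, so the tracked value becomes
         regs[AARCH64_SP_REGNUM] = pv_add_constant (<entry sp>, -0x30),
         i.e. a 48-byte frame allocation.  */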
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
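      /* For example, "stp x29, x30, [sp,#-272]!" (as in the self test
         below) records x29 at <sp - 272> and x30 at <sp - 264> in the
         pv area, then updates regs[rn] with the decremented address
         because the pre-indexed form sets addr.writeback.  */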
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm),
                         is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
                            &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as
             for scalar types), but cap the maximum alignment at
             128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
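
/* For example, "struct { char c; double d; }" yields 8 here, the
   largest field alignment, while a 16-byte vector yields 16, the
   AAPCS64 maximum.  */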

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
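
/* For example, "struct { float x, y, z; }" is an HFA (at most four
   members, all TYPE_CODE_FLT and of equal size), whereas
   "struct { float x; double y; }" is not, because the member sizes
   differ.  */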

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
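
/* For example, a 16-byte integer argument arriving with info->ngrn == 0
   is split across x0 and x1 in X_REGISTER_SIZE chunks; the caller
   (pass_in_x_or_stack) then advances ngrn past the registers used.  */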

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of the V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
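
/* For example, a 12-byte struct pushed at nsaa == 0 occupies bytes
   [0, 12); with its 8-byte stack alignment a 4-byte padding item
   follows, leaving nsaa == 16.  */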

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto the
   stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare registers.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface, we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fall back
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
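
/* For example, aarch64_frame_align rounds 0x7ffffffe4c down to
   0x7ffffffe40, keeping the inferior call's stack pointer 16-byte
   aligned as AAPCS64 requires.  */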

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
\f

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
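
/* For example, "struct { double a, b, c; }" is 24 bytes but an HFA, so
   it is returned in v0-v2; "struct { int64_t a, b, c; }" is the same
   size but not an HFA, so it is returned in memory via the X8
   pointer.  */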
1923
1924 /* Write into appropriate registers a function return value of type
1925 TYPE, given in virtual format. */
1926
1927 static void
1928 aarch64_store_return_value (struct type *type, struct regcache *regs,
1929 const gdb_byte *valbuf)
1930 {
1931 struct gdbarch *gdbarch = get_regcache_arch (regs);
1932 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1933
1934 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1935 {
1936 bfd_byte buf[V_REGISTER_SIZE];
1937 int len = TYPE_LENGTH (type);
1938
1939 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1940 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1941 }
1942 else if (TYPE_CODE (type) == TYPE_CODE_INT
1943 || TYPE_CODE (type) == TYPE_CODE_CHAR
1944 || TYPE_CODE (type) == TYPE_CODE_BOOL
1945 || TYPE_CODE (type) == TYPE_CODE_PTR
1946 || TYPE_CODE (type) == TYPE_CODE_REF
1947 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1948 {
1949 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1950 {
1951 /* Values of one word or less are zero/sign-extended and
1952 returned in X0. */
1953 bfd_byte tmpbuf[X_REGISTER_SIZE];
1954 LONGEST val = unpack_long (type, valbuf);
1955
1956 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1957 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1958 }
1959 else
1960 {
1961 /* Integral values greater than one word are stored in
1962 consecutive registers starting with X0.  This will always
1963 be a multiple of the register size. */
1964 int len = TYPE_LENGTH (type);
1965 int regno = AARCH64_X0_REGNUM;
1966
1967 while (len > 0)
1968 {
1969 regcache_cooked_write (regs, regno++, valbuf);
1970 len -= X_REGISTER_SIZE;
1971 valbuf += X_REGISTER_SIZE;
1972 }
1973 }
1974 }
1975 else if (is_hfa_or_hva (type))
1976 {
1977 int elements = TYPE_NFIELDS (type);
1978 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1979 int len = TYPE_LENGTH (member_type);
1980 int i;
1981
1982 for (i = 0; i < elements; i++)
1983 {
1984 int regno = AARCH64_V0_REGNUM + i;
1985 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1986
1987 if (aarch64_debug)
1988 {
1989 debug_printf ("write HFA or HVA return value element %d to %s\n",
1990 i + 1,
1991 gdbarch_register_name (gdbarch, regno));
1992 }
1993
1994 memcpy (tmpbuf, valbuf, len);
1995 regcache_cooked_write (regs, regno, tmpbuf);
1996 valbuf += len;
1997 }
1998 }
1999 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2000 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2001 {
2002 /* Short vector. */
2003 gdb_byte buf[V_REGISTER_SIZE];
2004
2005 memcpy (buf, valbuf, TYPE_LENGTH (type));
2006 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2007 }
2008 else
2009 {
2010 /* For a structure or union the behaviour is as if the value had
2011 been stored to word-aligned memory and then loaded into
2012 registers with 64-bit load instruction(s). */
2013 int len = TYPE_LENGTH (type);
2014 int regno = AARCH64_X0_REGNUM;
2015 bfd_byte tmpbuf[X_REGISTER_SIZE];
2016
2017 while (len > 0)
2018 {
2019 memcpy (tmpbuf, valbuf,
2020 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2021 regcache_cooked_write (regs, regno++, tmpbuf);
2022 len -= X_REGISTER_SIZE;
2023 valbuf += X_REGISTER_SIZE;
2024 }
2025 }
2026 }
2027
2028 /* Implement the "return_value" gdbarch method. */
2029
2030 static enum return_value_convention
2031 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2032 struct type *valtype, struct regcache *regcache,
2033 gdb_byte *readbuf, const gdb_byte *writebuf)
2034 {
2035
2036 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2037 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2038 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2039 {
2040 if (aarch64_return_in_memory (gdbarch, valtype))
2041 {
2042 if (aarch64_debug)
2043 debug_printf ("return value in memory\n");
2044 return RETURN_VALUE_STRUCT_CONVENTION;
2045 }
2046 }
2047
2048 if (writebuf)
2049 aarch64_store_return_value (valtype, regcache, writebuf);
2050
2051 if (readbuf)
2052 aarch64_extract_return_value (valtype, regcache, readbuf);
2053
2054 if (aarch64_debug)
2055 debug_printf ("return value in registers\n");
2056
2057 return RETURN_VALUE_REGISTER_CONVENTION;
2058 }
2059
2060 /* Implement the "get_longjmp_target" gdbarch method. */
2061
2062 static int
2063 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2064 {
2065 CORE_ADDR jb_addr;
2066 gdb_byte buf[X_REGISTER_SIZE];
2067 struct gdbarch *gdbarch = get_frame_arch (frame);
2068 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2069 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2070
2071 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2072
2073 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2074 X_REGISTER_SIZE))
2075 return 0;
2076
2077 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2078 return 1;
2079 }
2080
2081 /* Implement the "gen_return_address" gdbarch method. */
2082
2083 static void
2084 aarch64_gen_return_address (struct gdbarch *gdbarch,
2085 struct agent_expr *ax, struct axs_value *value,
2086 CORE_ADDR scope)
2087 {
2088 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2089 value->kind = axs_lvalue_register;
2090 value->u.reg = AARCH64_LR_REGNUM;
2091 }
2092 \f
2093
2094 /* Return the pseudo register name corresponding to register regnum. */
2095
2096 static const char *
2097 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2098 {
2099 static const char *const q_name[] =
2100 {
2101 "q0", "q1", "q2", "q3",
2102 "q4", "q5", "q6", "q7",
2103 "q8", "q9", "q10", "q11",
2104 "q12", "q13", "q14", "q15",
2105 "q16", "q17", "q18", "q19",
2106 "q20", "q21", "q22", "q23",
2107 "q24", "q25", "q26", "q27",
2108 "q28", "q29", "q30", "q31",
2109 };
2110
2111 static const char *const d_name[] =
2112 {
2113 "d0", "d1", "d2", "d3",
2114 "d4", "d5", "d6", "d7",
2115 "d8", "d9", "d10", "d11",
2116 "d12", "d13", "d14", "d15",
2117 "d16", "d17", "d18", "d19",
2118 "d20", "d21", "d22", "d23",
2119 "d24", "d25", "d26", "d27",
2120 "d28", "d29", "d30", "d31",
2121 };
2122
2123 static const char *const s_name[] =
2124 {
2125 "s0", "s1", "s2", "s3",
2126 "s4", "s5", "s6", "s7",
2127 "s8", "s9", "s10", "s11",
2128 "s12", "s13", "s14", "s15",
2129 "s16", "s17", "s18", "s19",
2130 "s20", "s21", "s22", "s23",
2131 "s24", "s25", "s26", "s27",
2132 "s28", "s29", "s30", "s31",
2133 };
2134
2135 static const char *const h_name[] =
2136 {
2137 "h0", "h1", "h2", "h3",
2138 "h4", "h5", "h6", "h7",
2139 "h8", "h9", "h10", "h11",
2140 "h12", "h13", "h14", "h15",
2141 "h16", "h17", "h18", "h19",
2142 "h20", "h21", "h22", "h23",
2143 "h24", "h25", "h26", "h27",
2144 "h28", "h29", "h30", "h31",
2145 };
2146
2147 static const char *const b_name[] =
2148 {
2149 "b0", "b1", "b2", "b3",
2150 "b4", "b5", "b6", "b7",
2151 "b8", "b9", "b10", "b11",
2152 "b12", "b13", "b14", "b15",
2153 "b16", "b17", "b18", "b19",
2154 "b20", "b21", "b22", "b23",
2155 "b24", "b25", "b26", "b27",
2156 "b28", "b29", "b30", "b31",
2157 };
2158
2159 regnum -= gdbarch_num_regs (gdbarch);
2160
2161 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2162 return q_name[regnum - AARCH64_Q0_REGNUM];
2163
2164 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2165 return d_name[regnum - AARCH64_D0_REGNUM];
2166
2167 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2168 return s_name[regnum - AARCH64_S0_REGNUM];
2169
2170 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2171 return h_name[regnum - AARCH64_H0_REGNUM];
2172
2173 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2174 return b_name[regnum - AARCH64_B0_REGNUM];
2175
2176 internal_error (__FILE__, __LINE__,
2177 _("aarch64_pseudo_register_name: bad register number %d"),
2178 regnum);
2179 }
2180
2181 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2182
2183 static struct type *
2184 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2185 {
2186 regnum -= gdbarch_num_regs (gdbarch);
2187
2188 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2189 return aarch64_vnq_type (gdbarch);
2190
2191 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2192 return aarch64_vnd_type (gdbarch);
2193
2194 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2195 return aarch64_vns_type (gdbarch);
2196
2197 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2198 return aarch64_vnh_type (gdbarch);
2199
2200 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2201 return aarch64_vnb_type (gdbarch);
2202
2203 internal_error (__FILE__, __LINE__,
2204 _("aarch64_pseudo_register_type: bad register number %d"),
2205 regnum);
2206 }
2207
2208 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2209
2210 static int
2211 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2212 struct reggroup *group)
2213 {
2214 regnum -= gdbarch_num_regs (gdbarch);
2215
2216 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2217 return group == all_reggroup || group == vector_reggroup;
2218 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2219 return (group == all_reggroup || group == vector_reggroup
2220 || group == float_reggroup);
2221 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2222 return (group == all_reggroup || group == vector_reggroup
2223 || group == float_reggroup);
2224 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2225 return group == all_reggroup || group == vector_reggroup;
2226 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2227 return group == all_reggroup || group == vector_reggroup;
2228
2229 return group == all_reggroup;
2230 }
2231
2232 /* Implement the "pseudo_register_read_value" gdbarch method. */
2233
2234 static struct value *
2235 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2236 struct regcache *regcache,
2237 int regnum)
2238 {
2239 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2240 struct value *result_value;
2241 gdb_byte *buf;
2242
2243 result_value = allocate_value (register_type (gdbarch, regnum));
2244 VALUE_LVAL (result_value) = lval_register;
2245 VALUE_REGNUM (result_value) = regnum;
2246 buf = value_contents_raw (result_value);
2247
2248 regnum -= gdbarch_num_regs (gdbarch);
2249
2250 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2251 {
2252 enum register_status status;
2253 unsigned v_regnum;
2254
2255 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2256 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2257 if (status != REG_VALID)
2258 mark_value_bytes_unavailable (result_value, 0,
2259 TYPE_LENGTH (value_type (result_value)));
2260 else
2261 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2262 return result_value;
2263 }
2264
2265 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2266 {
2267 enum register_status status;
2268 unsigned v_regnum;
2269
2270 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2271 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2272 if (status != REG_VALID)
2273 mark_value_bytes_unavailable (result_value, 0,
2274 TYPE_LENGTH (value_type (result_value)));
2275 else
2276 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2277 return result_value;
2278 }
2279
2280 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2281 {
2282 enum register_status status;
2283 unsigned v_regnum;
2284
2285 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2286 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2287 if (status != REG_VALID)
2288 mark_value_bytes_unavailable (result_value, 0,
2289 TYPE_LENGTH (value_type (result_value)));
2290 else
2291 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2292 return result_value;
2293 }
2294
2295 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2296 {
2297 enum register_status status;
2298 unsigned v_regnum;
2299
2300 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2301 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2302 if (status != REG_VALID)
2303 mark_value_bytes_unavailable (result_value, 0,
2304 TYPE_LENGTH (value_type (result_value)));
2305 else
2306 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2307 return result_value;
2308 }
2309
2310 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2311 {
2312 enum register_status status;
2313 unsigned v_regnum;
2314
2315 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2316 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2317 if (status != REG_VALID)
2318 mark_value_bytes_unavailable (result_value, 0,
2319 TYPE_LENGTH (value_type (result_value)));
2320 else
2321 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2322 return result_value;
2323 }
2324
2325 gdb_assert_not_reached ("regnum out of bounds");
2326 }
2327
2328 /* Implement the "pseudo_register_write" gdbarch method. */
2329
2330 static void
2331 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2332 int regnum, const gdb_byte *buf)
2333 {
2334 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2335
2336 /* Ensure the register buffer is zero; we want GDB writes of the
2337 various 'scalar' pseudo registers to behave like architectural
2338 writes: register-width bytes are written and the remainder is
2339 set to zero. */
2340 memset (reg_buf, 0, sizeof (reg_buf));
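
  /* For example (illustrative values): writing the 4-byte pseudo S0
     through this function stores those 4 bytes at the bottom of the
     16-byte V0 image and leaves the remaining 12 bytes zero, just as
     an architectural write to S0 would clear the upper bits of V0.  */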
2341
2342 regnum -= gdbarch_num_regs (gdbarch);
2343
2344 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2345 {
2346 /* pseudo Q registers */
2347 unsigned v_regnum;
2348
2349 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2350 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2351 regcache_raw_write (regcache, v_regnum, reg_buf);
2352 return;
2353 }
2354
2355 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2356 {
2357 /* pseudo D registers */
2358 unsigned v_regnum;
2359
2360 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2361 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2362 regcache_raw_write (regcache, v_regnum, reg_buf);
2363 return;
2364 }
2365
2366 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2367 {
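/* pseudo S registers */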
2368 unsigned v_regnum;
2369
2370 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2371 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2372 regcache_raw_write (regcache, v_regnum, reg_buf);
2373 return;
2374 }
2375
2376 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2377 {
2378 /* pseudo H registers */
2379 unsigned v_regnum;
2380
2381 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2382 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2383 regcache_raw_write (regcache, v_regnum, reg_buf);
2384 return;
2385 }
2386
2387 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2388 {
2389 /* pseudo B registers */
2390 unsigned v_regnum;
2391
2392 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2393 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2394 regcache_raw_write (regcache, v_regnum, reg_buf);
2395 return;
2396 }
2397
2398 gdb_assert_not_reached ("regnum out of bounds");
2399 }
2400
2401 /* Callback function for user_reg_add. */
2402
2403 static struct value *
2404 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2405 {
2406 const int *reg_p = (const int *) baton;
2407
2408 return value_of_register (*reg_p, frame);
2409 }
2410 \f
2411
2412 /* Implement the "software_single_step" gdbarch method, needed to
2413 single step through atomic sequences on AArch64. */
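
/* A typical sequence this function must step over looks like the
   following (illustrative, not from the GDB sources):

     retry:
       ldaxr  w1, [x0]        ; Load Exclusive opens the sequence.
       cmp    w1, w3
       b.ne   out             ; Conditional exit from the sequence.
       stlxr  w2, w4, [x0]    ; Store Exclusive closes it.
       ...
     out:

   A breakpoint inside the sequence would break the exclusive
   monitor, so instead one breakpoint is placed after the closing
   Store Exclusive and, when a conditional branch is found, another
   at its destination if that lies outside the sequence.  */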
2414
2415 static VEC (CORE_ADDR) *
2416 aarch64_software_single_step (struct regcache *regcache)
2417 {
2418 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2419 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2420 const int insn_size = 4;
2421 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2422 CORE_ADDR pc = regcache_read_pc (regcache);
2423 CORE_ADDR breaks[2] = { -1, -1 };
2424 CORE_ADDR loc = pc;
2425 CORE_ADDR closing_insn = 0;
2426 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2427 byte_order_for_code);
2428 int index;
2429 int insn_count;
2430 int bc_insn_count = 0; /* Conditional branch instruction count. */
2431 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2432 aarch64_inst inst;
2433 VEC (CORE_ADDR) *next_pcs = NULL;
2434
2435 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2436 return NULL;
2437
2438 /* Look for a Load Exclusive instruction which begins the sequence. */
2439 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2440 return NULL;
2441
2442 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2443 {
2444 loc += insn_size;
2445 insn = read_memory_unsigned_integer (loc, insn_size,
2446 byte_order_for_code);
2447
2448 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2449 return NULL;
2450 /* Check if the instruction is a conditional branch. */
2451 if (inst.opcode->iclass == condbranch)
2452 {
2453 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2454
2455 if (bc_insn_count >= 1)
2456 return NULL;
2457
2458 /* It is, so we'll try to set a breakpoint at the destination. */
2459 breaks[1] = loc + inst.operands[0].imm.value;
2460
2461 bc_insn_count++;
2462 last_breakpoint++;
2463 }
2464
2465 /* Look for the Store Exclusive which closes the atomic sequence. */
2466 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2467 {
2468 closing_insn = loc;
2469 break;
2470 }
2471 }
2472
2473 /* We didn't find a closing Store Exclusive instruction, fall back. */
2474 if (!closing_insn)
2475 return NULL;
2476
2477 /* Insert breakpoint after the end of the atomic sequence. */
2478 breaks[0] = loc + insn_size;
2479
2480 /* Check for duplicated breakpoints, and also check that the second
2481 breakpoint is not within the atomic sequence. */
2482 if (last_breakpoint
2483 && (breaks[1] == breaks[0]
2484 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2485 last_breakpoint = 0;
2486
2487 /* Insert the breakpoint at the end of the sequence, and one at the
2488 destination of the conditional branch, if it exists. */
2489 for (index = 0; index <= last_breakpoint; index++)
2490 VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);
2491
2492 return next_pcs;
2493 }
2494
2495 struct displaced_step_closure
2496 {
2497 /* True when a conditional instruction, such as B.COND or TBZ,
2498 is being displaced stepped. */
2499 int cond;
2500
2501 /* PC adjustment offset after displaced stepping. */
2502 int32_t pc_adjust;
2503 };
2504
2505 /* Data when visiting instructions for displaced stepping. */
2506
2507 struct aarch64_displaced_step_data
2508 {
2509 struct aarch64_insn_data base;
2510
2511 /* The address at which the instruction will be executed. */
2512 CORE_ADDR new_addr;
2513 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2514 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2515 /* Number of instructions in INSN_BUF. */
2516 unsigned insn_count;
2517 /* Registers when doing displaced stepping. */
2518 struct regcache *regs;
2519
2520 struct displaced_step_closure *dsc;
2521 };
2522
2523 /* Implementation of aarch64_insn_visitor method "b". */
2524
2525 static void
2526 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2527 struct aarch64_insn_data *data)
2528 {
2529 struct aarch64_displaced_step_data *dsd
2530 = (struct aarch64_displaced_step_data *) data;
2531 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2532
2533 if (can_encode_int32 (new_offset, 28))
2534 {
2535 /* Emit B rather than BL, because executing BL on a new address
2536 will get the wrong address into LR. In order to avoid this,
2537 we emit B, and update LR if the instruction is BL. */
2538 emit_b (dsd->insn_buf, 0, new_offset);
2539 dsd->insn_count++;
2540 }
2541 else
2542 {
2543 /* Write NOP. */
2544 emit_nop (dsd->insn_buf);
2545 dsd->insn_count++;
2546 dsd->dsc->pc_adjust = offset;
2547 }
2548
2549 if (is_bl)
2550 {
2551 /* Update LR. */
2552 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2553 data->insn_addr + 4);
2554 }
2555 }
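
/* Worked example for the method above (illustrative addresses): a BL
   at insn_addr == 0x400000 targeting 0x400100 (offset == 0x100),
   copied to a scratch pad at new_addr == 0x500000, needs
   new_offset == 0x400000 - 0x500000 + 0x100 == -0xfff00.  That still
   fits in the 28-bit branch range, so a plain B is emitted and LR is
   set by hand to 0x400004.  */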
2556
2557 /* Implementation of aarch64_insn_visitor method "b_cond". */
2558
2559 static void
2560 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2561 struct aarch64_insn_data *data)
2562 {
2563 struct aarch64_displaced_step_data *dsd
2564 = (struct aarch64_displaced_step_data *) data;
2565
2566 /* GDB has to fix up the PC after displaced stepping this
2567 instruction differently depending on whether the condition is
2568 true or false.  Instead of checking COND against the condition
2569 flags, we emit the following instructions, and GDB can tell how
2570 to fix up the PC from the resulting PC value.
2571
2572 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2573 INSN1 ;
2574 TAKEN:
2575 INSN2
2576 */
2577
2578 emit_bcond (dsd->insn_buf, cond, 8);
2579 dsd->dsc->cond = 1;
2580 dsd->dsc->pc_adjust = offset;
2581 dsd->insn_count = 1;
2582 }
2583
2584 /* Build a register operand on the fly.  If the register is known
2585 statically, it should be made a global as above instead of using
2586 this helper function. */
2587
2588 static struct aarch64_register
2589 aarch64_register (unsigned num, int is64)
2590 {
2591 return (struct aarch64_register) { num, is64 };
2592 }
2593
2594 /* Implementation of aarch64_insn_visitor method "cb". */
2595
2596 static void
2597 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2598 const unsigned rn, int is64,
2599 struct aarch64_insn_data *data)
2600 {
2601 struct aarch64_displaced_step_data *dsd
2602 = (struct aarch64_displaced_step_data *) data;
2603
2604 /* The offset is out of range for a compare and branch
2605 instruction. We can use the following instructions instead:
2606
2607 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2608 INSN1 ;
2609 TAKEN:
2610 INSN2
2611 */
2612 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2613 dsd->insn_count = 1;
2614 dsd->dsc->cond = 1;
2615 dsd->dsc->pc_adjust = offset;
2616 }
2617
2618 /* Implementation of aarch64_insn_visitor method "tb". */
2619
2620 static void
2621 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2622 const unsigned rt, unsigned bit,
2623 struct aarch64_insn_data *data)
2624 {
2625 struct aarch64_displaced_step_data *dsd
2626 = (struct aarch64_displaced_step_data *) data;
2627
2628 /* The offset is out of range for a test bit and branch
2629 instruction.  We can use the following instructions instead:
2630
2631 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2632 INSN1 ;
2633 TAKEN:
2634 INSN2
2635
2636 */
2637 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2638 dsd->insn_count = 1;
2639 dsd->dsc->cond = 1;
2640 dsd->dsc->pc_adjust = offset;
2641 }
2642
2643 /* Implementation of aarch64_insn_visitor method "adr". */
2644
2645 static void
2646 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2647 const int is_adrp, struct aarch64_insn_data *data)
2648 {
2649 struct aarch64_displaced_step_data *dsd
2650 = (struct aarch64_displaced_step_data *) data;
2651 /* We know exactly the address the ADR{P,} instruction will compute.
2652 We can just write it to the destination register. */
2653 CORE_ADDR address = data->insn_addr + offset;
2654
2655 if (is_adrp)
2656 {
2657 /* Clear the lower 12 bits of the address to get the 4 KiB page base. */
2658 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2659 address & ~0xfff);
2660 }
2661 else
2662 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2663 address);
2664
2665 dsd->dsc->pc_adjust = 4;
2666 emit_nop (dsd->insn_buf);
2667 dsd->insn_count = 1;
2668 }
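
/* Worked example for the method above (illustrative addresses): for
   an ADRP at insn_addr == 0x400123 whose operand resolves to
   address == 0x401123, the architectural result is the 4 KiB page
   base 0x401000, which is exactly what the masked write stores into
   the destination register; the scratch pad slot then only has to
   execute the NOP.  */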
2669
2670 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2671
2672 static void
2673 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2674 const unsigned rt, const int is64,
2675 struct aarch64_insn_data *data)
2676 {
2677 struct aarch64_displaced_step_data *dsd
2678 = (struct aarch64_displaced_step_data *) data;
2679 CORE_ADDR address = data->insn_addr + offset;
2680 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2681
2682 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2683 address);
2684
2685 if (is_sw)
2686 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2687 aarch64_register (rt, 1), zero);
2688 else
2689 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2690 aarch64_register (rt, 1), zero);
2691
2692 dsd->dsc->pc_adjust = 4;
2693 }
2694
2695 /* Implementation of aarch64_insn_visitor method "others". */
2696
2697 static void
2698 aarch64_displaced_step_others (const uint32_t insn,
2699 struct aarch64_insn_data *data)
2700 {
2701 struct aarch64_displaced_step_data *dsd
2702 = (struct aarch64_displaced_step_data *) data;
2703
2704 aarch64_emit_insn (dsd->insn_buf, insn);
2705 dsd->insn_count = 1;
2706
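  /* The encoding of RET is 0xd65f0000 | (Rn << 5); masking with
     0xfffffc1f ignores the five Rn bits (bits 5 to 9), so this test
     matches RET with any register operand.  */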
2707 if ((insn & 0xfffffc1f) == 0xd65f0000)
2708 {
2709 /* RET */
2710 dsd->dsc->pc_adjust = 0;
2711 }
2712 else
2713 dsd->dsc->pc_adjust = 4;
2714 }
2715
2716 static const struct aarch64_insn_visitor visitor =
2717 {
2718 aarch64_displaced_step_b,
2719 aarch64_displaced_step_b_cond,
2720 aarch64_displaced_step_cb,
2721 aarch64_displaced_step_tb,
2722 aarch64_displaced_step_adr,
2723 aarch64_displaced_step_ldr_literal,
2724 aarch64_displaced_step_others,
2725 };
2726
2727 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2728
2729 struct displaced_step_closure *
2730 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2731 CORE_ADDR from, CORE_ADDR to,
2732 struct regcache *regs)
2733 {
2734 struct displaced_step_closure *dsc = NULL;
2735 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2736 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2737 struct aarch64_displaced_step_data dsd;
2738 aarch64_inst inst;
2739
2740 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2741 return NULL;
2742
2743 /* Look for a Load Exclusive instruction which begins the sequence. */
2744 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2745 {
2746 /* We can't displaced-step atomic sequences. */
2747 return NULL;
2748 }
2749
2750 dsc = XCNEW (struct displaced_step_closure);
2751 dsd.base.insn_addr = from;
2752 dsd.new_addr = to;
2753 dsd.regs = regs;
2754 dsd.dsc = dsc;
2755 dsd.insn_count = 0;
2756 aarch64_relocate_instruction (insn, &visitor,
2757 (struct aarch64_insn_data *) &dsd);
2758 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2759
2760 if (dsd.insn_count != 0)
2761 {
2762 int i;
2763
2764 /* The instruction can be relocated to the scratch pad.  Copy
2765 the relocated instruction(s) there. */
2766 for (i = 0; i < dsd.insn_count; i++)
2767 {
2768 if (debug_displaced)
2769 {
2770 debug_printf ("displaced: writing insn ");
2771 debug_printf ("%.8x", dsd.insn_buf[i]);
2772 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2773 }
2774 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2775 (ULONGEST) dsd.insn_buf[i]);
2776 }
2777 }
2778 else
2779 {
2780 xfree (dsc);
2781 dsc = NULL;
2782 }
2783
2784 return dsc;
2785 }
2786
2787 /* Implement the "displaced_step_fixup" gdbarch method. */
2788
2789 void
2790 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2791 struct displaced_step_closure *dsc,
2792 CORE_ADDR from, CORE_ADDR to,
2793 struct regcache *regs)
2794 {
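  /* For a conditional branch the scratch pad holds a single
     "B.COND +8" (see aarch64_displaced_step_b_cond), so after the
     step PC == TO + 8 means the condition held and PC == TO + 4
     means it did not; anything else indicates a stepping problem.  */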
2795 if (dsc->cond)
2796 {
2797 ULONGEST pc;
2798
2799 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2800 if (pc - to == 8)
2801 {
2802 /* Condition is true. */
2803 }
2804 else if (pc - to == 4)
2805 {
2806 /* Condition is false. */
2807 dsc->pc_adjust = 4;
2808 }
2809 else
2810 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2811 }
2812
2813 if (dsc->pc_adjust != 0)
2814 {
2815 if (debug_displaced)
2816 {
2817 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2818 paddress (gdbarch, from), dsc->pc_adjust);
2819 }
2820 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2821 from + dsc->pc_adjust);
2822 }
2823 }
2824
2825 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2826
2827 int
2828 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2829 struct displaced_step_closure *closure)
2830 {
2831 return 1;
2832 }
2833
2834 /* Initialize the current architecture based on INFO. If possible,
2835 re-use an architecture from ARCHES, which is a list of
2836 architectures already created during this debugging session.
2837
2838 Called e.g. at program startup, when reading a core file, and when
2839 reading a binary file. */
2840
2841 static struct gdbarch *
2842 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2843 {
2844 struct gdbarch_tdep *tdep;
2845 struct gdbarch *gdbarch;
2846 struct gdbarch_list *best_arch;
2847 struct tdesc_arch_data *tdesc_data = NULL;
2848 const struct target_desc *tdesc = info.target_desc;
2849 int i;
2850 int valid_p = 1;
2851 const struct tdesc_feature *feature;
2852 int num_regs = 0;
2853 int num_pseudo_regs = 0;
2854
2855 /* Ensure we always have a target descriptor. */
2856 if (!tdesc_has_registers (tdesc))
2857 tdesc = tdesc_aarch64;
2858
2859 gdb_assert (tdesc);
2860
2861 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2862
2863 if (feature == NULL)
2864 return NULL;
2865
2866 tdesc_data = tdesc_data_alloc ();
2867
2868 /* Validate the descriptor provides the mandatory core R registers
2869 and allocate their numbers. */
2870 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2871 valid_p &=
2872 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2873 aarch64_r_register_names[i]);
2874
2875 num_regs = AARCH64_X0_REGNUM + i;
2876
2877 /* Look for the V registers. */
2878 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2879 if (feature)
2880 {
2881 /* Validate the descriptor provides the mandatory V registers
2882 and allocate their numbers. */
2883 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2884 valid_p &=
2885 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2886 aarch64_v_register_names[i]);
2887
2888 num_regs = AARCH64_V0_REGNUM + i;
2889
2890 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2891 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2892 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2893 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2894 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2895 }
2896
2897 if (!valid_p)
2898 {
2899 tdesc_data_cleanup (tdesc_data);
2900 return NULL;
2901 }
2902
2903 /* AArch64 code is always little-endian. */
2904 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2905
2906 /* If there is already a candidate, use it. */
2907 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2908 best_arch != NULL;
2909 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2910 {
2911 /* Found a match. */
2912 break;
2913 }
2914
2915 if (best_arch != NULL)
2916 {
2917 if (tdesc_data != NULL)
2918 tdesc_data_cleanup (tdesc_data);
2919 return best_arch->gdbarch;
2920 }
2921
2922 tdep = XCNEW (struct gdbarch_tdep);
2923 gdbarch = gdbarch_alloc (&info, tdep);
2924
2925 /* This should be low enough for everything. */
2926 tdep->lowest_pc = 0x20;
2927 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2928 tdep->jb_elt_size = 8;
2929
2930 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2931 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2932
2933 /* Frame handling. */
2934 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2935 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2936 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2937
2938 /* Advance PC across function entry code. */
2939 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2940
2941 /* The stack grows downward. */
2942 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2943
2944 /* Breakpoint manipulation. */
2945 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2946 aarch64_breakpoint::kind_from_pc);
2947 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2948 aarch64_breakpoint::bp_from_kind);
2949 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2950 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2951
2952 /* Information about registers, etc. */
2953 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2954 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2955 set_gdbarch_num_regs (gdbarch, num_regs);
2956
2957 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2958 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2959 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2960 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2961 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2962 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2963 aarch64_pseudo_register_reggroup_p);
2964
2965 /* ABI */
2966 set_gdbarch_short_bit (gdbarch, 16);
2967 set_gdbarch_int_bit (gdbarch, 32);
2968 set_gdbarch_float_bit (gdbarch, 32);
2969 set_gdbarch_double_bit (gdbarch, 64);
2970 set_gdbarch_long_double_bit (gdbarch, 128);
2971 set_gdbarch_long_bit (gdbarch, 64);
2972 set_gdbarch_long_long_bit (gdbarch, 64);
2973 set_gdbarch_ptr_bit (gdbarch, 64);
2974 set_gdbarch_char_signed (gdbarch, 0);
2975 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2976 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2977 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2978
2979 /* Internal <-> external register number maps. */
2980 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2981
2982 /* Returning results. */
2983 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2984
2985 /* Disassembly. */
2986 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2987
2988 /* Virtual tables. */
2989 set_gdbarch_vbit_in_delta (gdbarch, 1);
2990
2991 /* Hook in the ABI-specific overrides, if they have been registered. */
2992 info.target_desc = tdesc;
2993 info.tdep_info = (void *) tdesc_data;
2994 gdbarch_init_osabi (info, gdbarch);
2995
2996 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2997
2998 /* Add some default predicates. */
2999 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3000 dwarf2_append_unwinders (gdbarch);
3001 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3002
3003 frame_base_set_default (gdbarch, &aarch64_normal_base);
3004
3005 /* Now we have tuned the configuration, set a few final things,
3006 based on what the OS ABI has told us. */
3007
3008 if (tdep->jb_pc >= 0)
3009 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3010
3011 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3012
3013 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3014
3015 /* Add standard register aliases. */
3016 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3017 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3018 value_of_aarch64_user_reg,
3019 &aarch64_register_aliases[i].regnum);
3020
3021 return gdbarch;
3022 }
3023
3024 static void
3025 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3026 {
3027 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3028
3029 if (tdep == NULL)
3030 return;
3031
3032 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3033 paddress (gdbarch, tdep->lowest_pc));
3034 }
3035
3036 /* Suppress warning from -Wmissing-prototypes. */
3037 extern initialize_file_ftype _initialize_aarch64_tdep;
3038
3039 void
3040 _initialize_aarch64_tdep (void)
3041 {
3042 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3043 aarch64_dump_tdep);
3044
3045 initialize_tdesc_aarch64 ();
3046
3047 /* Debug this file's internals. */
3048 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3049 Set AArch64 debugging."), _("\
3050 Show AArch64 debugging."), _("\
3051 When on, AArch64 specific debugging is enabled."),
3052 NULL,
3053 show_aarch64_debug,
3054 &setdebuglist, &showdebuglist);
3055
3056 #if GDB_SELF_TEST
3057 register_self_test (selftests::aarch64_analyze_prologue_test);
3058 #endif
3059 }
3060
3061 /* AArch64 process record-replay related structures, defines etc. */
3062
3063 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3064 do \
3065 { \
3066 unsigned int reg_len = LENGTH; \
3067 if (reg_len) \
3068 { \
3069 REGS = XNEWVEC (uint32_t, reg_len); \
3070 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3071 } \
3072 } \
3073 while (0)
3074
3075 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3076 do \
3077 { \
3078 unsigned int mem_len = LENGTH; \
3079 if (mem_len) \
3080 { \
3081 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3082 memcpy (&MEMS->len, &RECORD_BUF[0], \
3083 sizeof (struct aarch64_mem_r) * LENGTH); \
3084 } \
3085 } \
3086 while (0)
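
/* Usage sketch (illustrative, not from the GDB sources): a record
   handler collects the registers an instruction will clobber into a
   local buffer and publishes them through REG_ALLOC.  The fragment
   assumes a handler context with AARCH64_INSN_R and REG_RD in scope,
   as in the handlers that follow.  */
#if 0
  uint32_t record_buf[2];

  record_buf[0] = reg_rd;			/* Destination register.  */
  record_buf[1] = AARCH64_CPSR_REGNUM;		/* Flags, when written.  */
  aarch64_insn_r->reg_rec_count = 2;
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
#endif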
3087
3088 /* AArch64 record/replay structures and enumerations. */
3089
3090 struct aarch64_mem_r
3091 {
3092 uint64_t len; /* Record length. */
3093 uint64_t addr; /* Memory address. */
3094 };
3095
3096 enum aarch64_record_result
3097 {
3098 AARCH64_RECORD_SUCCESS,
3099 AARCH64_RECORD_FAILURE,
3100 AARCH64_RECORD_UNSUPPORTED,
3101 AARCH64_RECORD_UNKNOWN
3102 };
3103
3104 typedef struct insn_decode_record_t
3105 {
3106 struct gdbarch *gdbarch;
3107 struct regcache *regcache;
3108 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3109 uint32_t aarch64_insn; /* Insn to be recorded. */
3110 uint32_t mem_rec_count; /* Count of memory records. */
3111 uint32_t reg_rec_count; /* Count of register records. */
3112 uint32_t *aarch64_regs; /* Registers to be recorded. */
3113 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3114 } insn_decode_record;
3115
3116 /* Record handler for data processing - register instructions. */
3117
3118 static unsigned int
3119 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3120 {
3121 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3122 uint32_t record_buf[4];
3123
3124 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3125 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3126 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3127
3128 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3129 {
3130 uint8_t setflags;
3131
3132 /* Logical (shifted register). */
3133 if (insn_bits24_27 == 0x0a)
3134 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3135 /* Add/subtract. */
3136 else if (insn_bits24_27 == 0x0b)
3137 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3138 else
3139 return AARCH64_RECORD_UNKNOWN;
3140
3141 record_buf[0] = reg_rd;
3142 aarch64_insn_r->reg_rec_count = 1;
3143 if (setflags)
3144 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3145 }
3146 else
3147 {
3148 if (insn_bits24_27 == 0x0b)
3149 {
3150 /* Data-processing (3 source). */
3151 record_buf[0] = reg_rd;
3152 aarch64_insn_r->reg_rec_count = 1;
3153 }
3154 else if (insn_bits24_27 == 0x0a)
3155 {
3156 if (insn_bits21_23 == 0x00)
3157 {
3158 /* Add/subtract (with carry). */
3159 record_buf[0] = reg_rd;
3160 aarch64_insn_r->reg_rec_count = 1;
3161 if (bit (aarch64_insn_r->aarch64_insn, 29))
3162 {
3163 record_buf[1] = AARCH64_CPSR_REGNUM;
3164 aarch64_insn_r->reg_rec_count = 2;
3165 }
3166 }
3167 else if (insn_bits21_23 == 0x02)
3168 {
3169 /* Conditional compare (register) and conditional compare
3170 (immediate) instructions. */
3171 record_buf[0] = AARCH64_CPSR_REGNUM;
3172 aarch64_insn_r->reg_rec_count = 1;
3173 }
3174 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3175 {
3176 /* Conditional select. */
3177 /* Data-processing (2 source). */
3178 /* Data-processing (1 source). */
3179 record_buf[0] = reg_rd;
3180 aarch64_insn_r->reg_rec_count = 1;
3181 }
3182 else
3183 return AARCH64_RECORD_UNKNOWN;
3184 }
3185 }
3186
3187 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3188 record_buf);
3189 return AARCH64_RECORD_SUCCESS;
3190 }
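
/* Worked example for the handler above (illustrative encoding): for
   "adds x0, x1, x2" (0xab020020), bit 28 is clear, bits 24-27 are
   0x0b (add/subtract) and bit 29 is set, so SETFLAGS is true and the
   handler records X0 and the CPSR, exactly the state the instruction
   can modify.  */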
3191
3192 /* Record handler for data processing - immediate instructions. */
3193
3194 static unsigned int
3195 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3196 {
3197 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3198 uint32_t record_buf[4];
3199
3200 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3201 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3202 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3203
3204 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3205 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3206 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3207 {
3208 record_buf[0] = reg_rd;
3209 aarch64_insn_r->reg_rec_count = 1;
3210 }
3211 else if (insn_bits24_27 == 0x01)
3212 {
3213 /* Add/Subtract (immediate). */
3214 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3215 record_buf[0] = reg_rd;
3216 aarch64_insn_r->reg_rec_count = 1;
3217 if (setflags)
3218 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3219 }
3220 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3221 {
3222 /* Logical (immediate). */
3223 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3224 record_buf[0] = reg_rd;
3225 aarch64_insn_r->reg_rec_count = 1;
3226 if (setflags)
3227 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3228 }
3229 else
3230 return AARCH64_RECORD_UNKNOWN;
3231
3232 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3233 record_buf);
3234 return AARCH64_RECORD_SUCCESS;
3235 }
3236
3237 /* Record handler for branch, exception generation and system instructions. */
3238
3239 static unsigned int
3240 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3241 {
3242 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3243 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3244 uint32_t record_buf[4];
3245
3246 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3247 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3248 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3249
3250 if (insn_bits28_31 == 0x0d)
3251 {
3252 /* Exception generation instructions. */
3253 if (insn_bits24_27 == 0x04)
3254 {
3255 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3256 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3257 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3258 {
3259 ULONGEST svc_number;
3260
3261 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3262 &svc_number);
3263 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3264 svc_number);
3265 }
3266 else
3267 return AARCH64_RECORD_UNSUPPORTED;
3268 }
3269 /* System instructions. */
3270 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3271 {
3272 uint32_t reg_rt, reg_crn;
3273
3274 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3275 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3276
3277 /* Record rt in case of sysl and mrs instructions. */
3278 if (bit (aarch64_insn_r->aarch64_insn, 21))
3279 {
3280 record_buf[0] = reg_rt;
3281 aarch64_insn_r->reg_rec_count = 1;
3282 }
3283 /* Record cpsr for hint and msr(immediate) instructions. */
3284 else if (reg_crn == 0x02 || reg_crn == 0x04)
3285 {
3286 record_buf[0] = AARCH64_CPSR_REGNUM;
3287 aarch64_insn_r->reg_rec_count = 1;
3288 }
3289 }
3290 /* Unconditional branch (register). */
3291 else if ((insn_bits24_27 & 0x0e) == 0x06)
3292 {
3293 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3294 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3295 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3296 }
3297 else
3298 return AARCH64_RECORD_UNKNOWN;
3299 }
3300 /* Unconditional branch (immediate). */
3301 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3302 {
3303 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3304 if (bit (aarch64_insn_r->aarch64_insn, 31))
3305 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3306 }
3307 else
3308 /* Compare & branch (immediate), Test & branch (immediate) and
3309 Conditional branch (immediate). */
3310 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3311
3312 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3313 record_buf);
3314 return AARCH64_RECORD_SUCCESS;
3315 }
3316
3317 /* Record handler for advanced SIMD load and store instructions. */
3318
3319 static unsigned int
3320 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3321 {
3322 CORE_ADDR address;
3323 uint64_t addr_offset = 0;
3324 uint32_t record_buf[24];
3325 uint64_t record_buf_mem[24];
3326 uint32_t reg_rn, reg_rt;
3327 uint32_t reg_index = 0, mem_index = 0;
3328 uint8_t opcode_bits, size_bits;
3329
3330 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3331 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3332 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3333 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3334 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3335
3336 if (record_debug)
3337 debug_printf ("Process record: Advanced SIMD load/store\n");
3338
3339 /* Load/store single structure. */
3340 if (bit (aarch64_insn_r->aarch64_insn, 24))
3341 {
3342 uint8_t sindex, scale, selem, esize, replicate = 0;
3343 scale = opcode_bits >> 2;
3344 selem = ((opcode_bits & 0x02) |
3345 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3346 switch (scale)
3347 {
3348 case 1:
3349 if (size_bits & 0x01)
3350 return AARCH64_RECORD_UNKNOWN;
3351 break;
3352 case 2:
3353 if ((size_bits >> 1) & 0x01)
3354 return AARCH64_RECORD_UNKNOWN;
3355 if (size_bits & 0x01)
3356 {
3357 if (!((opcode_bits >> 1) & 0x01))
3358 scale = 3;
3359 else
3360 return AARCH64_RECORD_UNKNOWN;
3361 }
3362 break;
3363 case 3:
3364 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3365 {
3366 scale = size_bits;
3367 replicate = 1;
3368 break;
3369 }
3370 else
3371 return AARCH64_RECORD_UNKNOWN;
3372 default:
3373 break;
3374 }
3375 esize = 8 << scale;
3376 if (replicate)
3377 for (sindex = 0; sindex < selem; sindex++)
3378 {
3379 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3380 reg_rt = (reg_rt + 1) % 32;
3381 }
3382 else
3383 {
3384 for (sindex = 0; sindex < selem; sindex++)
3385 {
3386 if (bit (aarch64_insn_r->aarch64_insn, 22))
3387 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3388 else
3389 {
3390 record_buf_mem[mem_index++] = esize / 8;
3391 record_buf_mem[mem_index++] = address + addr_offset;
3392 }
3393 addr_offset = addr_offset + (esize / 8);
3394 reg_rt = (reg_rt + 1) % 32;
3395 }
3396 }
3397 }
3398 /* Load/store multiple structure. */
3399 else
3400 {
3401 uint8_t selem, esize, rpt, elements;
3402 uint8_t eindex, rindex;
3403
3404 esize = 8 << size_bits;
3405 if (bit (aarch64_insn_r->aarch64_insn, 30))
3406 elements = 128 / esize;
3407 else
3408 elements = 64 / esize;
3409
3410 switch (opcode_bits)
3411 {
3412 /* LD/ST4 (4 Registers). */
3413 case 0:
3414 rpt = 1;
3415 selem = 4;
3416 break;
3417 /* LD/ST1 (4 Registers). */
3418 case 2:
3419 rpt = 4;
3420 selem = 1;
3421 break;
3422 /* LD/ST3 (3 Registers). */
3423 case 4:
3424 rpt = 1;
3425 selem = 3;
3426 break;
3427 /* LD/ST1 (3 Registers). */
3428 case 6:
3429 rpt = 3;
3430 selem = 1;
3431 break;
3432 /* LD/ST1 (1 Register). */
3433 case 7:
3434 rpt = 1;
3435 selem = 1;
3436 break;
3437 /* LD/ST2 (2 Registers). */
3438 case 8:
3439 rpt = 1;
3440 selem = 2;
3441 break;
3442 /* LD/ST1 (2 Registers). */
3443 case 10:
3444 rpt = 2;
3445 selem = 1;
3446 break;
3447 default:
3448 return AARCH64_RECORD_UNSUPPORTED;
3449 break;
3450 }
3451 for (rindex = 0; rindex < rpt; rindex++)
3452 for (eindex = 0; eindex < elements; eindex++)
3453 {
3454 uint8_t reg_tt, sindex;
3455 reg_tt = (reg_rt + rindex) % 32;
3456 for (sindex = 0; sindex < selem; sindex++)
3457 {
3458 if (bit (aarch64_insn_r->aarch64_insn, 22))
3459 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3460 else
3461 {
3462 record_buf_mem[mem_index++] = esize / 8;
3463 record_buf_mem[mem_index++] = address + addr_offset;
3464 }
3465 addr_offset = addr_offset + (esize / 8);
3466 reg_tt = (reg_tt + 1) % 32;
3467 }
3468 }
3469 }
3470
3471 if (bit (aarch64_insn_r->aarch64_insn, 23))
3472 record_buf[reg_index++] = reg_rn;
3473
3474 aarch64_insn_r->reg_rec_count = reg_index;
3475 aarch64_insn_r->mem_rec_count = mem_index / 2;
3476 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3477 record_buf_mem);
3478 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3479 record_buf);
3480 return AARCH64_RECORD_SUCCESS;
3481 }
3482
3483 /* Record handler for load and store instructions. */
3484
3485 static unsigned int
3486 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3487 {
3488 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3489 uint8_t insn_bit23, insn_bit21;
3490 uint8_t opc, size_bits, ld_flag, vector_flag;
3491 uint32_t reg_rn, reg_rt, reg_rt2;
3492 uint64_t datasize, offset;
3493 uint32_t record_buf[8];
3494 uint64_t record_buf_mem[8];
3495 CORE_ADDR address;
3496
3497 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3498 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3499 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3500 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3501 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3502 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3503 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3504 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3505 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3506 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3507 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3508
3509 /* Load/store exclusive. */
3510 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3511 {
3512 if (record_debug)
3513 debug_printf ("Process record: load/store exclusive\n");
3514
3515 if (ld_flag)
3516 {
3517 record_buf[0] = reg_rt;
3518 aarch64_insn_r->reg_rec_count = 1;
3519 if (insn_bit21)
3520 {
3521 record_buf[1] = reg_rt2;
3522 aarch64_insn_r->reg_rec_count = 2;
3523 }
3524 }
3525 else
3526 {
3527 if (insn_bit21)
3528 datasize = (8 << size_bits) * 2;
3529 else
3530 datasize = (8 << size_bits);
3531 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3532 &address);
3533 record_buf_mem[0] = datasize / 8;
3534 record_buf_mem[1] = address;
3535 aarch64_insn_r->mem_rec_count = 1;
3536 if (!insn_bit23)
3537 {
3538 /* Save register rs. */
3539 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3540 aarch64_insn_r->reg_rec_count = 1;
3541 }
3542 }
3543 }
3544 /* Load register (literal) instruction decoding. */
3545 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3546 {
3547 if (record_debug)
3548 debug_printf ("Process record: load register (literal)\n");
3549 if (vector_flag)
3550 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3551 else
3552 record_buf[0] = reg_rt;
3553 aarch64_insn_r->reg_rec_count = 1;
3554 }
3555 /* Decoding of all types of load/store pair instructions. */
3556 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3557 {
3558 if (record_debug)
3559 debug_printf ("Process record: load/store pair\n");
3560
3561 if (ld_flag)
3562 {
3563 if (vector_flag)
3564 {
3565 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3566 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3567 }
3568 else
3569 {
3570 record_buf[0] = reg_rt;
3571 record_buf[1] = reg_rt2;
3572 }
3573 aarch64_insn_r->reg_rec_count = 2;
3574 }
3575 else
3576 {
3577 uint16_t imm7_off;
3578 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3579 if (!vector_flag)
3580 size_bits = size_bits >> 1;
3581 datasize = 8 << (2 + size_bits);
3582 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3583 offset = offset << (2 + size_bits);
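	      /* For example (illustrative encoding): a pre-indexed
		 "stp x0, x1, [sp, #-16]!" has imm7 == 0x7e, giving
		 offset == (~0x7e & 0x7f) + 1 == 2 before scaling and
		 2 << 3 == 16 bytes after, which is then subtracted
		 from the base address below.  */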
3584 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3585 &address);
3586 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3587 {
3588 if (imm7_off & 0x40)
3589 address = address - offset;
3590 else
3591 address = address + offset;
3592 }
3593
3594 record_buf_mem[0] = datasize / 8;
3595 record_buf_mem[1] = address;
3596 record_buf_mem[2] = datasize / 8;
3597 record_buf_mem[3] = address + (datasize / 8);
3598 aarch64_insn_r->mem_rec_count = 2;
3599 }
3600 if (bit (aarch64_insn_r->aarch64_insn, 23))
3601 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3602 }
3603 /* Load/store register (unsigned immediate) instructions. */
3604 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3605 {
3606 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3607 if (!(opc >> 1))
3608 if (opc & 0x01)
3609 ld_flag = 0x01;
3610 else
3611 ld_flag = 0x0;
3612 else
3613 if (size_bits != 0x03)
3614 ld_flag = 0x01;
3615 else
3616 return AARCH64_RECORD_UNKNOWN;
3617
3618 if (record_debug)
3619 {
3620 debug_printf ("Process record: load/store (unsigned immediate):"
3621 " size %x V %d opc %x\n", size_bits, vector_flag,
3622 opc);
3623 }
3624
3625 if (!ld_flag)
3626 {
3627 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3628 datasize = 8 << size_bits;
3629 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3630 &address);
3631 offset = offset << size_bits;
3632 address = address + offset;
3633
3634 record_buf_mem[0] = datasize >> 3;
3635 record_buf_mem[1] = address;
3636 aarch64_insn_r->mem_rec_count = 1;
3637 }
3638 else
3639 {
3640 if (vector_flag)
3641 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3642 else
3643 record_buf[0] = reg_rt;
3644 aarch64_insn_r->reg_rec_count = 1;
3645 }
3646 }
3647 /* Load/store register (register offset) instructions. */
3648 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3649 && insn_bits10_11 == 0x02 && insn_bit21)
3650 {
3651 if (record_debug)
3652 debug_printf ("Process record: load/store (register offset)\n");
3653 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3654 if (!(opc >> 1))
3655 if (opc & 0x01)
3656 ld_flag = 0x01;
3657 else
3658 ld_flag = 0x0;
3659 else
3660 if (size_bits != 0x03)
3661 ld_flag = 0x01;
3662 else
3663 return AARCH64_RECORD_UNKNOWN;
3664
3665 if (!ld_flag)
3666 {
3667 ULONGEST reg_rm_val;
3668
3669 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3670 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3671 if (bit (aarch64_insn_r->aarch64_insn, 12))
3672 offset = reg_rm_val << size_bits;
3673 else
3674 offset = reg_rm_val;
3675 datasize = 8 << size_bits;
3676 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3677 &address);
3678 address = address + offset;
3679 record_buf_mem[0] = datasize >> 3;
3680 record_buf_mem[1] = address;
3681 aarch64_insn_r->mem_rec_count = 1;
3682 }
3683 else
3684 {
3685 if (vector_flag)
3686 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3687 else
3688 record_buf[0] = reg_rt;
3689 aarch64_insn_r->reg_rec_count = 1;
3690 }
3691 }
3692 /* Load/store register (immediate and unprivileged) instructions. */
3693 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3694 && !insn_bit21)
3695 {
3696 if (record_debug)
3697 {
3698 debug_printf ("Process record: load/store "
3699 "(immediate and unprivileged)\n");
3700 }
3701 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3702 if (!(opc >> 1))
3703 if (opc & 0x01)
3704 ld_flag = 0x01;
3705 else
3706 ld_flag = 0x0;
3707 else
3708 if (size_bits != 0x03)
3709 ld_flag = 0x01;
3710 else
3711 return AARCH64_RECORD_UNKNOWN;
3712
3713 if (!ld_flag)
3714 {
3715 uint16_t imm9_off;
3716 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3717 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3718 datasize = 8 << size_bits;
3719 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3720 &address);
3721 if (insn_bits10_11 != 0x01)
3722 {
3723 if (imm9_off & 0x0100)
3724 address = address - offset;
3725 else
3726 address = address + offset;
3727 }
3728 record_buf_mem[0] = datasize >> 3;
3729 record_buf_mem[1] = address;
3730 aarch64_insn_r->mem_rec_count = 1;
3731 }
3732 else
3733 {
3734 if (vector_flag)
3735 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3736 else
3737 record_buf[0] = reg_rt;
3738 aarch64_insn_r->reg_rec_count = 1;
3739 }
3740 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3741 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3742 }
3743 /* Advanced SIMD load/store instructions. */
3744 else
3745 return aarch64_record_asimd_load_store (aarch64_insn_r);
3746
3747 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3748 record_buf_mem);
3749 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3750 record_buf);
3751 return AARCH64_RECORD_SUCCESS;
3752 }
3753
3754 /* Record handler for data processing SIMD and floating point instructions. */
3755
3756 static unsigned int
3757 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3758 {
3759 uint8_t insn_bit21, opcode, rmode, reg_rd;
3760 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3761 uint8_t insn_bits11_14;
3762 uint32_t record_buf[2];
3763
3764 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3765 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3766 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3767 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3768 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3769 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3770 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3771 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3772 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3773
3774 if (record_debug)
3775 debug_printf ("Process record: data processing SIMD/FP: ");
3776
3777 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3778 {
3779 /* Floating point - fixed point conversion instructions. */
3780 if (!insn_bit21)
3781 {
3782 if (record_debug)
3783 debug_printf ("FP - fixed point conversion");
3784
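	  /* opcode<2:1> == 0 with rmode == 0x03 is the FCVTZ* direction
	     (floating point to fixed point in a general register); the
	     other fixed-point conversions (SCVTF/UCVTF) write a SIMD/FP
	     register.  */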
3785 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3786 record_buf[0] = reg_rd;
3787 else
3788 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3789 }
3790 /* Floating point - conditional compare instructions. */
3791 else if (insn_bits10_11 == 0x01)
3792 {
3793 if (record_debug)
3794 debug_printf ("FP - conditional compare");
3795
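	  /* FCCMP/FCCMPE only set the NZCV condition flags, so CPSR is
	     the lone side effect to record.  */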
3796 record_buf[0] = AARCH64_CPSR_REGNUM;
3797 }
3798 /* Floating point - data processing (2-source) and
3799 conditional select instructions. */
3800 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3801 {
3802 if (record_debug)
3803 debug_printf ("FP - DP (2-source)");
3804
3805 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3806 }
3807 else if (insn_bits10_11 == 0x00)
3808 {
3809 /* Floating point - immediate instructions. */
3810 if ((insn_bits12_15 & 0x01) == 0x01
3811 || (insn_bits12_15 & 0x07) == 0x04)
3812 {
3813 if (record_debug)
3814 debug_printf ("FP - immediate");
3815 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3816 }
3817 /* Floating point - compare instructions. */
3818 else if ((insn_bits12_15 & 0x03) == 0x02)
3819 {
3820 if (record_debug)
		debug_printf ("FP - compare");
3822 record_buf[0] = AARCH64_CPSR_REGNUM;
3823 }
	  /* Floating point - integer conversion instructions.  */
3825 else if (insn_bits12_15 == 0x00)
3826 {
3827 /* Convert float to integer instruction. */
3828 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3829 {
3830 if (record_debug)
3831 debug_printf ("float to int conversion");
3832
3833 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3834 }
3835 /* Convert integer to float instruction. */
3836 else if ((opcode >> 1) == 0x01 && !rmode)
3837 {
3838 if (record_debug)
3839 debug_printf ("int to float conversion");
3840
3841 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3842 }
3843 /* Move float to integer instruction. */
3844 else if ((opcode >> 1) == 0x03)
3845 {
3846 if (record_debug)
3847 debug_printf ("move float to int");
3848
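	      /* opcode<0> gives the direction: 0 moves the FP register to
		 a general register (e.g. FMOV Xd, Dn), 1 the reverse
		 (FMOV Dd, Xn), so the recorded destination differs.  */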
3849 if (!(opcode & 0x01))
3850 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3851 else
3852 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3853 }
3854 else
3855 return AARCH64_RECORD_UNKNOWN;
3856 }
3857 else
3858 return AARCH64_RECORD_UNKNOWN;
3859 }
3860 else
3861 return AARCH64_RECORD_UNKNOWN;
3862 }
3863 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3864 {
3865 if (record_debug)
3866 debug_printf ("SIMD copy");
3867
3868 /* Advanced SIMD copy instructions. */
3869 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3870 && !bit (aarch64_insn_r->aarch64_insn, 15)
3871 && bit (aarch64_insn_r->aarch64_insn, 10))
3872 {
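	  /* imm4 values 0x05 and 0x07 encode the SMOV and UMOV element
	     moves, whose destination is a general register; the other
	     copy forms (e.g. DUP, INS) write a SIMD register.  */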
3873 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3874 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3875 else
3876 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3877 }
3878 else
3879 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3880 }
3881 /* All remaining floating point or advanced SIMD instructions. */
3882 else
3883 {
3884 if (record_debug)
	debug_printf ("all remaining FP/SIMD");
3886
3887 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3888 }
3889
3890 if (record_debug)
3891 debug_printf ("\n");
3892
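  /* Every decode path that reaches this point has filled record_buf[0]
     with a single destination register, so bump the count and
     sanity-check it.  */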
3893 aarch64_insn_r->reg_rec_count++;
3894 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3895 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3896 record_buf);
3897 return AARCH64_RECORD_SUCCESS;
3898 }
3899
/* Decode an AArch64 instruction's type and invoke its record handler.  */
3901
3902 static unsigned int
3903 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3904 {
3905 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3906
3907 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3908 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3909 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3910 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3911
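  /* The top-level decode keys on instruction bits 25-28 (the "op0"
     encoding group): 100x is data processing (immediate), 101x branches,
     exception generation and system instructions, x1x0 loads and stores,
     x101 data processing (register) and x111 SIMD and floating point.  */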
3912 /* Data processing - immediate instructions. */
3913 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3914 return aarch64_record_data_proc_imm (aarch64_insn_r);
3915
3916 /* Branch, exception generation and system instructions. */
3917 if (ins_bit26 && !ins_bit27 && ins_bit28)
3918 return aarch64_record_branch_except_sys (aarch64_insn_r);
3919
3920 /* Load and store instructions. */
3921 if (!ins_bit25 && ins_bit27)
3922 return aarch64_record_load_store (aarch64_insn_r);
3923
3924 /* Data processing - register instructions. */
3925 if (ins_bit25 && !ins_bit26 && ins_bit27)
3926 return aarch64_record_data_proc_reg (aarch64_insn_r);
3927
3928 /* Data processing - SIMD and floating point instructions. */
3929 if (ins_bit25 && ins_bit26 && ins_bit27)
3930 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3931
3932 return AARCH64_RECORD_UNSUPPORTED;
3933 }
3934
/* Clean up the register and memory allocations made while recording an
   instruction.  */
3936
3937 static void
3938 deallocate_reg_mem (insn_decode_record *record)
3939 {
3940 xfree (record->aarch64_regs);
3941 xfree (record->aarch64_mems);
3942 }
3943
/* Parse the current instruction and record the values of the registers
   and memory that will be changed by the current instruction to
   "record_arch_list".  Return -1 if something goes wrong.  */
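/* This function is installed as the gdbarch process_record hook by the
   OS-specific code (e.g. via set_gdbarch_process_record in
   aarch64-linux-tdep.c), so the record-full target calls it once per
   instruction before the instruction is executed.  */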
3947
3948 int
3949 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3950 CORE_ADDR insn_addr)
3951 {
3952 uint32_t rec_no = 0;
3953 uint8_t insn_size = 4;
  int ret = 0;
3955 gdb_byte buf[insn_size];
3956 insn_decode_record aarch64_record;
3957
3958 memset (&buf[0], 0, insn_size);
3959 memset (&aarch64_record, 0, sizeof (insn_decode_record));
  if (target_read_memory (insn_addr, &buf[0], insn_size))
    return -1;
3961 aarch64_record.aarch64_insn
3962 = (uint32_t) extract_unsigned_integer (&buf[0],
3963 insn_size,
3964 gdbarch_byte_order (gdbarch));
3965 aarch64_record.regcache = regcache;
3966 aarch64_record.this_addr = insn_addr;
3967 aarch64_record.gdbarch = gdbarch;
3968
3969 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3970 if (ret == AARCH64_RECORD_UNSUPPORTED)
3971 {
      printf_unfiltered (_("Process record does not support instruction "
			   "0x%08x at address %s.\n"),
			 aarch64_record.aarch64_insn,
			 paddress (gdbarch, insn_addr));
3976 ret = -1;
3977 }
3978
3979 if (0 == ret)
3980 {
3981 /* Record registers. */
3982 record_full_arch_list_add_reg (aarch64_record.regcache,
3983 AARCH64_PC_REGNUM);
3984 /* Always record register CPSR. */
3985 record_full_arch_list_add_reg (aarch64_record.regcache,
3986 AARCH64_CPSR_REGNUM);
3987 if (aarch64_record.aarch64_regs)
3988 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3989 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3990 aarch64_record.aarch64_regs[rec_no]))
3991 ret = -1;
3992
3993 /* Record memories. */
3994 if (aarch64_record.aarch64_mems)
3995 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3996 if (record_full_arch_list_add_mem
3997 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3998 aarch64_record.aarch64_mems[rec_no].len))
3999 ret = -1;
4000
4001 if (record_full_arch_list_add_end ())
4002 ret = -1;
4003 }
4004
4005 deallocate_reg_mem (&aarch64_record);
4006 return ret;
4007 }