/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

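/* For example, bits (insn, 5, 9) extracts the five-bit field occupying
   bits 5 through 9 of INSN, and bit (insn, 31) tests its top bit.  */
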
/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
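
/* For instance, after the common prologue "stp x29, x30, [sp, #-16]!;
   mov x29, sp", FRAMEREG is X29 (the frame pointer) and FRAMESIZE is 16.  */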

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm),
                         is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
                            &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
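
/* For example, this returns 8 for "struct { int32_t a; int64_t b; }":
   the largest field alignment wins.  */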

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
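
/* For example, "struct { float x, y, z; }" is an HFA of three floats,
   while "struct { float f; int32_t i; }" is rejected because its members
   are not all of the same floating-point type.  */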

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
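
/* For example, a 16-byte struct passed by value occupies two consecutive
   X registers (say X0 and X1 when NGRN is zero), consuming two NGRN
   slots.  */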

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
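
/* For example, a 12-byte argument with 8-byte alignment is followed by a
   4-byte padding item, keeping NSAA a multiple of 8 for the next
   argument.  */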

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

1403 to determine the number of named/anonymous arguments for the
1404 actual argument placement, and the return type in order to handle
1405 return value correctly.
1406
1407 The generic code above us views the decision of return in memory
1408 or return in registers as a two stage processes. The language
1409 handler is consulted first and may decide to return in memory (eg
1410 class with copy constructor returned by value), this will cause
1411 the generic code to allocate space AND insert an initial leading
1412 argument.
1413
1414 If the language code does not decide to pass in memory then the
1415 target code is consulted.
1416
1417 If the language code decides to pass in memory we want to move
1418 the pointer inserted as the initial argument from the argument
1419 list and into X8, the conventional AArch64 struct return pointer
1420 register.
1421
1422 This is slightly awkward, ideally the flag "lang_struct_return"
1423 would be passed to the targets implementation of push_dummy_call.
1424 Rather that change the target interface we call the language code
1425 directly ourselves. */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
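
/* For example, DWARF register AARCH64_DWARF_X0 + 5 maps to X5 and
   AARCH64_DWARF_V0 + 1 maps to V1; numbers outside these ranges
   yield -1.  */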

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
1817 straight-forward. Otherwise we have to play around a bit
1818 more. */
1819 int len = TYPE_LENGTH (type);
1820 int regno = AARCH64_X0_REGNUM;
1821 ULONGEST tmp;
1822
1823 while (len > 0)
1824 {
1825 /* By using store_unsigned_integer we avoid having to do
1826 anything special for small big-endian values. */
1827 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1828 store_unsigned_integer (valbuf,
1829 (len > X_REGISTER_SIZE
1830 ? X_REGISTER_SIZE : len), byte_order, tmp);
1831 len -= X_REGISTER_SIZE;
1832 valbuf += X_REGISTER_SIZE;
1833 }
1834 }
1835 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1836 {
1837 int regno = AARCH64_V0_REGNUM;
1838 bfd_byte buf[V_REGISTER_SIZE];
1839 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1840 int len = TYPE_LENGTH (target_type);
1841
1842 regcache_cooked_read (regs, regno, buf);
1843 memcpy (valbuf, buf, len);
1844 valbuf += len;
1845 regcache_cooked_read (regs, regno + 1, buf);
1846 memcpy (valbuf, buf, len);
1847 valbuf += len;
1848 }
cd635f74 1849 else if (is_hfa_or_hva (type))
07b287a0
MS
1850 {
1851 int elements = TYPE_NFIELDS (type);
1852 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1853 int len = TYPE_LENGTH (member_type);
1854 int i;
1855
1856 for (i = 0; i < elements; i++)
1857 {
1858 int regno = AARCH64_V0_REGNUM + i;
db3516bb 1859 bfd_byte buf[V_REGISTER_SIZE];
1860
1861 if (aarch64_debug)
b277c936 1862 {
cd635f74 1863 debug_printf ("read HFA or HVA return value element %d from %s\n",
1864 i + 1,
1865 gdbarch_register_name (gdbarch, regno));
1866 }
1867 regcache_cooked_read (regs, regno, buf);
1868
1869 memcpy (valbuf, buf, len);
1870 valbuf += len;
1871 }
1872 }
1873 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1874 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1875 {
1876 /* Short vector is returned in V register. */
1877 gdb_byte buf[V_REGISTER_SIZE];
1878
1879 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1880 memcpy (valbuf, buf, TYPE_LENGTH (type));
1881 }
1882 else
1883 {
1884 /* For a structure or union the behaviour is as if the value had
1885 been stored to word-aligned memory and then loaded into
1886 registers with 64-bit load instruction(s). */
1887 int len = TYPE_LENGTH (type);
1888 int regno = AARCH64_X0_REGNUM;
1889 bfd_byte buf[X_REGISTER_SIZE];
1890
1891 while (len > 0)
1892 {
1893 regcache_cooked_read (regs, regno++, buf);
1894 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1895 len -= X_REGISTER_SIZE;
1896 valbuf += X_REGISTER_SIZE;
1897 }
1898 }
1899}
1900
1901
1902/* Will a function return an aggregate type in memory or in a
1903 register? Return 0 if an aggregate type can be returned in a
1904 register, 1 if it must be returned in memory. */
1905
1906static int
1907aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1908{
f168693b 1909 type = check_typedef (type);
07b287a0 1910
cd635f74 1911 if (is_hfa_or_hva (type))
07b287a0 1912 {
 1913 /* v0-v7 are used to return values and one register is allocated
 1914 per member. However, an HFA or HVA has at most four members. */
1915 return 0;
1916 }
1917
1918 if (TYPE_LENGTH (type) > 16)
1919 {
1920 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1921 invisible reference. */
1922
1923 return 1;
1924 }
1925
1926 return 0;
1927}
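/* For example, struct { float x, y, z; } is an HFA with three members
   and is returned in v0-v2 (one member per register), while a 24-byte
   structure exceeds the 16-byte limit and is returned in memory; per
   the PCS the caller passes the buffer address in x8.  */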
1928
1929/* Write into appropriate registers a function return value of type
1930 TYPE, given in virtual format. */
1931
1932static void
1933aarch64_store_return_value (struct type *type, struct regcache *regs,
1934 const gdb_byte *valbuf)
1935{
1936 struct gdbarch *gdbarch = get_regcache_arch (regs);
1937 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1938
1939 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1940 {
1941 bfd_byte buf[V_REGISTER_SIZE];
1942 int len = TYPE_LENGTH (type);
1943
1944 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1945 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1946 }
1947 else if (TYPE_CODE (type) == TYPE_CODE_INT
1948 || TYPE_CODE (type) == TYPE_CODE_CHAR
1949 || TYPE_CODE (type) == TYPE_CODE_BOOL
1950 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 1951 || TYPE_IS_REFERENCE (type)
1952 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1953 {
1954 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1955 {
 1956 /* Values of one word or less are zero/sign-extended and
 1957 returned in x0. */
1958 bfd_byte tmpbuf[X_REGISTER_SIZE];
1959 LONGEST val = unpack_long (type, valbuf);
1960
1961 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1962 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1963 }
1964 else
1965 {
 1966 /* Integral values greater than one word are stored in
 1967 consecutive registers starting with x0. This will always
 1968 be a multiple of the register size. */
1969 int len = TYPE_LENGTH (type);
1970 int regno = AARCH64_X0_REGNUM;
1971
1972 while (len > 0)
1973 {
1974 regcache_cooked_write (regs, regno++, valbuf);
1975 len -= X_REGISTER_SIZE;
1976 valbuf += X_REGISTER_SIZE;
1977 }
1978 }
1979 }
cd635f74 1980 else if (is_hfa_or_hva (type))
1981 {
1982 int elements = TYPE_NFIELDS (type);
1983 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1984 int len = TYPE_LENGTH (member_type);
1985 int i;
1986
1987 for (i = 0; i < elements; i++)
1988 {
1989 int regno = AARCH64_V0_REGNUM + i;
1990 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1991
1992 if (aarch64_debug)
b277c936 1993 {
cd635f74 1994 debug_printf ("write HFA or HVA return value element %d to %s\n",
1995 i + 1,
1996 gdbarch_register_name (gdbarch, regno));
1997 }
1998
1999 memcpy (tmpbuf, valbuf, len);
2000 regcache_cooked_write (regs, regno, tmpbuf);
2001 valbuf += len;
2002 }
2003 }
2004 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2005 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2006 {
2007 /* Short vector. */
2008 gdb_byte buf[V_REGISTER_SIZE];
2009
2010 memcpy (buf, valbuf, TYPE_LENGTH (type));
2011 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2012 }
2013 else
2014 {
2015 /* For a structure or union the behaviour is as if the value had
2016 been stored to word-aligned memory and then loaded into
2017 registers with 64-bit load instruction(s). */
2018 int len = TYPE_LENGTH (type);
2019 int regno = AARCH64_X0_REGNUM;
2020 bfd_byte tmpbuf[X_REGISTER_SIZE];
2021
2022 while (len > 0)
2023 {
2024 memcpy (tmpbuf, valbuf,
2025 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2026 regcache_cooked_write (regs, regno++, tmpbuf);
2027 len -= X_REGISTER_SIZE;
2028 valbuf += X_REGISTER_SIZE;
2029 }
2030 }
2031}
2032
2033/* Implement the "return_value" gdbarch method. */
2034
2035static enum return_value_convention
2036aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2037 struct type *valtype, struct regcache *regcache,
2038 gdb_byte *readbuf, const gdb_byte *writebuf)
2039{
2040
2041 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2042 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2043 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2044 {
2045 if (aarch64_return_in_memory (gdbarch, valtype))
2046 {
2047 if (aarch64_debug)
b277c936 2048 debug_printf ("return value in memory\n");
2049 return RETURN_VALUE_STRUCT_CONVENTION;
2050 }
2051 }
2052
2053 if (writebuf)
2054 aarch64_store_return_value (valtype, regcache, writebuf);
2055
2056 if (readbuf)
2057 aarch64_extract_return_value (valtype, regcache, readbuf);
2058
2059 if (aarch64_debug)
b277c936 2060 debug_printf ("return value in registers\n");
2061
2062 return RETURN_VALUE_REGISTER_CONVENTION;
2063}
2064
2065/* Implement the "get_longjmp_target" gdbarch method. */
2066
2067static int
2068aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2069{
2070 CORE_ADDR jb_addr;
2071 gdb_byte buf[X_REGISTER_SIZE];
2072 struct gdbarch *gdbarch = get_frame_arch (frame);
2073 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2074 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2075
2076 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2077
2078 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2079 X_REGISTER_SIZE))
2080 return 0;
2081
2082 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2083 return 1;
2084}
2085
2086/* Implement the "gen_return_address" gdbarch method. */
2087
2088static void
2089aarch64_gen_return_address (struct gdbarch *gdbarch,
2090 struct agent_expr *ax, struct axs_value *value,
2091 CORE_ADDR scope)
2092{
2093 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2094 value->kind = axs_lvalue_register;
2095 value->u.reg = AARCH64_LR_REGNUM;
2096}
2097\f
2098
2099/* Return the pseudo register name corresponding to register regnum. */
2100
2101static const char *
2102aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2103{
2104 static const char *const q_name[] =
2105 {
2106 "q0", "q1", "q2", "q3",
2107 "q4", "q5", "q6", "q7",
2108 "q8", "q9", "q10", "q11",
2109 "q12", "q13", "q14", "q15",
2110 "q16", "q17", "q18", "q19",
2111 "q20", "q21", "q22", "q23",
2112 "q24", "q25", "q26", "q27",
2113 "q28", "q29", "q30", "q31",
2114 };
2115
2116 static const char *const d_name[] =
2117 {
2118 "d0", "d1", "d2", "d3",
2119 "d4", "d5", "d6", "d7",
2120 "d8", "d9", "d10", "d11",
2121 "d12", "d13", "d14", "d15",
2122 "d16", "d17", "d18", "d19",
2123 "d20", "d21", "d22", "d23",
2124 "d24", "d25", "d26", "d27",
2125 "d28", "d29", "d30", "d31",
2126 };
2127
2128 static const char *const s_name[] =
2129 {
2130 "s0", "s1", "s2", "s3",
2131 "s4", "s5", "s6", "s7",
2132 "s8", "s9", "s10", "s11",
2133 "s12", "s13", "s14", "s15",
2134 "s16", "s17", "s18", "s19",
2135 "s20", "s21", "s22", "s23",
2136 "s24", "s25", "s26", "s27",
2137 "s28", "s29", "s30", "s31",
2138 };
2139
2140 static const char *const h_name[] =
2141 {
2142 "h0", "h1", "h2", "h3",
2143 "h4", "h5", "h6", "h7",
2144 "h8", "h9", "h10", "h11",
2145 "h12", "h13", "h14", "h15",
2146 "h16", "h17", "h18", "h19",
2147 "h20", "h21", "h22", "h23",
2148 "h24", "h25", "h26", "h27",
2149 "h28", "h29", "h30", "h31",
2150 };
2151
2152 static const char *const b_name[] =
2153 {
2154 "b0", "b1", "b2", "b3",
2155 "b4", "b5", "b6", "b7",
2156 "b8", "b9", "b10", "b11",
2157 "b12", "b13", "b14", "b15",
2158 "b16", "b17", "b18", "b19",
2159 "b20", "b21", "b22", "b23",
2160 "b24", "b25", "b26", "b27",
2161 "b28", "b29", "b30", "b31",
2162 };
2163
2164 regnum -= gdbarch_num_regs (gdbarch);
2165
2166 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2167 return q_name[regnum - AARCH64_Q0_REGNUM];
2168
2169 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2170 return d_name[regnum - AARCH64_D0_REGNUM];
2171
2172 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2173 return s_name[regnum - AARCH64_S0_REGNUM];
2174
2175 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2176 return h_name[regnum - AARCH64_H0_REGNUM];
2177
2178 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2179 return b_name[regnum - AARCH64_B0_REGNUM];
2180
2181 internal_error (__FILE__, __LINE__,
2182 _("aarch64_pseudo_register_name: bad register number %d"),
2183 regnum);
2184}
2185
2186/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2187
2188static struct type *
2189aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2190{
2191 regnum -= gdbarch_num_regs (gdbarch);
2192
2193 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2194 return aarch64_vnq_type (gdbarch);
2195
2196 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2197 return aarch64_vnd_type (gdbarch);
2198
2199 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2200 return aarch64_vns_type (gdbarch);
2201
2202 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2203 return aarch64_vnh_type (gdbarch);
2204
2205 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2206 return aarch64_vnb_type (gdbarch);
2207
2208 internal_error (__FILE__, __LINE__,
2209 _("aarch64_pseudo_register_type: bad register number %d"),
2210 regnum);
2211}
2212
2213/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2214
2215static int
2216aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2217 struct reggroup *group)
2218{
2219 regnum -= gdbarch_num_regs (gdbarch);
2220
2221 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2222 return group == all_reggroup || group == vector_reggroup;
2223 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2224 return (group == all_reggroup || group == vector_reggroup
2225 || group == float_reggroup);
2226 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2227 return (group == all_reggroup || group == vector_reggroup
2228 || group == float_reggroup);
2229 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2230 return group == all_reggroup || group == vector_reggroup;
2231 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2232 return group == all_reggroup || group == vector_reggroup;
2233
2234 return group == all_reggroup;
2235}
2236
2237/* Implement the "pseudo_register_read_value" gdbarch method. */
2238
2239static struct value *
2240aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2241 struct regcache *regcache,
2242 int regnum)
2243{
2244 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2245 struct value *result_value;
2246 gdb_byte *buf;
2247
2248 result_value = allocate_value (register_type (gdbarch, regnum));
2249 VALUE_LVAL (result_value) = lval_register;
2250 VALUE_REGNUM (result_value) = regnum;
2251 buf = value_contents_raw (result_value);
2252
2253 regnum -= gdbarch_num_regs (gdbarch);
2254
2255 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2256 {
2257 enum register_status status;
2258 unsigned v_regnum;
2259
2260 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2261 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2262 if (status != REG_VALID)
2263 mark_value_bytes_unavailable (result_value, 0,
2264 TYPE_LENGTH (value_type (result_value)));
2265 else
2266 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2267 return result_value;
2268 }
2269
2270 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2271 {
2272 enum register_status status;
2273 unsigned v_regnum;
2274
2275 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2276 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2277 if (status != REG_VALID)
2278 mark_value_bytes_unavailable (result_value, 0,
2279 TYPE_LENGTH (value_type (result_value)));
2280 else
2281 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2282 return result_value;
2283 }
2284
2285 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2286 {
2287 enum register_status status;
2288 unsigned v_regnum;
2289
2290 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2291 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2292 if (status != REG_VALID)
2293 mark_value_bytes_unavailable (result_value, 0,
2294 TYPE_LENGTH (value_type (result_value)));
2295 else
2296 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2297 return result_value;
2298 }
2299
2300 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2301 {
2302 enum register_status status;
2303 unsigned v_regnum;
2304
2305 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2306 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2307 if (status != REG_VALID)
2308 mark_value_bytes_unavailable (result_value, 0,
2309 TYPE_LENGTH (value_type (result_value)));
2310 else
2311 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2312 return result_value;
2313 }
2314
2315 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2316 {
2317 enum register_status status;
2318 unsigned v_regnum;
2319
2320 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2321 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2322 if (status != REG_VALID)
2323 mark_value_bytes_unavailable (result_value, 0,
2324 TYPE_LENGTH (value_type (result_value)));
2325 else
2326 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2327 return result_value;
2328 }
2329
 2330 gdb_assert_not_reached ("regnum out of bounds");
2331}
2332
2333/* Implement the "pseudo_register_write" gdbarch method. */
2334
2335static void
2336aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2337 int regnum, const gdb_byte *buf)
2338{
2339 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2340
 2341 /* Ensure the register buffer is zero. We want GDB writes of the
 2342 various 'scalar' pseudo registers to behave like architectural
 2343 writes: register-width bytes are written and the remainder is
 2344 set to zero. */
2345 memset (reg_buf, 0, sizeof (reg_buf));
2346
2347 regnum -= gdbarch_num_regs (gdbarch);
2348
2349 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2350 {
2351 /* pseudo Q registers */
2352 unsigned v_regnum;
2353
2354 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2355 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2356 regcache_raw_write (regcache, v_regnum, reg_buf);
2357 return;
2358 }
2359
2360 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2361 {
2362 /* pseudo D registers */
2363 unsigned v_regnum;
2364
2365 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2366 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2367 regcache_raw_write (regcache, v_regnum, reg_buf);
2368 return;
2369 }
2370
2371 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2372 {
2373 unsigned v_regnum;
2374
2375 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2376 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2377 regcache_raw_write (regcache, v_regnum, reg_buf);
2378 return;
2379 }
2380
2381 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2382 {
2383 /* pseudo H registers */
2384 unsigned v_regnum;
2385
2386 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2387 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2388 regcache_raw_write (regcache, v_regnum, reg_buf);
2389 return;
2390 }
2391
2392 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2393 {
2394 /* pseudo B registers */
2395 unsigned v_regnum;
2396
2397 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2398 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2399 regcache_raw_write (regcache, v_regnum, reg_buf);
2400 return;
2401 }
2402
 2403 gdb_assert_not_reached ("regnum out of bounds");
2404}
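/* As a consequence, writing the pseudo register "s3" stores four bytes
   into the low part of v3 and zeroes the remaining twelve, just as an
   architectural write to s3 would.  */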
2405
2406/* Callback function for user_reg_add. */
2407
2408static struct value *
2409value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2410{
9a3c8263 2411 const int *reg_p = (const int *) baton;
2412
2413 return value_of_register (*reg_p, frame);
2414}
2415\f
2416
2417/* Implement the "software_single_step" gdbarch method, needed to
2418 single step through atomic sequences on AArch64. */
2419
93f9a11f 2420static VEC (CORE_ADDR) *
f5ea389a 2421aarch64_software_single_step (struct regcache *regcache)
9404b58f 2422{
0187a92f 2423 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2424 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2425 const int insn_size = 4;
2426 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2427 CORE_ADDR pc = regcache_read_pc (regcache);
2428 CORE_ADDR breaks[2] = { -1, -1 };
2429 CORE_ADDR loc = pc;
2430 CORE_ADDR closing_insn = 0;
2431 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2432 byte_order_for_code);
2433 int index;
2434 int insn_count;
2435 int bc_insn_count = 0; /* Conditional branch instruction count. */
2436 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
f77ee802 2437 aarch64_inst inst;
93f9a11f 2438 VEC (CORE_ADDR) *next_pcs = NULL;
f77ee802 2439
43cdf5ae 2440 if (aarch64_decode_insn (insn, &inst, 1) != 0)
93f9a11f 2441 return NULL;
2442
2443 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2444 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
93f9a11f 2445 return NULL;
2446
2447 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2448 {
2449 loc += insn_size;
2450 insn = read_memory_unsigned_integer (loc, insn_size,
2451 byte_order_for_code);
2452
43cdf5ae 2453 if (aarch64_decode_insn (insn, &inst, 1) != 0)
93f9a11f 2454 return NULL;
9404b58f 2455 /* Check if the instruction is a conditional branch. */
f77ee802 2456 if (inst.opcode->iclass == condbranch)
9404b58f 2457 {
2458 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2459
9404b58f 2460 if (bc_insn_count >= 1)
93f9a11f 2461 return NULL;
2462
2463 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2464 breaks[1] = loc + inst.operands[0].imm.value;
2465
2466 bc_insn_count++;
2467 last_breakpoint++;
2468 }
2469
2470 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2471 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2472 {
2473 closing_insn = loc;
2474 break;
2475 }
2476 }
2477
2478 /* We didn't find a closing Store Exclusive instruction, fall back. */
2479 if (!closing_insn)
93f9a11f 2480 return NULL;
2481
2482 /* Insert breakpoint after the end of the atomic sequence. */
2483 breaks[0] = loc + insn_size;
2484
2485 /* Check for duplicated breakpoints, and also check that the second
2486 breakpoint is not within the atomic sequence. */
2487 if (last_breakpoint
2488 && (breaks[1] == breaks[0]
2489 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2490 last_breakpoint = 0;
2491
2492 /* Insert the breakpoint at the end of the sequence, and one at the
2493 destination of the conditional branch, if it exists. */
2494 for (index = 0; index <= last_breakpoint; index++)
93f9a11f 2495 VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);
9404b58f 2496
93f9a11f 2497 return next_pcs;
2498}
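/* An illustrative sequence that the code above recognizes:

     ldaxr w0, [x1]      ; load-exclusive opens the sequence
     add w0, w0, #1
     stlxr w2, w0, [x1]  ; store-exclusive closes it

   Single-stepping each instruction would clear the exclusive monitor
   and make the sequence retry forever, hence the breakpoint is placed
   on the instruction following the store-exclusive instead.  */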
2499
2500struct displaced_step_closure
2501{
 2502 /* True when a conditional instruction, such as B.COND or TBZ, is
 2503 being displaced stepped. */
2504 int cond;
2505
2506 /* PC adjustment offset after displaced stepping. */
2507 int32_t pc_adjust;
2508};
2509
2510/* Data when visiting instructions for displaced stepping. */
2511
2512struct aarch64_displaced_step_data
2513{
2514 struct aarch64_insn_data base;
2515
 2516 /* The address at which the instruction will be executed. */
2517 CORE_ADDR new_addr;
2518 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2519 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2520 /* Number of instructions in INSN_BUF. */
2521 unsigned insn_count;
2522 /* Registers when doing displaced stepping. */
2523 struct regcache *regs;
2524
2525 struct displaced_step_closure *dsc;
2526};
2527
2528/* Implementation of aarch64_insn_visitor method "b". */
2529
2530static void
2531aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2532 struct aarch64_insn_data *data)
2533{
2534 struct aarch64_displaced_step_data *dsd
2535 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2536 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2537
2538 if (can_encode_int32 (new_offset, 28))
2539 {
2540 /* Emit B rather than BL, because executing BL on a new address
2541 will get the wrong address into LR. In order to avoid this,
2542 we emit B, and update LR if the instruction is BL. */
2543 emit_b (dsd->insn_buf, 0, new_offset);
2544 dsd->insn_count++;
2545 }
2546 else
2547 {
2548 /* Write NOP. */
2549 emit_nop (dsd->insn_buf);
2550 dsd->insn_count++;
2551 dsd->dsc->pc_adjust = offset;
2552 }
2553
2554 if (is_bl)
2555 {
2556 /* Update LR. */
2557 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2558 data->insn_addr + 4);
2559 }
2560}
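/* The check can_encode_int32 (new_offset, 28) corresponds to the B
   instruction's 26-bit immediate scaled by 4, i.e. a reach of +/-128
   MiB from the scratch pad; beyond that a NOP is emitted and the
   branch is performed by the PC fixup instead.  */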
2561
2562/* Implementation of aarch64_insn_visitor method "b_cond". */
2563
2564static void
2565aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2566 struct aarch64_insn_data *data)
2567{
2568 struct aarch64_displaced_step_data *dsd
2569 = (struct aarch64_displaced_step_data *) data;
2570
 2571 /* GDB has to fix up the PC after displaced stepping this instruction
 2572 differently according to whether the condition is true or false.
 2573 Instead of checking COND against the condition flags, we can emit
 2574 the following instructions, and GDB can tell how to fix up the PC
 2575 from the resulting PC value.
2576
2577 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2578 INSN1 ;
2579 TAKEN:
2580 INSN2
2581 */
2582
2583 emit_bcond (dsd->insn_buf, cond, 8);
2584 dsd->dsc->cond = 1;
2585 dsd->dsc->pc_adjust = offset;
2586 dsd->insn_count = 1;
2587}
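/* Worked example: for "b.eq target" copied from FROM to scratch
   address TO, pc_adjust is set to target - FROM.  After the step,
   aarch64_displaced_step_fixup finds PC == TO + 8 if the condition
   held and sets PC = FROM + pc_adjust; if PC == TO + 4 the condition
   failed and the PC becomes FROM + 4.  */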
2588
 2589/* Build an aarch64_register operand with the given register number
 2590 and size. If we know the register statically, we should make it a
 2591 global as above instead of using this helper function. */
2592
2593static struct aarch64_register
2594aarch64_register (unsigned num, int is64)
2595{
2596 return (struct aarch64_register) { num, is64 };
2597}
2598
2599/* Implementation of aarch64_insn_visitor method "cb". */
2600
2601static void
2602aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2603 const unsigned rn, int is64,
2604 struct aarch64_insn_data *data)
2605{
2606 struct aarch64_displaced_step_data *dsd
2607 = (struct aarch64_displaced_step_data *) data;
2608
2609 /* The offset is out of range for a compare and branch
2610 instruction. We can use the following instructions instead:
2611
2612 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2613 INSN1 ;
2614 TAKEN:
2615 INSN2
2616 */
2617 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2618 dsd->insn_count = 1;
2619 dsd->dsc->cond = 1;
2620 dsd->dsc->pc_adjust = offset;
2621}
2622
2623/* Implementation of aarch64_insn_visitor method "tb". */
2624
2625static void
2626aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2627 const unsigned rt, unsigned bit,
2628 struct aarch64_insn_data *data)
2629{
2630 struct aarch64_displaced_step_data *dsd
2631 = (struct aarch64_displaced_step_data *) data;
2632
2633 /* The offset is out of range for a test bit and branch
 2634 instruction. We can use the following instructions instead:
2635
2636 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2637 INSN1 ;
2638 TAKEN:
2639 INSN2
2640
2641 */
2642 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2643 dsd->insn_count = 1;
2644 dsd->dsc->cond = 1;
2645 dsd->dsc->pc_adjust = offset;
2646}
2647
2648/* Implementation of aarch64_insn_visitor method "adr". */
2649
2650static void
2651aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2652 const int is_adrp, struct aarch64_insn_data *data)
2653{
2654 struct aarch64_displaced_step_data *dsd
2655 = (struct aarch64_displaced_step_data *) data;
2656 /* We know exactly the address the ADR{P,} instruction will compute.
2657 We can just write it to the destination register. */
2658 CORE_ADDR address = data->insn_addr + offset;
2659
2660 if (is_adrp)
2661 {
 2662 /* Clear the lower 12 bits of the address to get its 4K page. */
2663 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2664 address & ~0xfff);
2665 }
2666 else
2667 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2668 address);
2669
2670 dsd->dsc->pc_adjust = 4;
2671 emit_nop (dsd->insn_buf);
2672 dsd->insn_count = 1;
2673}
2674
2675/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2676
2677static void
2678aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2679 const unsigned rt, const int is64,
2680 struct aarch64_insn_data *data)
2681{
2682 struct aarch64_displaced_step_data *dsd
2683 = (struct aarch64_displaced_step_data *) data;
2684 CORE_ADDR address = data->insn_addr + offset;
2685 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2686
2687 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2688 address);
2689
2690 if (is_sw)
2691 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2692 aarch64_register (rt, 1), zero);
2693 else
2694 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2695 aarch64_register (rt, 1), zero);
2696
2697 dsd->dsc->pc_adjust = 4;
2698}
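/* LDR (literal) is PC-relative, so the copied instruction cannot
   simply run at NEW_ADDR.  Instead, the literal's absolute address is
   materialized in Rt and the load is rewritten as a zero-offset
   LDR/LDRSW through Rt, which yields the same result.  */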
2699
2700/* Implementation of aarch64_insn_visitor method "others". */
2701
2702static void
2703aarch64_displaced_step_others (const uint32_t insn,
2704 struct aarch64_insn_data *data)
2705{
2706 struct aarch64_displaced_step_data *dsd
2707 = (struct aarch64_displaced_step_data *) data;
2708
e1c587c3 2709 aarch64_emit_insn (dsd->insn_buf, insn);
2710 dsd->insn_count = 1;
2711
2712 if ((insn & 0xfffffc1f) == 0xd65f0000)
2713 {
2714 /* RET */
2715 dsd->dsc->pc_adjust = 0;
2716 }
2717 else
2718 dsd->dsc->pc_adjust = 4;
2719}
2720
2721static const struct aarch64_insn_visitor visitor =
2722{
2723 aarch64_displaced_step_b,
2724 aarch64_displaced_step_b_cond,
2725 aarch64_displaced_step_cb,
2726 aarch64_displaced_step_tb,
2727 aarch64_displaced_step_adr,
2728 aarch64_displaced_step_ldr_literal,
2729 aarch64_displaced_step_others,
2730};
2731
2732/* Implement the "displaced_step_copy_insn" gdbarch method. */
2733
2734struct displaced_step_closure *
2735aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2736 CORE_ADDR from, CORE_ADDR to,
2737 struct regcache *regs)
2738{
2739 struct displaced_step_closure *dsc = NULL;
2740 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2741 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2742 struct aarch64_displaced_step_data dsd;
2743 aarch64_inst inst;
2744
2745 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2746 return NULL;
2747
2748 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 2749 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2750 {
2751 /* We can't displaced step atomic sequences. */
2752 return NULL;
2753 }
2754
2755 dsc = XCNEW (struct displaced_step_closure);
2756 dsd.base.insn_addr = from;
2757 dsd.new_addr = to;
2758 dsd.regs = regs;
2759 dsd.dsc = dsc;
034f1a81 2760 dsd.insn_count = 0;
2761 aarch64_relocate_instruction (insn, &visitor,
2762 (struct aarch64_insn_data *) &dsd);
2763 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2764
2765 if (dsd.insn_count != 0)
2766 {
2767 int i;
2768
2769 /* Instruction can be relocated to scratch pad. Copy
2770 relocated instruction(s) there. */
2771 for (i = 0; i < dsd.insn_count; i++)
2772 {
2773 if (debug_displaced)
2774 {
2775 debug_printf ("displaced: writing insn ");
2776 debug_printf ("%.8x", dsd.insn_buf[i]);
2777 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2778 }
2779 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2780 (ULONGEST) dsd.insn_buf[i]);
2781 }
2782 }
2783 else
2784 {
2785 xfree (dsc);
2786 dsc = NULL;
2787 }
2788
2789 return dsc;
2790}
2791
2792/* Implement the "displaced_step_fixup" gdbarch method. */
2793
2794void
2795aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2796 struct displaced_step_closure *dsc,
2797 CORE_ADDR from, CORE_ADDR to,
2798 struct regcache *regs)
2799{
2800 if (dsc->cond)
2801 {
2802 ULONGEST pc;
2803
2804 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2805 if (pc - to == 8)
2806 {
2807 /* Condition is true. */
2808 }
2809 else if (pc - to == 4)
2810 {
2811 /* Condition is false. */
2812 dsc->pc_adjust = 4;
2813 }
2814 else
2815 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2816 }
2817
2818 if (dsc->pc_adjust != 0)
2819 {
2820 if (debug_displaced)
2821 {
2822 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2823 paddress (gdbarch, from), dsc->pc_adjust);
2824 }
2825 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2826 from + dsc->pc_adjust);
2827 }
2828}
2829
2830/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2831
2832int
2833aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2834 struct displaced_step_closure *closure)
2835{
2836 return 1;
2837}
2838
2839/* Initialize the current architecture based on INFO. If possible,
2840 re-use an architecture from ARCHES, which is a list of
2841 architectures already created during this debugging session.
2842
2843 Called e.g. at program startup, when reading a core file, and when
2844 reading a binary file. */
2845
2846static struct gdbarch *
2847aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2848{
2849 struct gdbarch_tdep *tdep;
2850 struct gdbarch *gdbarch;
2851 struct gdbarch_list *best_arch;
2852 struct tdesc_arch_data *tdesc_data = NULL;
2853 const struct target_desc *tdesc = info.target_desc;
2854 int i;
2855 int valid_p = 1;
2856 const struct tdesc_feature *feature;
2857 int num_regs = 0;
2858 int num_pseudo_regs = 0;
2859
2860 /* Ensure we always have a target descriptor. */
2861 if (!tdesc_has_registers (tdesc))
2862 tdesc = tdesc_aarch64;
2863
2864 gdb_assert (tdesc);
2865
2866 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2867
2868 if (feature == NULL)
2869 return NULL;
2870
2871 tdesc_data = tdesc_data_alloc ();
2872
2873 /* Validate the descriptor provides the mandatory core R registers
2874 and allocate their numbers. */
2875 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2876 valid_p &=
2877 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2878 aarch64_r_register_names[i]);
2879
2880 num_regs = AARCH64_X0_REGNUM + i;
2881
2882 /* Look for the V registers. */
2883 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2884 if (feature)
2885 {
2886 /* Validate the descriptor provides the mandatory V registers
2887 and allocate their numbers. */
2888 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2889 valid_p &=
2890 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2891 aarch64_v_register_names[i]);
2892
2893 num_regs = AARCH64_V0_REGNUM + i;
2894
2895 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2896 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2897 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2898 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2899 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2900 }
2901
2902 if (!valid_p)
2903 {
2904 tdesc_data_cleanup (tdesc_data);
2905 return NULL;
2906 }
2907
2908 /* AArch64 code is always little-endian. */
2909 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2910
2911 /* If there is already a candidate, use it. */
2912 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2913 best_arch != NULL;
2914 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2915 {
2916 /* Found a match. */
2917 break;
2918 }
2919
2920 if (best_arch != NULL)
2921 {
2922 if (tdesc_data != NULL)
2923 tdesc_data_cleanup (tdesc_data);
2924 return best_arch->gdbarch;
2925 }
2926
8d749320 2927 tdep = XCNEW (struct gdbarch_tdep);
2928 gdbarch = gdbarch_alloc (&info, tdep);
2929
2930 /* This should be low enough for everything. */
2931 tdep->lowest_pc = 0x20;
2932 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2933 tdep->jb_elt_size = 8;
2934
2935 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2936 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2937
2938 /* Frame handling. */
2939 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2940 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2941 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2942
2943 /* Advance PC across function entry code. */
2944 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2945
2946 /* The stack grows downward. */
2947 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2948
2949 /* Breakpoint manipulation. */
2950 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2951 aarch64_breakpoint::kind_from_pc);
2952 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2953 aarch64_breakpoint::bp_from_kind);
07b287a0 2954 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 2955 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2956
2957 /* Information about registers, etc. */
2958 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2959 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2960 set_gdbarch_num_regs (gdbarch, num_regs);
2961
2962 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2963 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2964 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2965 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2966 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2967 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2968 aarch64_pseudo_register_reggroup_p);
2969
2970 /* ABI */
2971 set_gdbarch_short_bit (gdbarch, 16);
2972 set_gdbarch_int_bit (gdbarch, 32);
2973 set_gdbarch_float_bit (gdbarch, 32);
2974 set_gdbarch_double_bit (gdbarch, 64);
2975 set_gdbarch_long_double_bit (gdbarch, 128);
2976 set_gdbarch_long_bit (gdbarch, 64);
2977 set_gdbarch_long_long_bit (gdbarch, 64);
2978 set_gdbarch_ptr_bit (gdbarch, 64);
2979 set_gdbarch_char_signed (gdbarch, 0);
2980 set_gdbarch_wchar_bit (gdbarch, 64);
2981 set_gdbarch_wchar_signed (gdbarch, 0);
2982 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2983 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2984 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2985
2986 /* Internal <-> external register number maps. */
2987 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2988
2989 /* Returning results. */
2990 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2991
2992 /* Disassembly. */
2993 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2994
2995 /* Virtual tables. */
2996 set_gdbarch_vbit_in_delta (gdbarch, 1);
2997
2998 /* Hook in the ABI-specific overrides, if they have been registered. */
2999 info.target_desc = tdesc;
3000 info.tdep_info = (void *) tdesc_data;
3001 gdbarch_init_osabi (info, gdbarch);
3002
3003 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3004
3005 /* Add some default predicates. */
3006 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3007 dwarf2_append_unwinders (gdbarch);
3008 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3009
3010 frame_base_set_default (gdbarch, &aarch64_normal_base);
3011
3012 /* Now we have tuned the configuration, set a few final things,
3013 based on what the OS ABI has told us. */
3014
3015 if (tdep->jb_pc >= 0)
3016 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3017
3018 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3019
3020 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3021
3022 /* Add standard register aliases. */
3023 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3024 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3025 value_of_aarch64_user_reg,
3026 &aarch64_register_aliases[i].regnum);
3027
3028 return gdbarch;
3029}
3030
3031static void
3032aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3033{
3034 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3035
3036 if (tdep == NULL)
3037 return;
3038
3039 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3040 paddress (gdbarch, tdep->lowest_pc));
3041}
3042
3043namespace selftests
3044{
3045static void aarch64_process_record_test (void);
3046}
3047
3048/* Suppress warning from -Wmissing-prototypes. */
3049extern initialize_file_ftype _initialize_aarch64_tdep;
3050
3051void
3052_initialize_aarch64_tdep (void)
3053{
3054 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3055 aarch64_dump_tdep);
3056
3057 initialize_tdesc_aarch64 ();
07b287a0
MS
3058
3059 /* Debug this file's internals. */
3060 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3061Set AArch64 debugging."), _("\
3062Show AArch64 debugging."), _("\
3063When on, AArch64 specific debugging is enabled."),
3064 NULL,
3065 show_aarch64_debug,
3066 &setdebuglist, &showdebuglist);
3067
3068#if GDB_SELF_TEST
3069 register_self_test (selftests::aarch64_analyze_prologue_test);
1e2b521d 3070 register_self_test (selftests::aarch64_process_record_test);
4d9a9006 3071#endif
07b287a0 3072}
3073
3074/* AArch64 process record-replay related structures, defines etc. */
3075
3076#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3077 do \
3078 { \
3079 unsigned int reg_len = LENGTH; \
3080 if (reg_len) \
3081 { \
3082 REGS = XNEWVEC (uint32_t, reg_len); \
 3083 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3084 } \
3085 } \
3086 while (0)
3087
3088#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3089 do \
3090 { \
3091 unsigned int mem_len = LENGTH; \
3092 if (mem_len) \
3093 { \
3094 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
 3095 memcpy (&MEMS->len, &RECORD_BUF[0], \
 3096 sizeof (struct aarch64_mem_r) * LENGTH); \
3097 } \
3098 } \
3099 while (0)
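/* Both macros heap-allocate with XNEWVEC; the buffers hang off the
   insn_decode_record and are expected to be freed once their contents
   have been handed over to the record-full layer.  */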
3100
3101/* AArch64 record/replay structures and enumerations. */
3102
3103struct aarch64_mem_r
3104{
3105 uint64_t len; /* Record length. */
3106 uint64_t addr; /* Memory address. */
3107};
3108
3109enum aarch64_record_result
3110{
3111 AARCH64_RECORD_SUCCESS,
3112 AARCH64_RECORD_UNSUPPORTED,
3113 AARCH64_RECORD_UNKNOWN
3114};
3115
3116typedef struct insn_decode_record_t
3117{
3118 struct gdbarch *gdbarch;
3119 struct regcache *regcache;
3120 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3121 uint32_t aarch64_insn; /* Insn to be recorded. */
3122 uint32_t mem_rec_count; /* Count of memory records. */
3123 uint32_t reg_rec_count; /* Count of register records. */
3124 uint32_t *aarch64_regs; /* Registers to be recorded. */
3125 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3126} insn_decode_record;
3127
3128/* Record handler for data processing - register instructions. */
3129
3130static unsigned int
3131aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3132{
3133 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3134 uint32_t record_buf[4];
3135
3136 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3137 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3138 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3139
3140 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3141 {
3142 uint8_t setflags;
3143
3144 /* Logical (shifted register). */
3145 if (insn_bits24_27 == 0x0a)
3146 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3147 /* Add/subtract. */
3148 else if (insn_bits24_27 == 0x0b)
3149 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3150 else
3151 return AARCH64_RECORD_UNKNOWN;
3152
3153 record_buf[0] = reg_rd;
3154 aarch64_insn_r->reg_rec_count = 1;
3155 if (setflags)
3156 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3157 }
3158 else
3159 {
3160 if (insn_bits24_27 == 0x0b)
3161 {
3162 /* Data-processing (3 source). */
3163 record_buf[0] = reg_rd;
3164 aarch64_insn_r->reg_rec_count = 1;
3165 }
3166 else if (insn_bits24_27 == 0x0a)
3167 {
3168 if (insn_bits21_23 == 0x00)
3169 {
3170 /* Add/subtract (with carry). */
3171 record_buf[0] = reg_rd;
3172 aarch64_insn_r->reg_rec_count = 1;
3173 if (bit (aarch64_insn_r->aarch64_insn, 29))
3174 {
3175 record_buf[1] = AARCH64_CPSR_REGNUM;
3176 aarch64_insn_r->reg_rec_count = 2;
3177 }
3178 }
3179 else if (insn_bits21_23 == 0x02)
3180 {
3181 /* Conditional compare (register) and conditional compare
3182 (immediate) instructions. */
3183 record_buf[0] = AARCH64_CPSR_REGNUM;
3184 aarch64_insn_r->reg_rec_count = 1;
3185 }
3186 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3187 {
 3188 /* Conditional select. */
3189 /* Data-processing (2 source). */
3190 /* Data-processing (1 source). */
3191 record_buf[0] = reg_rd;
3192 aarch64_insn_r->reg_rec_count = 1;
3193 }
3194 else
3195 return AARCH64_RECORD_UNKNOWN;
3196 }
3197 }
3198
3199 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3200 record_buf);
3201 return AARCH64_RECORD_SUCCESS;
3202}
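/* For instance, "adds x0, x1, x2" (add/subtract shifted register with
   the S bit set) records X0 and the CPSR, whereas a plain
   "add x0, x1, x2" records only X0.  */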
3203
3204/* Record handler for data processing - immediate instructions. */
3205
3206static unsigned int
3207aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3208{
78cc6c2d 3209 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3210 uint32_t record_buf[4];
3211
3212 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3213 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3214 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3215
3216 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3217 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3218 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3219 {
3220 record_buf[0] = reg_rd;
3221 aarch64_insn_r->reg_rec_count = 1;
3222 }
3223 else if (insn_bits24_27 == 0x01)
3224 {
3225 /* Add/Subtract (immediate). */
3226 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3227 record_buf[0] = reg_rd;
3228 aarch64_insn_r->reg_rec_count = 1;
3229 if (setflags)
3230 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3231 }
3232 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3233 {
3234 /* Logical (immediate). */
3235 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3236 record_buf[0] = reg_rd;
3237 aarch64_insn_r->reg_rec_count = 1;
3238 if (setflags)
3239 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3240 }
3241 else
3242 return AARCH64_RECORD_UNKNOWN;
3243
3244 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3245 record_buf);
3246 return AARCH64_RECORD_SUCCESS;
3247}
3248
3249/* Record handler for branch, exception generation and system instructions. */
3250
3251static unsigned int
3252aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3253{
3254 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3255 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3256 uint32_t record_buf[4];
3257
3258 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3259 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3260 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3261
3262 if (insn_bits28_31 == 0x0d)
3263 {
3264 /* Exception generation instructions. */
3265 if (insn_bits24_27 == 0x04)
3266 {
3267 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3268 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3269 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3270 {
3271 ULONGEST svc_number;
3272
3273 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3274 &svc_number);
3275 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3276 svc_number);
3277 }
3278 else
3279 return AARCH64_RECORD_UNSUPPORTED;
3280 }
3281 /* System instructions. */
3282 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3283 {
3284 uint32_t reg_rt, reg_crn;
3285
3286 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3287 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3288
3289 /* Record rt in case of sysl and mrs instructions. */
3290 if (bit (aarch64_insn_r->aarch64_insn, 21))
3291 {
3292 record_buf[0] = reg_rt;
3293 aarch64_insn_r->reg_rec_count = 1;
3294 }
3295 /* Record cpsr for hint and msr(immediate) instructions. */
3296 else if (reg_crn == 0x02 || reg_crn == 0x04)
3297 {
3298 record_buf[0] = AARCH64_CPSR_REGNUM;
3299 aarch64_insn_r->reg_rec_count = 1;
3300 }
3301 }
3302 /* Unconditional branch (register). */
 3303 else if ((insn_bits24_27 & 0x0e) == 0x06)
3304 {
3305 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3306 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3307 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3308 }
3309 else
3310 return AARCH64_RECORD_UNKNOWN;
3311 }
3312 /* Unconditional branch (immediate). */
3313 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3314 {
3315 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3316 if (bit (aarch64_insn_r->aarch64_insn, 31))
3317 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3318 }
3319 else
3320 /* Compare & branch (immediate), Test & branch (immediate) and
3321 Conditional branch (immediate). */
3322 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3323
3324 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3325 record_buf);
3326 return AARCH64_RECORD_SUCCESS;
3327}
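/* For instance, "bl somewhere" (unconditional branch immediate with
   bit 31 set) records both the PC and the LR, while a plain "b"
   records only the PC.  */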
3328
3329/* Record handler for advanced SIMD load and store instructions. */
3330
3331static unsigned int
3332aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3333{
3334 CORE_ADDR address;
3335 uint64_t addr_offset = 0;
3336 uint32_t record_buf[24];
3337 uint64_t record_buf_mem[24];
3338 uint32_t reg_rn, reg_rt;
3339 uint32_t reg_index = 0, mem_index = 0;
3340 uint8_t opcode_bits, size_bits;
3341
3342 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3343 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3344 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3345 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3346 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3347
3348 if (record_debug)
b277c936 3349 debug_printf ("Process record: Advanced SIMD load/store\n");
3350
3351 /* Load/store single structure. */
3352 if (bit (aarch64_insn_r->aarch64_insn, 24))
3353 {
3354 uint8_t sindex, scale, selem, esize, replicate = 0;
3355 scale = opcode_bits >> 2;
3356 selem = ((opcode_bits & 0x02) |
3357 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3358 switch (scale)
3359 {
3360 case 1:
3361 if (size_bits & 0x01)
3362 return AARCH64_RECORD_UNKNOWN;
3363 break;
3364 case 2:
3365 if ((size_bits >> 1) & 0x01)
3366 return AARCH64_RECORD_UNKNOWN;
3367 if (size_bits & 0x01)
3368 {
3369 if (!((opcode_bits >> 1) & 0x01))
3370 scale = 3;
3371 else
3372 return AARCH64_RECORD_UNKNOWN;
3373 }
3374 break;
3375 case 3:
3376 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3377 {
3378 scale = size_bits;
3379 replicate = 1;
3380 break;
3381 }
3382 else
3383 return AARCH64_RECORD_UNKNOWN;
3384 default:
3385 break;
3386 }
3387 esize = 8 << scale;
3388 if (replicate)
3389 for (sindex = 0; sindex < selem; sindex++)
3390 {
3391 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3392 reg_rt = (reg_rt + 1) % 32;
3393 }
3394 else
3395 {
3396 for (sindex = 0; sindex < selem; sindex++)
3397 {
3398 if (bit (aarch64_insn_r->aarch64_insn, 22))
3399 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3400 else
3401 {
3402 record_buf_mem[mem_index++] = esize / 8;
3403 record_buf_mem[mem_index++] = address + addr_offset;
3404 }
3405 addr_offset = addr_offset + (esize / 8);
3406 reg_rt = (reg_rt + 1) % 32;
3407 }
3408 }
3409 }
3410 /* Load/store multiple structure. */
3411 else
3412 {
3413 uint8_t selem, esize, rpt, elements;
3414 uint8_t eindex, rindex;
3415
3416 esize = 8 << size_bits;
3417 if (bit (aarch64_insn_r->aarch64_insn, 30))
3418 elements = 128 / esize;
3419 else
3420 elements = 64 / esize;
3421
3422 switch (opcode_bits)
3423 {
 3424 /* LD/ST4 (4 Registers). */
3425 case 0:
3426 rpt = 1;
3427 selem = 4;
3428 break;
 3429 /* LD/ST1 (4 Registers). */
3430 case 2:
3431 rpt = 4;
3432 selem = 1;
3433 break;
 3434 /* LD/ST3 (3 Registers). */
3435 case 4:
3436 rpt = 1;
3437 selem = 3;
3438 break;
 3439 /* LD/ST1 (3 Registers). */
3440 case 6:
3441 rpt = 3;
3442 selem = 1;
3443 break;
 3444 /* LD/ST1 (1 Register). */
3445 case 7:
3446 rpt = 1;
3447 selem = 1;
3448 break;
 3449 /* LD/ST2 (2 Registers). */
3450 case 8:
3451 rpt = 1;
3452 selem = 2;
3453 break;
 3454 /* LD/ST1 (2 Registers). */
3455 case 10:
3456 rpt = 2;
3457 selem = 1;
3458 break;
3459 default:
3460 return AARCH64_RECORD_UNSUPPORTED;
3461 break;
3462 }
3463 for (rindex = 0; rindex < rpt; rindex++)
3464 for (eindex = 0; eindex < elements; eindex++)
3465 {
3466 uint8_t reg_tt, sindex;
3467 reg_tt = (reg_rt + rindex) % 32;
3468 for (sindex = 0; sindex < selem; sindex++)
3469 {
3470 if (bit (aarch64_insn_r->aarch64_insn, 22))
3471 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3472 else
3473 {
3474 record_buf_mem[mem_index++] = esize / 8;
3475 record_buf_mem[mem_index++] = address + addr_offset;
3476 }
3477 addr_offset = addr_offset + (esize / 8);
3478 reg_tt = (reg_tt + 1) % 32;
3479 }
3480 }
3481 }
3482
3483 if (bit (aarch64_insn_r->aarch64_insn, 23))
3484 record_buf[reg_index++] = reg_rn;
3485
3486 aarch64_insn_r->reg_rec_count = reg_index;
3487 aarch64_insn_r->mem_rec_count = mem_index / 2;
3488 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3489 record_buf_mem);
3490 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3491 record_buf);
3492 return AARCH64_RECORD_SUCCESS;
3493}
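/* For instance, "ld1 {v0.16b, v1.16b}, [x0]" (load multiple
   structure, opcode 0b1010) marks v0 and v1 as modified, and the
   post-indexed forms (bit 23 set) additionally record the base
   register.  */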
3494
3495/* Record handler for load and store instructions. */
3496
3497static unsigned int
3498aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3499{
3500 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3501 uint8_t insn_bit23, insn_bit21;
3502 uint8_t opc, size_bits, ld_flag, vector_flag;
3503 uint32_t reg_rn, reg_rt, reg_rt2;
3504 uint64_t datasize, offset;
3505 uint32_t record_buf[8];
3506 uint64_t record_buf_mem[8];
3507 CORE_ADDR address;
3508
3509 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3510 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3511 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3512 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3513 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3514 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3515 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3516 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3517 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3518 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3519 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3520
3521 /* Load/store exclusive. */
3522 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3523 {
3524 if (record_debug)
b277c936 3525 debug_printf ("Process record: load/store exclusive\n");
3526
3527 if (ld_flag)
3528 {
3529 record_buf[0] = reg_rt;
3530 aarch64_insn_r->reg_rec_count = 1;
3531 if (insn_bit21)
3532 {
3533 record_buf[1] = reg_rt2;
3534 aarch64_insn_r->reg_rec_count = 2;
3535 }
3536 }
3537 else
3538 {
3539 if (insn_bit21)
3540 datasize = (8 << size_bits) * 2;
3541 else
3542 datasize = (8 << size_bits);
3543 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3544 &address);
3545 record_buf_mem[0] = datasize / 8;
3546 record_buf_mem[1] = address;
3547 aarch64_insn_r->mem_rec_count = 1;
3548 if (!insn_bit23)
3549 {
3550 /* Save register rs. */
3551 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3552 aarch64_insn_r->reg_rec_count = 1;
3553 }
3554 }
3555 }
 3556 /* Decode load register (literal) instructions. */
3557 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3558 {
3559 if (record_debug)
b277c936 3560 debug_printf ("Process record: load register (literal)\n");
3561 if (vector_flag)
3562 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3563 else
3564 record_buf[0] = reg_rt;
3565 aarch64_insn_r->reg_rec_count = 1;
3566 }
 3567 /* Decode all types of load/store pair instructions. */
3568 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3569 {
3570 if (record_debug)
b277c936 3571 debug_printf ("Process record: load/store pair\n");
3572
3573 if (ld_flag)
3574 {
3575 if (vector_flag)
3576 {
3577 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3578 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3579 }
3580 else
3581 {
3582 record_buf[0] = reg_rt;
3583 record_buf[1] = reg_rt2;
3584 }
3585 aarch64_insn_r->reg_rec_count = 2;
3586 }
3587 else
3588 {
3589 uint16_t imm7_off;
3590 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3591 if (!vector_flag)
3592 size_bits = size_bits >> 1;
3593 datasize = 8 << (2 + size_bits);
3594 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3595 offset = offset << (2 + size_bits);
3596 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3597 &address);
3598 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3599 {
3600 if (imm7_off & 0x40)
3601 address = address - offset;
3602 else
3603 address = address + offset;
3604 }
3605
3606 record_buf_mem[0] = datasize / 8;
3607 record_buf_mem[1] = address;
3608 record_buf_mem[2] = datasize / 8;
3609 record_buf_mem[3] = address + (datasize / 8);
3610 aarch64_insn_r->mem_rec_count = 2;
3611 }
3612 if (bit (aarch64_insn_r->aarch64_insn, 23))
3613 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3614 }
3615 /* Load/store register (unsigned immediate) instructions. */
3616 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3617 {
3618 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3619 if (!(opc >> 1))
3620 {
3621 if (opc & 0x01)
3622 ld_flag = 0x01;
3623 else
3624 ld_flag = 0x0;
3625 }
99afc88b 3626 else
33877125 3627 {
3628 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3629 {
3630 /* PRFM (immediate) */
3631 return AARCH64_RECORD_SUCCESS;
3632 }
3633 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3634 {
3635 /* LDRSW (immediate) */
3636 ld_flag = 0x1;
3637 }
33877125 3638 else
3639 {
3640 if (opc & 0x01)
3641 ld_flag = 0x01;
3642 else
3643 ld_flag = 0x0;
3644 }
33877125 3645 }
3646
3647 if (record_debug)
3648 {
3649 debug_printf ("Process record: load/store (unsigned immediate):"
3650 " size %x V %d opc %x\n", size_bits, vector_flag,
3651 opc);
3652 }
3653
3654 if (!ld_flag)
3655 {
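/* The 12-bit immediate in bits 10-21 is unsigned and is scaled by
   the transfer size below.  */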
3656 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3657 datasize = 8 << size_bits;
3658 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3659 &address);
3660 offset = offset << size_bits;
3661 address = address + offset;
3662
3663 record_buf_mem[0] = datasize >> 3;
3664 record_buf_mem[1] = address;
3665 aarch64_insn_r->mem_rec_count = 1;
3666 }
3667 else
3668 {
3669 if (vector_flag)
3670 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3671 else
3672 record_buf[0] = reg_rt;
3673 aarch64_insn_r->reg_rec_count = 1;
3674 }
3675 }
3676 /* Load/store register (register offset) instructions. */
3677 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3678 && insn_bits10_11 == 0x02 && insn_bit21)
3679 {
3680 if (record_debug)
b277c936 3681 debug_printf ("Process record: load/store (register offset)\n");
3682 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3683 if (!(opc >> 1))
3684 if (opc & 0x01)
3685 ld_flag = 0x01;
3686 else
3687 ld_flag = 0x0;
3688 else
3689 if (size_bits != 0x03)
3690 ld_flag = 0x01;
3691 else
3692 return AARCH64_RECORD_UNKNOWN;
3693
3694 if (!ld_flag)
3695 {
3696 ULONGEST reg_rm_val;
3697
3698 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3699 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
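/* Bit 12 is the shift (S) bit: when set, the index register is
   scaled by the access size.  The extend option in bits 13-15 is
   not applied here; the raw Rm value is used as the offset.  */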
3700 if (bit (aarch64_insn_r->aarch64_insn, 12))
3701 offset = reg_rm_val << size_bits;
3702 else
3703 offset = reg_rm_val;
3704 datasize = 8 << size_bits;
3705 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3706 &address);
3707 address = address + offset;
3708 record_buf_mem[0] = datasize >> 3;
3709 record_buf_mem[1] = address;
3710 aarch64_insn_r->mem_rec_count = 1;
3711 }
3712 else
3713 {
3714 if (vector_flag)
3715 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3716 else
3717 record_buf[0] = reg_rt;
3718 aarch64_insn_r->reg_rec_count = 1;
3719 }
3720 }
3721 /* Load/store register (immediate and unprivileged) instructions. */
3722 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3723 && !insn_bit21)
3724 {
3725 if (record_debug)
3726 {
3727 debug_printf ("Process record: load/store "
3728 "(immediate and unprivileged)\n");
3729 }
3730 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3731 if (!(opc >> 1))
3732 if (opc & 0x01)
3733 ld_flag = 0x01;
3734 else
3735 ld_flag = 0x0;
3736 else
3737 if (size_bits != 0x03)
3738 ld_flag = 0x01;
3739 else
3740 return AARCH64_RECORD_UNKNOWN;
3741
3742 if (!ld_flag)
3743 {
3744 uint16_t imm9_off;
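/* The offset is a signed 9-bit byte immediate in bits 12-20,
   shared by the unscaled, unprivileged and pre/post-indexed
   forms.  */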
3745 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3746 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3747 datasize = 8 << size_bits;
3748 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3749 &address);
3750 if (insn_bits10_11 != 0x01)
3751 {
3752 if (imm9_off & 0x0100)
3753 address = address - offset;
3754 else
3755 address = address + offset;
3756 }
3757 record_buf_mem[0] = datasize >> 3;
3758 record_buf_mem[1] = address;
3759 aarch64_insn_r->mem_rec_count = 1;
3760 }
3761 else
3762 {
3763 if (vector_flag)
3764 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3765 else
3766 record_buf[0] = reg_rt;
3767 aarch64_insn_r->reg_rec_count = 1;
3768 }
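/* Bits 10-11 of 01 (post-indexed) or 11 (pre-indexed) select the
   writeback forms, which also update the base register.  */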
3769 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3770 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3771 }
3772 /* Advanced SIMD load/store instructions. */
3773 else
3774 return aarch64_record_asimd_load_store (aarch64_insn_r);
3775
3776 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3777 record_buf_mem);
3778 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3779 record_buf);
3780 return AARCH64_RECORD_SUCCESS;
3781}
3782
3783/* Record handler for data processing SIMD and floating point instructions. */
3784
3785static unsigned int
3786aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3787{
3788 uint8_t insn_bit21, opcode, rmode, reg_rd;
3789 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3790 uint8_t insn_bits11_14;
3791 uint32_t record_buf[2];
3792
3793 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3794 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3795 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3796 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3797 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3798 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3799 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3800 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3801 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3802
3803 if (record_debug)
b277c936 3804 debug_printf ("Process record: data processing SIMD/FP: ");
3805
3806 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3807 {
3808 /* Floating point - fixed point conversion instructions. */
3809 if (!insn_bit21)
3810 {
3811 if (record_debug)
b277c936 3812 debug_printf ("FP - fixed point conversion");
3813
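/* FCVTZS/FCVTZU (opcode 00x with rmode 11) write a general
   register; SCVTF/UCVTF write an SIMD&FP register.  */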
3814 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3815 record_buf[0] = reg_rd;
3816 else
3817 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3818 }
3819 /* Floating point - conditional compare instructions. */
3820 else if (insn_bits10_11 == 0x01)
3821 {
3822 if (record_debug)
b277c936 3823 debug_printf ("FP - conditional compare");
3824
3825 record_buf[0] = AARCH64_CPSR_REGNUM;
3826 }
3827 /* Floating point - data processing (2-source) and
3828 conditional select instructions. */
3829 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3830 {
3831 if (record_debug)
b277c936 3832 debug_printf ("FP - DP (2-source)");
3833
3834 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3835 }
3836 else if (insn_bits10_11 == 0x00)
3837 {
3838 /* Floating point - immediate instructions. */
3839 if ((insn_bits12_15 & 0x01) == 0x01
3840 || (insn_bits12_15 & 0x07) == 0x04)
3841 {
3842 if (record_debug)
b277c936 3843 debug_printf ("FP - immediate");
3844 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3845 }
3846 /* Floating point - compare instructions. */
3847 else if ((insn_bits12_15 & 0x03) == 0x02)
3848 {
3849 if (record_debug)
b277c936 3850 debug_printf ("FP - compare");
3851 record_buf[0] = AARCH64_CPSR_REGNUM;
3852 }
3853 /* Floating point - integer conversions instructions. */
f62fce35 3854 else if (insn_bits12_15 == 0x00)
3855 {
3856 /* Convert float to integer instruction. */
3857 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3858 {
3859 if (record_debug)
b277c936 3860 debug_printf ("float to int conversion");
3861
3862 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3863 }
3864 /* Convert integer to float instruction. */
3865 else if ((opcode >> 1) == 0x01 && !rmode)
3866 {
3867 if (record_debug)
b277c936 3868 debug_printf ("int to float conversion");
3869
3870 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3871 }
3872 /* Move float to integer instruction. */
3873 else if ((opcode >> 1) == 0x03)
3874 {
3875 if (record_debug)
b277c936 3876 debug_printf ("move float to int");
3877
3878 if (!(opcode & 0x01))
3879 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3880 else
3881 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3882 }
3883 else
3884 return AARCH64_RECORD_UNKNOWN;
99afc88b 3885 }
3886 else
3887 return AARCH64_RECORD_UNKNOWN;
99afc88b 3888 }
3889 else
3890 return AARCH64_RECORD_UNKNOWN;
3891 }
3892 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3893 {
3894 if (record_debug)
b277c936 3895 debug_printf ("SIMD copy");
3896
3897 /* Advanced SIMD copy instructions. */
3898 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3899 && !bit (aarch64_insn_r->aarch64_insn, 15)
3900 && bit (aarch64_insn_r->aarch64_insn, 10))
3901 {
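/* An imm4 (bits 11-14) of 0101 (SMOV) or 0111 (UMOV) moves an
   element to a general register; the other copy forms (DUP, INS)
   write a vector register.  */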
3902 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3903 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3904 else
3905 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3906 }
3907 else
3908 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3909 }
3910 /* All remaining floating point or advanced SIMD instructions. */
3911 else
3912 {
3913 if (record_debug)
b277c936 3914 debug_printf ("all remaining");
3915
3916 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3917 }
3918
3919 if (record_debug)
b277c936 3920 debug_printf ("\n");
3921
3922 aarch64_insn_r->reg_rec_count++;
3923 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3924 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3925 record_buf);
3926 return AARCH64_RECORD_SUCCESS;
3927}
3928
3929/* Decodes the instruction type and invokes its record handler. */
3930
3931static unsigned int
3932aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3933{
3934 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3935
3936 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3937 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3938 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3939 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3940
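/* Bits 25-28 correspond to the op0 field of the A64 top-level
   encoding table, which selects the instruction group.  */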
3941 /* Data processing - immediate instructions. */
3942 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3943 return aarch64_record_data_proc_imm (aarch64_insn_r);
3944
3945 /* Branch, exception generation and system instructions. */
3946 if (ins_bit26 && !ins_bit27 && ins_bit28)
3947 return aarch64_record_branch_except_sys (aarch64_insn_r);
3948
3949 /* Load and store instructions. */
3950 if (!ins_bit25 && ins_bit27)
3951 return aarch64_record_load_store (aarch64_insn_r);
3952
3953 /* Data processing - register instructions. */
3954 if (ins_bit25 && !ins_bit26 && ins_bit27)
3955 return aarch64_record_data_proc_reg (aarch64_insn_r);
3956
3957 /* Data processing - SIMD and floating point instructions. */
3958 if (ins_bit25 && ins_bit26 && ins_bit27)
3959 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3960
3961 return AARCH64_RECORD_UNSUPPORTED;
3962}
3963
3964/* Frees the register and memory lists allocated for the record. */
3965
3966static void
3967deallocate_reg_mem (insn_decode_record *record)
3968{
3969 xfree (record->aarch64_regs);
3970 xfree (record->aarch64_mems);
3971}
3972
3973#if GDB_SELF_TEST
3974namespace selftests {
3975
3976static void
3977aarch64_process_record_test (void)
3978{
3979 struct gdbarch_info info;
3980 uint32_t ret;
3981
3982 gdbarch_info_init (&info);
3983 info.bfd_arch_info = bfd_scan_arch ("aarch64");
3984
3985 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
3986 SELF_CHECK (gdbarch != NULL);
3987
3988 insn_decode_record aarch64_record;
3989
3990 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3991 aarch64_record.regcache = NULL;
3992 aarch64_record.this_addr = 0;
3993 aarch64_record.gdbarch = gdbarch;
3994
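/* PRFM is a hint: decoding must succeed while recording no register
   or memory changes.  */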
3995 /* 20 00 80 f9 prfm pldl1keep, [x1] */
3996 aarch64_record.aarch64_insn = 0xf9800020;
3997 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3998 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
3999 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4000 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4001
4002 deallocate_reg_mem (&aarch64_record);
4003}
4004
4005} // namespace selftests
4006#endif /* GDB_SELF_TEST */
4007
4008/* Parse the current instruction and record the values of the registers and
4009 memory that will be changed by it to record_arch_list.  Return -1 if
4010 something is wrong. */
4011
4012int
4013aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4014 CORE_ADDR insn_addr)
4015{
4016 uint32_t rec_no = 0;
4017 uint8_t insn_size = 4;
4018 uint32_t ret = 0;
4019 gdb_byte buf[insn_size];
4020 insn_decode_record aarch64_record;
4021
4022 memset (&buf[0], 0, insn_size);
4023 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4024 target_read_memory (insn_addr, &buf[0], insn_size);
4025 aarch64_record.aarch64_insn
4026 = (uint32_t) extract_unsigned_integer (&buf[0],
4027 insn_size,
4028 gdbarch_byte_order (gdbarch));
4029 aarch64_record.regcache = regcache;
4030 aarch64_record.this_addr = insn_addr;
4031 aarch64_record.gdbarch = gdbarch;
4032
4033 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4034 if (ret == AARCH64_RECORD_UNSUPPORTED)
4035 {
4036 printf_unfiltered (_("Process record does not support instruction "
4037 "0x%0x at address %s.\n"),
4038 aarch64_record.aarch64_insn,
4039 paddress (gdbarch, insn_addr));
4040 ret = -1;
4041 }
4042
4043 if (0 == ret)
4044 {
4045 /* Record registers. */
4046 record_full_arch_list_add_reg (aarch64_record.regcache,
4047 AARCH64_PC_REGNUM);
4048 /* Always record register CPSR. */
4049 record_full_arch_list_add_reg (aarch64_record.regcache,
4050 AARCH64_CPSR_REGNUM);
4051 if (aarch64_record.aarch64_regs)
4052 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4053 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4054 aarch64_record.aarch64_regs[rec_no]))
4055 ret = -1;
4056
4057 /* Record memories. */
4058 if (aarch64_record.aarch64_mems)
4059 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4060 if (record_full_arch_list_add_mem
4061 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4062 aarch64_record.aarch64_mems[rec_no].len))
4063 ret = -1;
4064
4065 if (record_full_arch_list_add_end ())
4066 ret = -1;
4067 }
4068
4069 deallocate_reg_mem (&aarch64_record);
4070 return ret;
4071}