]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/aarch64-tdep.c
Import sys in gdb/python/lib/gdb/printer/bound_registers.py
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
61baf725 3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
ea873d8e
PL
45#include "ax.h"
46#include "ax-gdb.h"
4d9a9006 47#include "selftest.h"
07b287a0
MS
48
49#include "aarch64-tdep.h"
50
51#include "elf-bfd.h"
52#include "elf/aarch64.h"
53
07b287a0
MS
54#include "vec.h"
55
99afc88b
OJ
56#include "record.h"
57#include "record-full.h"
58
07b287a0 59#include "features/aarch64.c"
07b287a0 60
787749ea
PL
61#include "arch/aarch64-insn.h"
62
f77ee802 63#include "opcode/aarch64.h"
325fac50 64#include <algorithm>
f77ee802
YQ
65
66#define submask(x) ((1L << ((x) + 1)) - 1)
67#define bit(obj,st) (((obj) >> (st)) & 1)
68#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
69
07b287a0
MS
/* Pseudo register base numbers.  These index GDB's pseudo-register
   space, which overlays the V registers as vectors of smaller lanes:
   Q (128-bit), D (64-bit), S (32-bit), H (16-bit), B (8-bit) views.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
/* 32 registers in each of the remaining views.  */
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
76
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias onto the raw register number it refers
   to; the "w" names are the 32-bit views of the corresponding "x"
   registers and share their register numbers.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials: ip0/ip1 are aliases of x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
126
/* The required core 'R' registers: the 31 general-purpose X registers,
   followed by sp, pc and cpsr.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
142
/* The FP/SIMD 'V' registers: the 32 vector registers plus the two
   floating-point status/control registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
159
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and shared by the prologue and stub unwinders.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  Remains zero when the
     cache could not be (fully) populated.  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame, or -1
     when no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
191
07b287a0
MS
/* Show callback for the aarch64 debug flag: print the current VALUE
   of the setting to FILE.  (Presumably registered as the "show" hook
   of a set/show debug command -- registration is not in this chunk.)  */
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
198
4d9a9006
YQ
/* Abstract instruction reader.  Lets the prologue analyzer fetch
   instructions either from the real target or, in the self tests,
   from a canned array (see instruction_reader_test below).  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction of LEN bytes at MEMADDR, interpreted in
     BYTE_ORDER.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};
208
209/* Instruction reader from real target. */
210
211class instruction_reader : public abstract_instruction_reader
212{
213 public:
214 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
215 {
fc2f703e 216 return read_code_unsigned_integer (memaddr, len, byte_order);
4d9a9006
YQ
217 }
218};
219
07b287a0
MS
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Instructions are fetched through READER, four bytes at a time,
   from START up to (but not including) LIMIT.  Returns the address
   of the first instruction that was not recognized as part of the
   prologue (START itself if nothing was recognized).  If CACHE is
   non-NULL it is filled in with the frame register, frame size and
   the stack offsets of any saved X/D registers found.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  D registers are
     tracked at indices AARCH64_X_REGISTER_COUNT and up.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  /* AArch64 instructions are fixed-width: step four bytes at a time.  */
  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      /* Undecodable instruction: stop the scan.  */
      if (aarch64_decode_insn (insn, &inst, 1) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  /* ADD/SUB (immediate): symbolic register arithmetic, e.g.
	     "sub sp, sp, #16" or "add x29, sp, #0".  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  /* ADRP: destination becomes unknown but is harmless.  */
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  /* MOVZ: destination register becomes an unknown constant.  */
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  /* ORR (shifted register): only the plain register move
	     "mov rd, rm" (zero shift, rn == sp encoding of orr) is
	     understood; anything else ends the scan.  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  /* STUR: unscaled store of a single register.  */
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  pv_area_store (stack, pv_add_constant (regs[rn],
						 inst.operands[1].addr.offset.imm),
			 is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate), sp-relative only.  */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  bool is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	    }

	  pv_area_store (stack, pv_add_constant (regs[rn], imm),
			 is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record the stack offsets of any saved X registers.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  /* Likewise for the D registers; they are recorded under the
     D pseudo-register numbers (raw count + AARCH64_D0_REGNUM).  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
			    &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
490
4d9a9006
YQ
/* Overload of the above that reads instructions from the live target.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}
501
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.
   "Memory" is the INSNS array: address 0 is insns[0], address 4 is
   insns[1], and so on.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  /* Return the canned instruction at MEMADDR; reads must be aligned,
     4 bytes wide and within the array.  */
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Unit tests for aarch64_analyze_prologue, run over hand-assembled
   instruction sequences.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    /* Analysis must stop at the bl, i.e. after two instructions.  */
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    /* Only fp (x29) and lr (x30) were saved, at the bottom of the
       272-byte frame.  */
    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    /* Analysis must stop at the ldr, i.e. after five instructions.  */
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    /* d0 was saved at [sp, #24], i.e. offset -24 from the initial sp.  */
    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
626
07b287a0
MS
627/* Implement the "skip_prologue" gdbarch method. */
628
629static CORE_ADDR
630aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
631{
07b287a0 632 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
633
634 /* See if we can determine the end of the prologue via the symbol
635 table. If so, then return either PC, or the PC after the
636 prologue, whichever is greater. */
637 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
638 {
639 CORE_ADDR post_prologue_pc
640 = skip_prologue_using_sal (gdbarch, func_addr);
641
642 if (post_prologue_pc != 0)
325fac50 643 return std::max (pc, post_prologue_pc);
07b287a0
MS
644 }
645
646 /* Can't determine prologue from the symbol table, need to examine
647 instructions. */
648
649 /* Find an upper limit on the function prologue using the debug
650 information. If the debug information could not be used to
651 provide that bound, then use an arbitrary large number as the
652 upper bound. */
653 limit_pc = skip_prologue_using_sal (gdbarch, pc);
654 if (limit_pc == 0)
655 limit_pc = pc + 128; /* Magic. */
656
657 /* Try disassembling prologue. */
658 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
659}
660
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      /* Never scan past the frame's current PC.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      /* No symbol information: assume a standard frame record of
	 saved x29 (fp) and x30 (lr) at the frame pointer.  */
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;	/* x29 at fp.  */
      cache->saved_regs[30].addr = 8;	/* x30 at fp + 8.  */
    }
}
713
7dfa3edc
PL
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* No identifiable frame: leave the cache marked unavailable.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  /* Only flag the cache usable once everything above succeeded.  */
  cache->available_p = 1;
}
746
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Swallow NOT_AVAILABLE_ERROR only -- the cache then stays with
     available_p == 0; any other error propagates.  */
  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
777
7dfa3edc
PL
/* Implement the "stop_reason" frame_unwind method for the prologue
   unwinder.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
800
07b287a0
MS
801/* Our frame ID for a normal frame is the current function's starting
802 PC and the caller's SP when we were called. */
803
804static void
805aarch64_prologue_this_id (struct frame_info *this_frame,
806 void **this_cache, struct frame_id *this_id)
807{
7c8edfae
PL
808 struct aarch64_prologue_cache *cache
809 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 810
7dfa3edc
PL
811 if (!cache->available_p)
812 *this_id = frame_id_build_unavailable_stack (cache->func);
813 else
814 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
815}
816
/* Implement the "prev_register" frame_unwind method.  Return the
   caller's value of register PREV_REGNUM as seen from THIS_FRAME.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else comes from the saved-register table built by the
     prologue analysis.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
859
/* AArch64 prologue unwinder.  The default unwinder, driven by the
   prologue analysis above.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
870
8b61f75d
PL
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  Unlike aarch64_make_prologue_cache this performs no
   prologue analysis; it only records the current SP and PC, which is
   all a stub frame needs.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Swallow NOT_AVAILABLE_ERROR only; the cache then stays with
     available_p == 0.  */
  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
904
02a2a705
PL
905/* Implement the "stop_reason" frame_unwind method. */
906
907static enum unwind_stop_reason
908aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
909 void **this_cache)
910{
911 struct aarch64_prologue_cache *cache
912 = aarch64_make_stub_cache (this_frame, this_cache);
913
914 if (!cache->available_p)
915 return UNWIND_UNAVAILABLE;
916
917 return UNWIND_NO_REASON;
918}
919
07b287a0
MS
920/* Our frame ID for a stub frame is the current SP and LR. */
921
922static void
923aarch64_stub_this_id (struct frame_info *this_frame,
924 void **this_cache, struct frame_id *this_id)
925{
8b61f75d
PL
926 struct aarch64_prologue_cache *cache
927 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 928
02a2a705
PL
929 if (cache->available_p)
930 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
931 else
932 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
933}
934
935/* Implement the "sniffer" frame_unwind method. */
936
937static int
938aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
939 struct frame_info *this_frame,
940 void **this_prologue_cache)
941{
942 CORE_ADDR addr_in_block;
943 gdb_byte dummy[4];
944
945 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 946 if (in_plt_section (addr_in_block)
07b287a0
MS
947 /* We also use the stub winder if the target memory is unreadable
948 to avoid having the prologue unwinder trying to read it. */
949 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
950 return 1;
951
952 return 0;
953}
954
/* AArch64 stub unwinder.  Used for PLT stubs and for frames whose
   code is unreadable (see aarch64_stub_unwind_sniffer).  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
965
/* Return the frame base address of *THIS_FRAME: the caller's SP minus
   the frame size determined by prologue analysis.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}
976
/* AArch64 default frame base information.  The same base address is
   used for the frame base, locals and arguments.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
985
/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
						      AARCH64_SP_REGNUM),
			 get_frame_pc (this_frame));
}
999
1000/* Implement the "unwind_pc" gdbarch method. */
1001
1002static CORE_ADDR
1003aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1004{
1005 CORE_ADDR pc
1006 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1007
1008 return pc;
1009}
1010
1011/* Implement the "unwind_sp" gdbarch method. */
1012
1013static CORE_ADDR
1014aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1015{
1016 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1017}
1018
1019/* Return the value of the REGNUM register in the previous frame of
1020 *THIS_FRAME. */
1021
1022static struct value *
1023aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1024 void **this_cache, int regnum)
1025{
07b287a0
MS
1026 CORE_ADDR lr;
1027
1028 switch (regnum)
1029 {
1030 case AARCH64_PC_REGNUM:
1031 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1032 return frame_unwind_got_constant (this_frame, regnum, lr);
1033
1034 default:
1035 internal_error (__FILE__, __LINE__,
1036 _("Unexpected register %d"), regnum);
1037 }
1038}
1039
1040/* Implement the "init_reg" dwarf2_frame_ops method. */
1041
1042static void
1043aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1044 struct dwarf2_frame_state_reg *reg,
1045 struct frame_info *this_frame)
1046{
1047 switch (regnum)
1048 {
1049 case AARCH64_PC_REGNUM:
1050 reg->how = DWARF2_FRAME_REG_FN;
1051 reg->loc.fn = aarch64_dwarf2_prev_register;
1052 break;
1053 case AARCH64_SP_REGNUM:
1054 reg->how = DWARF2_FRAME_REG_CFA;
1055 break;
1056 }
1057}
1058
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

/* Instantiate a vector of stack_item_t (GDB's VEC container).  */
DEF_VEC_O (stack_item_t);
1073
1074/* Return the alignment (in bytes) of the given type. */
1075
1076static int
1077aarch64_type_align (struct type *t)
1078{
1079 int n;
1080 int align;
1081 int falign;
1082
1083 t = check_typedef (t);
1084 switch (TYPE_CODE (t))
1085 {
1086 default:
1087 /* Should never happen. */
1088 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1089 return 4;
1090
1091 case TYPE_CODE_PTR:
1092 case TYPE_CODE_ENUM:
1093 case TYPE_CODE_INT:
1094 case TYPE_CODE_FLT:
1095 case TYPE_CODE_SET:
1096 case TYPE_CODE_RANGE:
1097 case TYPE_CODE_BITSTRING:
1098 case TYPE_CODE_REF:
aa006118 1099 case TYPE_CODE_RVALUE_REF:
07b287a0
MS
1100 case TYPE_CODE_CHAR:
1101 case TYPE_CODE_BOOL:
1102 return TYPE_LENGTH (t);
1103
1104 case TYPE_CODE_ARRAY:
238f2452
YQ
1105 if (TYPE_VECTOR (t))
1106 {
1107 /* Use the natural alignment for vector types (the same for
1108 scalar type), but the maximum alignment is 128-bit. */
1109 if (TYPE_LENGTH (t) > 16)
1110 return 16;
1111 else
1112 return TYPE_LENGTH (t);
1113 }
1114 else
1115 return aarch64_type_align (TYPE_TARGET_TYPE (t));
07b287a0
MS
1116 case TYPE_CODE_COMPLEX:
1117 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1118
1119 case TYPE_CODE_STRUCT:
1120 case TYPE_CODE_UNION:
1121 align = 1;
1122 for (n = 0; n < TYPE_NFIELDS (t); n++)
1123 {
1124 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1125 if (falign > align)
1126 align = falign;
1127 }
1128 return align;
1129 }
1130}
1131
/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);

	/* A short vector is not itself an HFA or HVA.  */
	if (TYPE_VECTOR (ty))
	  return 0;

	/* NOTE(review): TYPE_LENGTH (ty) is the array's size in
	   bytes, not its element count, so this test rejects e.g. a
	   two-element float array (8 bytes) even though the adjacent
	   comment says "at most 4 members" -- confirm against the
	   AAPCS64 HFA/HVA definition before relying on this path.  */
	if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
	    && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
		|| (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
		    && TYPE_VECTOR (target_ty))))
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	/* HFA or HVA has at most four members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    /* The first member must be a float or a short vector...  */
	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
		|| (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
		    && TYPE_VECTOR (member0_type)))
	      {
		int i;

		/* ...and every member must match the first one's
		   type code and length.  */
		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
1193
/* AArch64 function call information structure.  Tracks the register
   and stack allocation state while marshalling arguments for an
   inferior call.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  Accumulated
     as a byte count by pass_on_stack.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};
1215
1216/* Pass a value in a sequence of consecutive X registers. The caller
1217 is responsbile for ensuring sufficient registers are available. */
1218
1219static void
1220pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1221 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1222 struct value *arg)
07b287a0
MS
1223{
1224 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1225 int len = TYPE_LENGTH (type);
1226 enum type_code typecode = TYPE_CODE (type);
1227 int regnum = AARCH64_X0_REGNUM + info->ngrn;
8e80f9d1 1228 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1229
1230 info->argnum++;
1231
1232 while (len > 0)
1233 {
1234 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1235 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1236 byte_order);
1237
1238
1239 /* Adjust sub-word struct/union args when big-endian. */
1240 if (byte_order == BFD_ENDIAN_BIG
1241 && partial_len < X_REGISTER_SIZE
1242 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1243 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1244
1245 if (aarch64_debug)
b277c936
PL
1246 {
1247 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1248 gdbarch_register_name (gdbarch, regnum),
1249 phex (regval, X_REGISTER_SIZE));
1250 }
07b287a0
MS
1251 regcache_cooked_write_unsigned (regcache, regnum, regval);
1252 len -= partial_len;
1253 buf += partial_len;
1254 regnum++;
1255 }
1256}
1257
1258/* Attempt to marshall a value in a V register. Return 1 if
1259 successful, or 0 if insufficient registers are available. This
1260 function, unlike the equivalent pass_in_x() function does not
1261 handle arguments spread across multiple registers. */
1262
1263static int
1264pass_in_v (struct gdbarch *gdbarch,
1265 struct regcache *regcache,
1266 struct aarch64_call_info *info,
0735fddd 1267 int len, const bfd_byte *buf)
07b287a0
MS
1268{
1269 if (info->nsrn < 8)
1270 {
07b287a0 1271 int regnum = AARCH64_V0_REGNUM + info->nsrn;
0735fddd 1272 gdb_byte reg[V_REGISTER_SIZE];
07b287a0
MS
1273
1274 info->argnum++;
1275 info->nsrn++;
1276
0735fddd
YQ
1277 memset (reg, 0, sizeof (reg));
1278 /* PCS C.1, the argument is allocated to the least significant
1279 bits of V register. */
1280 memcpy (reg, buf, len);
1281 regcache_cooked_write (regcache, regnum, reg);
1282
07b287a0 1283 if (aarch64_debug)
b277c936
PL
1284 {
1285 debug_printf ("arg %d in %s\n", info->argnum,
1286 gdbarch_register_name (gdbarch, regnum));
1287 }
07b287a0
MS
1288 return 1;
1289 }
1290 info->nsrn = 8;
1291 return 0;
1292}
1293
1294/* Marshall an argument onto the stack. */
1295
1296static void
1297pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1298 struct value *arg)
07b287a0 1299{
8e80f9d1 1300 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1301 int len = TYPE_LENGTH (type);
1302 int align;
1303 stack_item_t item;
1304
1305 info->argnum++;
1306
1307 align = aarch64_type_align (type);
1308
1309 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1310 Natural alignment of the argument's type. */
1311 align = align_up (align, 8);
1312
1313 /* The AArch64 PCS requires at most doubleword alignment. */
1314 if (align > 16)
1315 align = 16;
1316
1317 if (aarch64_debug)
b277c936
PL
1318 {
1319 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1320 info->nsaa);
1321 }
07b287a0
MS
1322
1323 item.len = len;
1324 item.data = buf;
1325 VEC_safe_push (stack_item_t, info->si, &item);
1326
1327 info->nsaa += len;
1328 if (info->nsaa & (align - 1))
1329 {
1330 /* Push stack alignment padding. */
1331 int pad = align - (info->nsaa & (align - 1));
1332
1333 item.len = pad;
c3c87445 1334 item.data = NULL;
07b287a0
MS
1335
1336 VEC_safe_push (stack_item_t, info->si, &item);
1337 info->nsaa += pad;
1338 }
1339}
1340
1341/* Marshall an argument into a sequence of one or more consecutive X
1342 registers or, if insufficient X registers are available then onto
1343 the stack. */
1344
1345static void
1346pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1347 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1348 struct value *arg)
07b287a0
MS
1349{
1350 int len = TYPE_LENGTH (type);
1351 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1352
1353 /* PCS C.13 - Pass in registers if we have enough spare */
1354 if (info->ngrn + nregs <= 8)
1355 {
8e80f9d1 1356 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1357 info->ngrn += nregs;
1358 }
1359 else
1360 {
1361 info->ngrn = 8;
8e80f9d1 1362 pass_on_stack (info, type, arg);
07b287a0
MS
1363 }
1364}
1365
1366/* Pass a value in a V register, or on the stack if insufficient are
1367 available. */
1368
1369static void
1370pass_in_v_or_stack (struct gdbarch *gdbarch,
1371 struct regcache *regcache,
1372 struct aarch64_call_info *info,
1373 struct type *type,
8e80f9d1 1374 struct value *arg)
07b287a0 1375{
0735fddd
YQ
1376 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1377 value_contents (arg)))
8e80f9d1 1378 pass_on_stack (info, type, arg);
07b287a0
MS
1379}
1380
/* Implement the "push_dummy_call" gdbarch method.  Marshal NARGS
   arguments from ARGS into registers and stack per the AAPCS64,
   starting from stack pointer SP, and return the final SP.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp, int struct_return,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather that change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
	      || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  /* Classify and place each argument per its type code.  */
  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_COMPLEX:
	  /* Complex values need two V registers (real and imaginary
	     halves); nsrn <= 6 guarantees both are available.  */
	  if (info.nsrn <= 6)
	    {
	      const bfd_byte *buf = value_contents (arg);
	      struct type *target_type =
		check_typedef (TYPE_TARGET_TYPE (arg_type));

	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type), buf);
	      pass_in_v (gdbarch, regcache, &info,
			 TYPE_LENGTH (target_type),
			 buf + TYPE_LENGTH (target_type));
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  break;
	case TYPE_CODE_FLT:
	  pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (is_hfa_or_hva (arg_type))
	    {
	      int elements = TYPE_NFIELDS (arg_type);

	      /* Homogeneous Aggregates */
	      if (info.nsrn + elements < 8)
		{
		  int i;

		  for (i = 0; i < elements; i++)
		    {
		      /* We know that we have sufficient registers
			 available therefore this will never fallback
			 to the stack.  */
		      struct value *field =
			value_primitive_field (arg, 0, i, arg_type);
		      struct type *field_type =
			check_typedef (value_type (field));

		      pass_in_v_or_stack (gdbarch, regcache, &info,
					  field_type, field);
		    }
		}
	      else
		{
		  info.nsrn = 8;
		  pass_on_stack (&info, arg_type, arg);
		}
	    }
	  else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
		   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
	    {
	      /* Short vector types are passed in V registers.  */
	      pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  /* Items were queued first-argument-first; popping them writes the
     last argument at the highest address and the first at SP.  */
  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
	write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
1601
1602/* Implement the "frame_align" gdbarch method. */
1603
1604static CORE_ADDR
1605aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1606{
1607 /* Align the stack to sixteen bytes. */
1608 return sp & ~(CORE_ADDR) 15;
1609}
1610
1611/* Return the type for an AdvSISD Q register. */
1612
1613static struct type *
1614aarch64_vnq_type (struct gdbarch *gdbarch)
1615{
1616 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1617
1618 if (tdep->vnq_type == NULL)
1619 {
1620 struct type *t;
1621 struct type *elem;
1622
1623 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1624 TYPE_CODE_UNION);
1625
1626 elem = builtin_type (gdbarch)->builtin_uint128;
1627 append_composite_type_field (t, "u", elem);
1628
1629 elem = builtin_type (gdbarch)->builtin_int128;
1630 append_composite_type_field (t, "s", elem);
1631
1632 tdep->vnq_type = t;
1633 }
1634
1635 return tdep->vnq_type;
1636}
1637
1638/* Return the type for an AdvSISD D register. */
1639
1640static struct type *
1641aarch64_vnd_type (struct gdbarch *gdbarch)
1642{
1643 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1644
1645 if (tdep->vnd_type == NULL)
1646 {
1647 struct type *t;
1648 struct type *elem;
1649
1650 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1651 TYPE_CODE_UNION);
1652
1653 elem = builtin_type (gdbarch)->builtin_double;
1654 append_composite_type_field (t, "f", elem);
1655
1656 elem = builtin_type (gdbarch)->builtin_uint64;
1657 append_composite_type_field (t, "u", elem);
1658
1659 elem = builtin_type (gdbarch)->builtin_int64;
1660 append_composite_type_field (t, "s", elem);
1661
1662 tdep->vnd_type = t;
1663 }
1664
1665 return tdep->vnd_type;
1666}
1667
1668/* Return the type for an AdvSISD S register. */
1669
1670static struct type *
1671aarch64_vns_type (struct gdbarch *gdbarch)
1672{
1673 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1674
1675 if (tdep->vns_type == NULL)
1676 {
1677 struct type *t;
1678 struct type *elem;
1679
1680 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1681 TYPE_CODE_UNION);
1682
1683 elem = builtin_type (gdbarch)->builtin_float;
1684 append_composite_type_field (t, "f", elem);
1685
1686 elem = builtin_type (gdbarch)->builtin_uint32;
1687 append_composite_type_field (t, "u", elem);
1688
1689 elem = builtin_type (gdbarch)->builtin_int32;
1690 append_composite_type_field (t, "s", elem);
1691
1692 tdep->vns_type = t;
1693 }
1694
1695 return tdep->vns_type;
1696}
1697
1698/* Return the type for an AdvSISD H register. */
1699
1700static struct type *
1701aarch64_vnh_type (struct gdbarch *gdbarch)
1702{
1703 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1704
1705 if (tdep->vnh_type == NULL)
1706 {
1707 struct type *t;
1708 struct type *elem;
1709
1710 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1711 TYPE_CODE_UNION);
1712
1713 elem = builtin_type (gdbarch)->builtin_uint16;
1714 append_composite_type_field (t, "u", elem);
1715
1716 elem = builtin_type (gdbarch)->builtin_int16;
1717 append_composite_type_field (t, "s", elem);
1718
1719 tdep->vnh_type = t;
1720 }
1721
1722 return tdep->vnh_type;
1723}
1724
1725/* Return the type for an AdvSISD B register. */
1726
1727static struct type *
1728aarch64_vnb_type (struct gdbarch *gdbarch)
1729{
1730 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1731
1732 if (tdep->vnb_type == NULL)
1733 {
1734 struct type *t;
1735 struct type *elem;
1736
1737 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1738 TYPE_CODE_UNION);
1739
1740 elem = builtin_type (gdbarch)->builtin_uint8;
1741 append_composite_type_field (t, "u", elem);
1742
1743 elem = builtin_type (gdbarch)->builtin_int8;
1744 append_composite_type_field (t, "s", elem);
1745
1746 tdep->vnb_type = t;
1747 }
1748
1749 return tdep->vnb_type;
1750}
1751
1752/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1753
1754static int
1755aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1756{
1757 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1758 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1759
1760 if (reg == AARCH64_DWARF_SP)
1761 return AARCH64_SP_REGNUM;
1762
1763 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1764 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1765
1766 return -1;
1767}
1768\f
1769
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Disassemble without any symbol information attached.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1778
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Define the standard breakpoint kind/size helpers for this single
   fixed-length breakpoint instruction.  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
07b287a0
MS
1785
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating point values are returned in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      /* A complex value occupies two consecutive V registers, one
	 component per register.  */
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      /* Homogeneous aggregates: one V register per member.  All
	 members have the same length (enforced by is_hfa_or_hva).  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[V_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
1896
1897
1898/* Will a function return an aggregate type in memory or in a
1899 register? Return 0 if an aggregate type can be returned in a
1900 register, 1 if it must be returned in memory. */
1901
1902static int
1903aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1904{
f168693b 1905 type = check_typedef (type);
07b287a0 1906
cd635f74 1907 if (is_hfa_or_hva (type))
07b287a0 1908 {
cd635f74
YQ
1909 /* v0-v7 are used to return values and one register is allocated
1910 for one member. However, HFA or HVA has at most four members. */
07b287a0
MS
1911 return 0;
1912 }
1913
1914 if (TYPE_LENGTH (type) > 16)
1915 {
1916 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1917 invisible reference. */
1918
1919 return 1;
1920 }
1921
1922 return 0;
1923}
1924
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  Mirrors
   aarch64_extract_return_value.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating point values go in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the regiser size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa_or_hva (type))
    {
      /* Homogeneous aggregates: one V register per member.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA or HVA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2028
2029/* Implement the "return_value" gdbarch method. */
2030
2031static enum return_value_convention
2032aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2033 struct type *valtype, struct regcache *regcache,
2034 gdb_byte *readbuf, const gdb_byte *writebuf)
2035{
07b287a0
MS
2036
2037 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2038 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2039 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2040 {
2041 if (aarch64_return_in_memory (gdbarch, valtype))
2042 {
2043 if (aarch64_debug)
b277c936 2044 debug_printf ("return value in memory\n");
07b287a0
MS
2045 return RETURN_VALUE_STRUCT_CONVENTION;
2046 }
2047 }
2048
2049 if (writebuf)
2050 aarch64_store_return_value (valtype, regcache, writebuf);
2051
2052 if (readbuf)
2053 aarch64_extract_return_value (valtype, regcache, readbuf);
2054
2055 if (aarch64_debug)
b277c936 2056 debug_printf ("return value in registers\n");
07b287a0
MS
2057
2058 return RETURN_VALUE_REGISTER_CONVENTION;
2059}
2060
2061/* Implement the "get_longjmp_target" gdbarch method. */
2062
2063static int
2064aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2065{
2066 CORE_ADDR jb_addr;
2067 gdb_byte buf[X_REGISTER_SIZE];
2068 struct gdbarch *gdbarch = get_frame_arch (frame);
2069 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2070 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2071
2072 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2073
2074 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2075 X_REGISTER_SIZE))
2076 return 0;
2077
2078 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2079 return 1;
2080}
ea873d8e
PL
2081
2082/* Implement the "gen_return_address" gdbarch method. */
2083
2084static void
2085aarch64_gen_return_address (struct gdbarch *gdbarch,
2086 struct agent_expr *ax, struct axs_value *value,
2087 CORE_ADDR scope)
2088{
2089 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2090 value->kind = axs_lvalue_register;
2091 value->u.reg = AARCH64_LR_REGNUM;
2092}
07b287a0
MS
2093\f
2094
2095/* Return the pseudo register name corresponding to register regnum. */
2096
2097static const char *
2098aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2099{
2100 static const char *const q_name[] =
2101 {
2102 "q0", "q1", "q2", "q3",
2103 "q4", "q5", "q6", "q7",
2104 "q8", "q9", "q10", "q11",
2105 "q12", "q13", "q14", "q15",
2106 "q16", "q17", "q18", "q19",
2107 "q20", "q21", "q22", "q23",
2108 "q24", "q25", "q26", "q27",
2109 "q28", "q29", "q30", "q31",
2110 };
2111
2112 static const char *const d_name[] =
2113 {
2114 "d0", "d1", "d2", "d3",
2115 "d4", "d5", "d6", "d7",
2116 "d8", "d9", "d10", "d11",
2117 "d12", "d13", "d14", "d15",
2118 "d16", "d17", "d18", "d19",
2119 "d20", "d21", "d22", "d23",
2120 "d24", "d25", "d26", "d27",
2121 "d28", "d29", "d30", "d31",
2122 };
2123
2124 static const char *const s_name[] =
2125 {
2126 "s0", "s1", "s2", "s3",
2127 "s4", "s5", "s6", "s7",
2128 "s8", "s9", "s10", "s11",
2129 "s12", "s13", "s14", "s15",
2130 "s16", "s17", "s18", "s19",
2131 "s20", "s21", "s22", "s23",
2132 "s24", "s25", "s26", "s27",
2133 "s28", "s29", "s30", "s31",
2134 };
2135
2136 static const char *const h_name[] =
2137 {
2138 "h0", "h1", "h2", "h3",
2139 "h4", "h5", "h6", "h7",
2140 "h8", "h9", "h10", "h11",
2141 "h12", "h13", "h14", "h15",
2142 "h16", "h17", "h18", "h19",
2143 "h20", "h21", "h22", "h23",
2144 "h24", "h25", "h26", "h27",
2145 "h28", "h29", "h30", "h31",
2146 };
2147
2148 static const char *const b_name[] =
2149 {
2150 "b0", "b1", "b2", "b3",
2151 "b4", "b5", "b6", "b7",
2152 "b8", "b9", "b10", "b11",
2153 "b12", "b13", "b14", "b15",
2154 "b16", "b17", "b18", "b19",
2155 "b20", "b21", "b22", "b23",
2156 "b24", "b25", "b26", "b27",
2157 "b28", "b29", "b30", "b31",
2158 };
2159
2160 regnum -= gdbarch_num_regs (gdbarch);
2161
2162 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2163 return q_name[regnum - AARCH64_Q0_REGNUM];
2164
2165 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2166 return d_name[regnum - AARCH64_D0_REGNUM];
2167
2168 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2169 return s_name[regnum - AARCH64_S0_REGNUM];
2170
2171 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2172 return h_name[regnum - AARCH64_H0_REGNUM];
2173
2174 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2175 return b_name[regnum - AARCH64_B0_REGNUM];
2176
2177 internal_error (__FILE__, __LINE__,
2178 _("aarch64_pseudo_register_name: bad register number %d"),
2179 regnum);
2180}
2181
2182/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2183
2184static struct type *
2185aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2186{
2187 regnum -= gdbarch_num_regs (gdbarch);
2188
2189 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2190 return aarch64_vnq_type (gdbarch);
2191
2192 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2193 return aarch64_vnd_type (gdbarch);
2194
2195 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2196 return aarch64_vns_type (gdbarch);
2197
2198 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2199 return aarch64_vnh_type (gdbarch);
2200
2201 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2202 return aarch64_vnb_type (gdbarch);
2203
2204 internal_error (__FILE__, __LINE__,
2205 _("aarch64_pseudo_register_type: bad register number %d"),
2206 regnum);
2207}
2208
2209/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2210
2211static int
2212aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2213 struct reggroup *group)
2214{
2215 regnum -= gdbarch_num_regs (gdbarch);
2216
2217 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2218 return group == all_reggroup || group == vector_reggroup;
2219 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2220 return (group == all_reggroup || group == vector_reggroup
2221 || group == float_reggroup);
2222 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2223 return (group == all_reggroup || group == vector_reggroup
2224 || group == float_reggroup);
2225 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2226 return group == all_reggroup || group == vector_reggroup;
2227 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2228 return group == all_reggroup || group == vector_reggroup;
2229
2230 return group == all_reggroup;
2231}
2232
2233/* Implement the "pseudo_register_read_value" gdbarch method. */
2234
2235static struct value *
2236aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2237 struct regcache *regcache,
2238 int regnum)
2239{
2240 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2241 struct value *result_value;
2242 gdb_byte *buf;
2243
2244 result_value = allocate_value (register_type (gdbarch, regnum));
2245 VALUE_LVAL (result_value) = lval_register;
2246 VALUE_REGNUM (result_value) = regnum;
2247 buf = value_contents_raw (result_value);
2248
2249 regnum -= gdbarch_num_regs (gdbarch);
2250
2251 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2252 {
2253 enum register_status status;
2254 unsigned v_regnum;
2255
2256 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2257 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2258 if (status != REG_VALID)
2259 mark_value_bytes_unavailable (result_value, 0,
2260 TYPE_LENGTH (value_type (result_value)));
2261 else
2262 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2263 return result_value;
2264 }
2265
2266 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2267 {
2268 enum register_status status;
2269 unsigned v_regnum;
2270
2271 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2272 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2273 if (status != REG_VALID)
2274 mark_value_bytes_unavailable (result_value, 0,
2275 TYPE_LENGTH (value_type (result_value)));
2276 else
2277 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2278 return result_value;
2279 }
2280
2281 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2282 {
2283 enum register_status status;
2284 unsigned v_regnum;
2285
2286 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2287 status = regcache_raw_read (regcache, v_regnum, reg_buf);
4bcddace
PL
2288 if (status != REG_VALID)
2289 mark_value_bytes_unavailable (result_value, 0,
2290 TYPE_LENGTH (value_type (result_value)));
2291 else
2292 memcpy (buf, reg_buf, S_REGISTER_SIZE);
07b287a0
MS
2293 return result_value;
2294 }
2295
2296 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2297 {
2298 enum register_status status;
2299 unsigned v_regnum;
2300
2301 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2302 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2303 if (status != REG_VALID)
2304 mark_value_bytes_unavailable (result_value, 0,
2305 TYPE_LENGTH (value_type (result_value)));
2306 else
2307 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2308 return result_value;
2309 }
2310
2311 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2312 {
2313 enum register_status status;
2314 unsigned v_regnum;
2315
2316 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2317 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2318 if (status != REG_VALID)
2319 mark_value_bytes_unavailable (result_value, 0,
2320 TYPE_LENGTH (value_type (result_value)));
2321 else
2322 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2323 return result_value;
2324 }
2325
2326 gdb_assert_not_reached ("regnum out of bound");
2327}
2328
2329/* Implement the "pseudo_register_write" gdbarch method. */
2330
2331static void
2332aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2333 int regnum, const gdb_byte *buf)
2334{
2335 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2336
2337 /* Ensure the register buffer is zero, we want gdb writes of the
2338 various 'scalar' pseudo registers to behavior like architectural
2339 writes, register width bytes are written the remainder are set to
2340 zero. */
2341 memset (reg_buf, 0, sizeof (reg_buf));
2342
2343 regnum -= gdbarch_num_regs (gdbarch);
2344
2345 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2346 {
2347 /* pseudo Q registers */
2348 unsigned v_regnum;
2349
2350 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2351 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2352 regcache_raw_write (regcache, v_regnum, reg_buf);
2353 return;
2354 }
2355
2356 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2357 {
2358 /* pseudo D registers */
2359 unsigned v_regnum;
2360
2361 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2362 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2363 regcache_raw_write (regcache, v_regnum, reg_buf);
2364 return;
2365 }
2366
2367 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2368 {
2369 unsigned v_regnum;
2370
2371 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2372 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2373 regcache_raw_write (regcache, v_regnum, reg_buf);
2374 return;
2375 }
2376
2377 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2378 {
2379 /* pseudo H registers */
2380 unsigned v_regnum;
2381
2382 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2383 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2384 regcache_raw_write (regcache, v_regnum, reg_buf);
2385 return;
2386 }
2387
2388 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2389 {
2390 /* pseudo B registers */
2391 unsigned v_regnum;
2392
2393 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2394 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2395 regcache_raw_write (regcache, v_regnum, reg_buf);
2396 return;
2397 }
2398
2399 gdb_assert_not_reached ("regnum out of bound");
2400}
2401
07b287a0
MS
/* Callback function for user_reg_add.

   BATON points at the register number recorded when the alias was
   registered; return that register's value in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  int regnum = *(const int *) baton;

  return value_of_register (regnum, frame);
}
2411\f
2412
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A breakpoint inside a load-exclusive/store-exclusive sequence would
   clear the exclusive monitor and the sequence could never complete,
   so instead scan forward from PC and return breakpoint locations
   after the sequence (and at the target of any conditional branch
   inside it).  Returns NULL when PC is not at the start of such a
   sequence, letting the caller fall back to normal stepping.  */

static VEC (CORE_ADDR) *
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;
  VEC (CORE_ADDR) *next_pcs = NULL;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.
     Bit 22 is the L (load) bit of the load/store-exclusive class.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return NULL;

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
	return NULL;
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* Only a single conditional branch is supported within the
	     sequence; give up otherwise.  */
	  if (bc_insn_count >= 1)
	    return NULL;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.
	 Bit 22 clear means a store-exclusive.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return NULL;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);

  return next_pcs;
}
2495
b6542f81
YQ
/* Per-instruction state carried from displaced_step_copy_insn to
   displaced_step_fixup.  */

struct displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  int cond;

  /* PC adjustment offset after displaced stepping.  PC is set to
     FROM + PC_ADJUST once the step completes (see
     aarch64_displaced_step_fixup).  */
  int32_t pc_adjust;
};
2505
/* Data when visiting instructions for displaced stepping.  Extends
   aarch64_insn_data (the visitor base) with the relocation output.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure recording fixup information for after the step.  */
  struct displaced_step_closure *dsc;
};
2523
/* Implementation of aarch64_insn_visitor method "b".

   Relocate an unconditional branch (B/BL) so it can execute from the
   scratch pad at NEW_ADDR.  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* Branch target relative to the scratch pad location.  */
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  The branch is performed later by having fixup add
	 the original offset to PC.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  The return address is the instruction after the
	 original BL, not after the relocated copy.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}
2557
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced step this instruction
     differently according to the condition is true or false.  Instead
     of checking COND against conditional flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1     ;
     TAKEN:
     INSN2
  */

  /* Branch over one slot (8 bytes) when the condition holds; fixup
     reads the resulting PC to learn which way the branch went.  */
  emit_bcond (dsd->insn_buf, cond, 8);
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}
2584
2585/* Dynamically allocate a new register. If we know the register
2586 statically, we should make it a global as above instead of using this
2587 helper function. */
2588
2589static struct aarch64_register
2590aarch64_register (unsigned num, int is64)
2591{
2592 return (struct aarch64_register) { num, is64 };
2593}
2594
/* Implementation of aarch64_insn_visitor method "cb".

   Relocate a CBZ/CBNZ using the same taken/not-taken PC protocol as
   conditional branches.  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 INSN1     ;
	 TAKEN:
	 INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  /* Tell fixup to decode taken/not-taken from the stopped PC.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}
2618
/* Implementation of aarch64_insn_visitor method "tb".

   Relocate a TBZ/TBNZ using the same taken/not-taken PC protocol as
   conditional branches.  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1         ;
     TAKEN:
     INSN2

  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  /* Tell fixup to decode taken/not-taken from the stopped PC.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}
2643
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP are PC-relative, so executing them at the scratch pad
   would compute the wrong address.  Compute the result here and write
   it straight into the destination register, executing a NOP in the
   scratch pad instead.  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
			    const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address & ~0xfff);
    }
  else
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address);

  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}
2670
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   PC-relative literal loads are rewritten as: put the literal's
   absolute address in the destination register, then emit a plain
   register-indirect LDR{,SW} through that register (offset 0).  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
2695
/* Implementation of aarch64_insn_visitor method "others".

   Instructions that are not PC-relative can be copied verbatim to the
   scratch pad and executed there; afterwards PC simply needs to be
   advanced past the original instruction — except for RET, which has
   already set PC itself.  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

  /* Mask frees bits 5-9 (Rn), so this matches RET with any Rn.  */
  if ((insn & 0xfffffc1f) == 0xd65f0000)
    {
      /* RET */
      dsd->dsc->pc_adjust = 0;
    }
  else
    dsd->dsc->pc_adjust = 4;
}
2716
/* Visitor used by aarch64_relocate_instruction when copying an
   instruction to the displaced-stepping scratch pad; one callback per
   instruction category, in the order the visitor interface defines.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
2727
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Copy (a possibly rewritten form of) the instruction at FROM into
   the scratch pad at TO and return a heap-allocated closure recording
   how to fix up PC afterwards; ownership of the closure passes to the
   caller.  Returns NULL when the instruction cannot be displaced
   stepped (undecodable, or the start of an atomic sequence).  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  struct displaced_step_closure *dsc = NULL;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  dsc = XCNEW (struct displaced_step_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc;
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  if (debug_displaced)
	    {
	      debug_printf ("displaced: writing insn ");
	      debug_printf ("%.8x", dsd.insn_buf[i]);
	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
	    }
	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor produced no instructions; give up and free the
	 closure so the caller falls back to another stepping method.  */
      xfree (dsc);
      dsc = NULL;
    }

  return dsc;
}
2787
/* Implement the "displaced_step_fixup" gdbarch method.

   After the relocated instruction has executed at TO, decide where PC
   should really be.  For conditional instructions the scratch-pad
   layout (B.COND/CBZ/TBZ branching 8 bytes forward) encodes the
   outcome in PC - TO: 8 means the condition was true, 4 means it was
   false (see aarch64_displaced_step_b_cond).  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_closure *dsc,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  if (dsc->cond)
    {
      ULONGEST pc;

      regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
      if (pc - to == 8)
	{
	  /* Condition is true.  Keep pc_adjust = branch offset.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  Just step over the instruction.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
    }

  if (dsc->pc_adjust != 0)
    {
      if (debug_displaced)
	{
	  debug_printf ("displaced: fixup: set PC to %s:%d\n",
			paddress (gdbarch, from), dsc->pc_adjust);
	}
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
2825
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   Always return non-zero: the core should hardware single-step over
   the instruction copied to the scratch pad.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2834
07b287a0
MS
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int i;
  int valid_p = 1;
  const struct tdesc_feature *feature;
  int num_regs = 0;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target descriptor.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = tdesc_aarch64;

  gdb_assert (tdesc);

  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  /* The core feature is mandatory; refuse the description without it.  */
  if (feature == NULL)
    return NULL;

  tdesc_data = tdesc_data_alloc ();

  /* Validate the descriptor provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &=
      tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
			       aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Look for the V registers.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  if (feature)
    {
      /* Validate the descriptor provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &=
	  tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
				   aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;

      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return NULL;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = XCNEW (struct gdbarch_tdep);
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdep_info = (void *) tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  /* The OS ABI init may have filled in tdep->jb_pc, enabling longjmp
     support.  */
  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  return gdbarch;
}
3024
3025static void
3026aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3027{
3028 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3029
3030 if (tdep == NULL)
3031 return;
3032
3033 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3034 paddress (gdbarch, tdep->lowest_pc));
3035}
3036
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 gdbarch, its target
   description, the "set/show debug aarch64" commands and (when built
   with self tests) the prologue-analyzer unit test.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  register_self_test (selftests::aarch64_analyze_prologue_test);
#endif
}
99afc88b
OJ
3061
3062/* AArch64 process record-replay related structures, defines etc. */
3063
99afc88b
OJ
/* Copy LENGTH register numbers from the local array RECORD_BUF into a
   freshly allocated array assigned to REGS.  When LENGTH is zero, REGS
   is left untouched (i.e. still NULL for a zeroed record).  The caller
   owns the allocation; it is released via deallocate_reg_mem.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)
3075
/* Copy LENGTH (length, address) records from RECORD_BUF into a freshly
   allocated array of struct aarch64_mem_r assigned to MEMS.  RECORD_BUF
   is a flat uint64_t array holding len/addr pairs, which matches the
   layout of struct aarch64_mem_r, so a single memcpy suffices (the
   copy targets the struct through its first member).  When LENGTH is
   zero, MEMS is left untouched.  Caller owns the allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
3088
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory write: the instruction will modify LEN bytes
   starting at ADDR.  Layout must stay a plain pair of uint64_t so that
   MEM_ALLOC can bulk-copy from a flat uint64_t pair array.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
3096
/* Result codes returned by the per-group record handlers.  SUCCESS must
   remain 0: callers test the decode result with "0 == ret".  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Registers/memory recorded OK.  */
  AARCH64_RECORD_FAILURE,	/* Recording failed.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Instruction group not supported.  */
  AARCH64_RECORD_UNKNOWN	/* Encoding not recognized.  */
};
3104
/* Working state for decoding one instruction during process record.
   The two arrays are heap-allocated by REG_ALLOC / MEM_ALLOC and freed
   by deallocate_reg_mem; they stay NULL when the respective count is
   zero.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
3116
3117/* Record handler for data processing - register instructions. */
3118
3119static unsigned int
3120aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3121{
3122 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3123 uint32_t record_buf[4];
3124
3125 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3126 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3127 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3128
3129 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3130 {
3131 uint8_t setflags;
3132
3133 /* Logical (shifted register). */
3134 if (insn_bits24_27 == 0x0a)
3135 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3136 /* Add/subtract. */
3137 else if (insn_bits24_27 == 0x0b)
3138 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3139 else
3140 return AARCH64_RECORD_UNKNOWN;
3141
3142 record_buf[0] = reg_rd;
3143 aarch64_insn_r->reg_rec_count = 1;
3144 if (setflags)
3145 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3146 }
3147 else
3148 {
3149 if (insn_bits24_27 == 0x0b)
3150 {
3151 /* Data-processing (3 source). */
3152 record_buf[0] = reg_rd;
3153 aarch64_insn_r->reg_rec_count = 1;
3154 }
3155 else if (insn_bits24_27 == 0x0a)
3156 {
3157 if (insn_bits21_23 == 0x00)
3158 {
3159 /* Add/subtract (with carry). */
3160 record_buf[0] = reg_rd;
3161 aarch64_insn_r->reg_rec_count = 1;
3162 if (bit (aarch64_insn_r->aarch64_insn, 29))
3163 {
3164 record_buf[1] = AARCH64_CPSR_REGNUM;
3165 aarch64_insn_r->reg_rec_count = 2;
3166 }
3167 }
3168 else if (insn_bits21_23 == 0x02)
3169 {
3170 /* Conditional compare (register) and conditional compare
3171 (immediate) instructions. */
3172 record_buf[0] = AARCH64_CPSR_REGNUM;
3173 aarch64_insn_r->reg_rec_count = 1;
3174 }
3175 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3176 {
3177 /* CConditional select. */
3178 /* Data-processing (2 source). */
3179 /* Data-processing (1 source). */
3180 record_buf[0] = reg_rd;
3181 aarch64_insn_r->reg_rec_count = 1;
3182 }
3183 else
3184 return AARCH64_RECORD_UNKNOWN;
3185 }
3186 }
3187
3188 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3189 record_buf);
3190 return AARCH64_RECORD_SUCCESS;
3191}
3192
3193/* Record handler for data processing - immediate instructions. */
3194
3195static unsigned int
3196aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3197{
78cc6c2d 3198 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
99afc88b
OJ
3199 uint32_t record_buf[4];
3200
3201 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
99afc88b
OJ
3202 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3203 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3204
3205 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3206 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3207 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3208 {
3209 record_buf[0] = reg_rd;
3210 aarch64_insn_r->reg_rec_count = 1;
3211 }
3212 else if (insn_bits24_27 == 0x01)
3213 {
3214 /* Add/Subtract (immediate). */
3215 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3216 record_buf[0] = reg_rd;
3217 aarch64_insn_r->reg_rec_count = 1;
3218 if (setflags)
3219 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3220 }
3221 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3222 {
3223 /* Logical (immediate). */
3224 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3225 record_buf[0] = reg_rd;
3226 aarch64_insn_r->reg_rec_count = 1;
3227 if (setflags)
3228 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3229 }
3230 else
3231 return AARCH64_RECORD_UNKNOWN;
3232
3233 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3234 record_buf);
3235 return AARCH64_RECORD_SUCCESS;
3236}
3237
/* Record handler for branch, exception generation and system
   instructions.

   Branches record PC (and LR for the linking forms); SVC defers to the
   OS-ABI supplied syscall recorder; system-register reads record Rt;
   hint/MSR-immediate forms record CPSR.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC (opc == 000, op2 == 000, LL == 01) is supported;
	     HVC/SMC/BRK/HLT and the DCPS family are not.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Register 8 (x8) carries the syscall number here —
		 presumably the Linux syscall convention; the OS-ABI
		 recorder interprets it.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions. */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions. */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Other system instructions (SYS, barriers) record nothing.  */
	}
      /* Unconditional branch (register). */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR additionally writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate). */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes BL (writes LR) from B.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate). */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3317
/* Record handler for advanced SIMD load and store instructions.

   Handles both the "load/store single structure" (bit 24 set) and
   "load/store multiple structures" groups.  For loads the destination
   V registers are recorded; for stores the (length, address) pairs of
   the memory that will be written are recorded.  Bit 22 is the L
   (load) bit throughout.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* The base address of the transfer comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure. */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* Number of structure elements: opcode<1>:S, plus one.  */
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the element-size encoding and refine SCALE; the
	 invalid combinations are reserved encodings.  */
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* LD*R (load and replicate): requires the L bit.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      /* Element size in bits.  */
      esize = 8 << scale;
      if (replicate)
	/* LD*R writes SELEM consecutive V registers, wrapping at 32.  */
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* L bit: load records the V register, store records the
		 ESIZE/8 bytes written at the current offset.  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure. */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Q bit selects the 128-bit or 64-bit register form.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* RPT = register repeat count, SELEM = structure element count,
	 per the LD/ST1..LD/ST4 opcode table.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers). */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers). */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers). */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers). */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register). */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers). */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers). */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23: post-index writeback form also modifies the base register.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  /* record_buf_mem holds (len, addr) pairs, hence the division.  */
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3483
/* Record handler for load and store instructions.

   Dispatches on the C4.1 load/store encoding sub-groups.  Loads record
   the destination register(s); stores record the (length, address) of
   the memory about to be written.  Writeback forms additionally record
   the base register Rn.  Unrecognized encodings fall through to the
   advanced SIMD load/store handler.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive. */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  /* Bit 21: pair form also loads Rt2.  */
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs. */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding. */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* imm7 is signed: two's-complement it when bit 6 is set,
	     then scale by the access size.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed forms write at the unmodified base address;
	     all others apply the offset first.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Writeback form also modifies the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* opc >= 2 with size != 0b11 is a (sign-extending) load;
	 opc == 0b11 with size == 0b11 is a reserved encoding.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* imm12 is scaled by the access size.  */
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  /* Bit 12 (S): scale the register offset by the access size.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  /* imm9 is signed: two's-complement it when bit 8 is set.  */
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed (bits10_11 == 01) writes at the unmodified
	     base; other forms apply the offset first.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre/post-indexed forms also write back the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions. */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3754
3755/* Record handler for data processing SIMD and floating point instructions. */
3756
3757static unsigned int
3758aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3759{
3760 uint8_t insn_bit21, opcode, rmode, reg_rd;
3761 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3762 uint8_t insn_bits11_14;
3763 uint32_t record_buf[2];
3764
3765 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3766 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3767 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3768 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3769 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3770 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3771 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3772 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3773 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3774
3775 if (record_debug)
b277c936 3776 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
3777
3778 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3779 {
3780 /* Floating point - fixed point conversion instructions. */
3781 if (!insn_bit21)
3782 {
3783 if (record_debug)
b277c936 3784 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
3785
3786 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3787 record_buf[0] = reg_rd;
3788 else
3789 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3790 }
3791 /* Floating point - conditional compare instructions. */
3792 else if (insn_bits10_11 == 0x01)
3793 {
3794 if (record_debug)
b277c936 3795 debug_printf ("FP - conditional compare");
99afc88b
OJ
3796
3797 record_buf[0] = AARCH64_CPSR_REGNUM;
3798 }
3799 /* Floating point - data processing (2-source) and
3800 conditional select instructions. */
3801 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3802 {
3803 if (record_debug)
b277c936 3804 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
3805
3806 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3807 }
3808 else if (insn_bits10_11 == 0x00)
3809 {
3810 /* Floating point - immediate instructions. */
3811 if ((insn_bits12_15 & 0x01) == 0x01
3812 || (insn_bits12_15 & 0x07) == 0x04)
3813 {
3814 if (record_debug)
b277c936 3815 debug_printf ("FP - immediate");
99afc88b
OJ
3816 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3817 }
3818 /* Floating point - compare instructions. */
3819 else if ((insn_bits12_15 & 0x03) == 0x02)
3820 {
3821 if (record_debug)
b277c936 3822 debug_printf ("FP - immediate");
99afc88b
OJ
3823 record_buf[0] = AARCH64_CPSR_REGNUM;
3824 }
3825 /* Floating point - integer conversions instructions. */
f62fce35 3826 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
3827 {
3828 /* Convert float to integer instruction. */
3829 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3830 {
3831 if (record_debug)
b277c936 3832 debug_printf ("float to int conversion");
99afc88b
OJ
3833
3834 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3835 }
3836 /* Convert integer to float instruction. */
3837 else if ((opcode >> 1) == 0x01 && !rmode)
3838 {
3839 if (record_debug)
b277c936 3840 debug_printf ("int to float conversion");
99afc88b
OJ
3841
3842 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3843 }
3844 /* Move float to integer instruction. */
3845 else if ((opcode >> 1) == 0x03)
3846 {
3847 if (record_debug)
b277c936 3848 debug_printf ("move float to int");
99afc88b
OJ
3849
3850 if (!(opcode & 0x01))
3851 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3852 else
3853 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3854 }
f62fce35
YQ
3855 else
3856 return AARCH64_RECORD_UNKNOWN;
99afc88b 3857 }
f62fce35
YQ
3858 else
3859 return AARCH64_RECORD_UNKNOWN;
99afc88b 3860 }
f62fce35
YQ
3861 else
3862 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3863 }
3864 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3865 {
3866 if (record_debug)
b277c936 3867 debug_printf ("SIMD copy");
99afc88b
OJ
3868
3869 /* Advanced SIMD copy instructions. */
3870 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3871 && !bit (aarch64_insn_r->aarch64_insn, 15)
3872 && bit (aarch64_insn_r->aarch64_insn, 10))
3873 {
3874 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3875 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3876 else
3877 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3878 }
3879 else
3880 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3881 }
3882 /* All remaining floating point or advanced SIMD instructions. */
3883 else
3884 {
3885 if (record_debug)
b277c936 3886 debug_printf ("all remain");
99afc88b
OJ
3887
3888 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3889 }
3890
3891 if (record_debug)
b277c936 3892 debug_printf ("\n");
99afc88b
OJ
3893
3894 aarch64_insn_r->reg_rec_count++;
3895 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3896 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3897 record_buf);
3898 return AARCH64_RECORD_SUCCESS;
3899}
3900
3901/* Decodes insns type and invokes its record handler. */
3902
3903static unsigned int
3904aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3905{
3906 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3907
3908 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3909 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3910 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3911 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3912
3913 /* Data processing - immediate instructions. */
3914 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3915 return aarch64_record_data_proc_imm (aarch64_insn_r);
3916
3917 /* Branch, exception generation and system instructions. */
3918 if (ins_bit26 && !ins_bit27 && ins_bit28)
3919 return aarch64_record_branch_except_sys (aarch64_insn_r);
3920
3921 /* Load and store instructions. */
3922 if (!ins_bit25 && ins_bit27)
3923 return aarch64_record_load_store (aarch64_insn_r);
3924
3925 /* Data processing - register instructions. */
3926 if (ins_bit25 && !ins_bit26 && ins_bit27)
3927 return aarch64_record_data_proc_reg (aarch64_insn_r);
3928
3929 /* Data processing - SIMD and floating point instructions. */
3930 if (ins_bit25 && ins_bit26 && ins_bit27)
3931 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3932
3933 return AARCH64_RECORD_UNSUPPORTED;
3934}
3935
3936/* Cleans up local record registers and memory allocations. */
3937
3938static void
3939deallocate_reg_mem (insn_decode_record *record)
3940{
3941 xfree (record->aarch64_regs);
3942 xfree (record->aarch64_mems);
3943}
3944
3945/* Parse the current instruction and record the values of the registers and
3946 memory that will be changed in current instruction to record_arch_list
3947 return -1 if something is wrong. */
3948
3949int
3950aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3951 CORE_ADDR insn_addr)
3952{
3953 uint32_t rec_no = 0;
3954 uint8_t insn_size = 4;
3955 uint32_t ret = 0;
99afc88b
OJ
3956 gdb_byte buf[insn_size];
3957 insn_decode_record aarch64_record;
3958
3959 memset (&buf[0], 0, insn_size);
3960 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3961 target_read_memory (insn_addr, &buf[0], insn_size);
3962 aarch64_record.aarch64_insn
3963 = (uint32_t) extract_unsigned_integer (&buf[0],
3964 insn_size,
3965 gdbarch_byte_order (gdbarch));
3966 aarch64_record.regcache = regcache;
3967 aarch64_record.this_addr = insn_addr;
3968 aarch64_record.gdbarch = gdbarch;
3969
3970 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3971 if (ret == AARCH64_RECORD_UNSUPPORTED)
3972 {
3973 printf_unfiltered (_("Process record does not support instruction "
3974 "0x%0x at address %s.\n"),
3975 aarch64_record.aarch64_insn,
3976 paddress (gdbarch, insn_addr));
3977 ret = -1;
3978 }
3979
3980 if (0 == ret)
3981 {
3982 /* Record registers. */
3983 record_full_arch_list_add_reg (aarch64_record.regcache,
3984 AARCH64_PC_REGNUM);
3985 /* Always record register CPSR. */
3986 record_full_arch_list_add_reg (aarch64_record.regcache,
3987 AARCH64_CPSR_REGNUM);
3988 if (aarch64_record.aarch64_regs)
3989 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3990 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3991 aarch64_record.aarch64_regs[rec_no]))
3992 ret = -1;
3993
3994 /* Record memories. */
3995 if (aarch64_record.aarch64_mems)
3996 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3997 if (record_full_arch_list_add_mem
3998 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3999 aarch64_record.aarch64_mems[rec_no].len))
4000 ret = -1;
4001
4002 if (record_full_arch_list_add_end ())
4003 ret = -1;
4004 }
4005
4006 deallocate_reg_mem (&aarch64_record);
4007 return ret;
4008}