/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

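/* For example, bits (insn, 5, 9) extracts the field occupying bit
   positions 5..9 of INSN: submask (9 - 5) is 0x1f, so the result is
   (insn >> 5) & 0x1f.  */
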
/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    return read_memory_unsigned_integer (memaddr, len, byte_order);
  }
};

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
                            &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

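/* For instance, a C type such as

     struct { float x, y, z; }

   is a homogeneous floating-point aggregate under the AAPCS64 (at
   most four members, all of the same floating-point type), while a
   struct mixing a float member with a double member is not.  */
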
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
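
/* As an example of the padding logic above: pushing a 12-byte
   argument whose natural alignment is 8 leaves NSAA at 12, so a
   4-byte padding item is pushed and NSAA advances to 16.  */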

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
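
/* For example, aarch64_frame_align maps 0x7ffffff4 down to 0x7ffffff0,
   while an already 16-byte-aligned SP is returned unchanged.  */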

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
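
/* Under the AArch64 DWARF register numbering, 0-30 map to X0-X30,
   31 to SP, and 64-95 to V0-V31; any other DWARF number is unmapped
   here and yields -1.  */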


/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straight-forward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}


/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
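
/* So, for example, a 24-byte plain struct is returned in memory,
   while an HFA of four doubles (32 bytes) is still returned in
   registers v0-v3.  */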
1848
1849/* Write into appropriate registers a function return value of type
1850 TYPE, given in virtual format. */
1851
1852static void
1853aarch64_store_return_value (struct type *type, struct regcache *regs,
1854 const gdb_byte *valbuf)
1855{
1856 struct gdbarch *gdbarch = get_regcache_arch (regs);
1857 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1858
1859 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1860 {
1861 bfd_byte buf[V_REGISTER_SIZE];
1862 int len = TYPE_LENGTH (type);
1863
1864 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1865 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1866 }
1867 else if (TYPE_CODE (type) == TYPE_CODE_INT
1868 || TYPE_CODE (type) == TYPE_CODE_CHAR
1869 || TYPE_CODE (type) == TYPE_CODE_BOOL
1870 || TYPE_CODE (type) == TYPE_CODE_PTR
1871 || TYPE_CODE (type) == TYPE_CODE_REF
1872 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1873 {
1874 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1875 {
1876 /* Values of one word or less are zero/sign-extended and
1877 returned in x0. */
1878 bfd_byte tmpbuf[X_REGISTER_SIZE];
1879 LONGEST val = unpack_long (type, valbuf);
1880
1881 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1882 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1883 }
1884 else
1885 {
1886 /* Integral values greater than one word are stored in
1887 consecutive registers starting with x0. This will always
1888 be a multiple of the register size. */
1889 int len = TYPE_LENGTH (type);
1890 int regno = AARCH64_X0_REGNUM;
1891
1892 while (len > 0)
1893 {
1894 regcache_cooked_write (regs, regno++, valbuf);
1895 len -= X_REGISTER_SIZE;
1896 valbuf += X_REGISTER_SIZE;
1897 }
1898 }
1899 }
1900 else if (is_hfa_or_hva (type))
1901 {
1902 int elements = TYPE_NFIELDS (type);
1903 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1904 int len = TYPE_LENGTH (member_type);
1905 int i;
1906
1907 for (i = 0; i < elements; i++)
1908 {
1909 int regno = AARCH64_V0_REGNUM + i;
1910 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1911
1912 if (aarch64_debug)
1913 {
1914 debug_printf ("write HFA or HVA return value element %d to %s\n",
1915 i + 1,
1916 gdbarch_register_name (gdbarch, regno));
1917 }
1918
1919 memcpy (tmpbuf, valbuf, len);
1920 regcache_cooked_write (regs, regno, tmpbuf);
1921 valbuf += len;
1922 }
1923 }
1924 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1925 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1926 {
1927 /* Short vector. */
1928 gdb_byte buf[V_REGISTER_SIZE];
1929
1930 memcpy (buf, valbuf, TYPE_LENGTH (type));
1931 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1932 }
1933 else
1934 {
1935 /* For a structure or union the behaviour is as if the value had
1936 been stored to word-aligned memory and then loaded into
1937 registers with 64-bit load instruction(s). */
1938 int len = TYPE_LENGTH (type);
1939 int regno = AARCH64_X0_REGNUM;
1940 bfd_byte tmpbuf[X_REGISTER_SIZE];
1941
1942 while (len > 0)
1943 {
1944 memcpy (tmpbuf, valbuf,
1945 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1946 regcache_cooked_write (regs, regno++, tmpbuf);
1947 len -= X_REGISTER_SIZE;
1948 valbuf += X_REGISTER_SIZE;
1949 }
1950 }
1951}
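/* For instance, storing a 16-byte integral return value (e.g. an
   unsigned __int128) takes the multi-register branch above: on a
   little-endian target the low eight bytes of VALBUF go to X0 and
   the high eight bytes to X1, VALBUF being in target byte order. */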
1952
1953/* Implement the "return_value" gdbarch method. */
1954
1955static enum return_value_convention
1956aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1957 struct type *valtype, struct regcache *regcache,
1958 gdb_byte *readbuf, const gdb_byte *writebuf)
1959{
1960
1961 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1962 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1963 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1964 {
1965 if (aarch64_return_in_memory (gdbarch, valtype))
1966 {
1967 if (aarch64_debug)
1968 debug_printf ("return value in memory\n");
1969 return RETURN_VALUE_STRUCT_CONVENTION;
1970 }
1971 }
1972
1973 if (writebuf)
1974 aarch64_store_return_value (valtype, regcache, writebuf);
1975
1976 if (readbuf)
1977 aarch64_extract_return_value (valtype, regcache, readbuf);
1978
1979 if (aarch64_debug)
1980 debug_printf ("return value in registers\n");
1981
1982 return RETURN_VALUE_REGISTER_CONVENTION;
1983}
1984
1985/* Implement the "get_longjmp_target" gdbarch method. */
1986
1987static int
1988aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1989{
1990 CORE_ADDR jb_addr;
1991 gdb_byte buf[X_REGISTER_SIZE];
1992 struct gdbarch *gdbarch = get_frame_arch (frame);
1993 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1994 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1995
1996 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1997
1998 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1999 X_REGISTER_SIZE))
2000 return 0;
2001
2002 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2003 return 1;
2004}
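/* Worked example with made-up numbers: if the OS ABI layer had set
   tdep->jb_pc to 11 and tdep->jb_elt_size to 8, and X0 held the
   jmp_buf address JB, the saved PC would be read from JB + 88. The
   values here are hypothetical; the real ones are filled in by
   gdbarch_init_osabi. */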
2005
2006/* Implement the "gen_return_address" gdbarch method. */
2007
2008static void
2009aarch64_gen_return_address (struct gdbarch *gdbarch,
2010 struct agent_expr *ax, struct axs_value *value,
2011 CORE_ADDR scope)
2012{
2013 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2014 value->kind = axs_lvalue_register;
2015 value->u.reg = AARCH64_LR_REGNUM;
2016}
2017\f
2018
2019/* Return the pseudo register name corresponding to register regnum. */
2020
2021static const char *
2022aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2023{
2024 static const char *const q_name[] =
2025 {
2026 "q0", "q1", "q2", "q3",
2027 "q4", "q5", "q6", "q7",
2028 "q8", "q9", "q10", "q11",
2029 "q12", "q13", "q14", "q15",
2030 "q16", "q17", "q18", "q19",
2031 "q20", "q21", "q22", "q23",
2032 "q24", "q25", "q26", "q27",
2033 "q28", "q29", "q30", "q31",
2034 };
2035
2036 static const char *const d_name[] =
2037 {
2038 "d0", "d1", "d2", "d3",
2039 "d4", "d5", "d6", "d7",
2040 "d8", "d9", "d10", "d11",
2041 "d12", "d13", "d14", "d15",
2042 "d16", "d17", "d18", "d19",
2043 "d20", "d21", "d22", "d23",
2044 "d24", "d25", "d26", "d27",
2045 "d28", "d29", "d30", "d31",
2046 };
2047
2048 static const char *const s_name[] =
2049 {
2050 "s0", "s1", "s2", "s3",
2051 "s4", "s5", "s6", "s7",
2052 "s8", "s9", "s10", "s11",
2053 "s12", "s13", "s14", "s15",
2054 "s16", "s17", "s18", "s19",
2055 "s20", "s21", "s22", "s23",
2056 "s24", "s25", "s26", "s27",
2057 "s28", "s29", "s30", "s31",
2058 };
2059
2060 static const char *const h_name[] =
2061 {
2062 "h0", "h1", "h2", "h3",
2063 "h4", "h5", "h6", "h7",
2064 "h8", "h9", "h10", "h11",
2065 "h12", "h13", "h14", "h15",
2066 "h16", "h17", "h18", "h19",
2067 "h20", "h21", "h22", "h23",
2068 "h24", "h25", "h26", "h27",
2069 "h28", "h29", "h30", "h31",
2070 };
2071
2072 static const char *const b_name[] =
2073 {
2074 "b0", "b1", "b2", "b3",
2075 "b4", "b5", "b6", "b7",
2076 "b8", "b9", "b10", "b11",
2077 "b12", "b13", "b14", "b15",
2078 "b16", "b17", "b18", "b19",
2079 "b20", "b21", "b22", "b23",
2080 "b24", "b25", "b26", "b27",
2081 "b28", "b29", "b30", "b31",
2082 };
2083
2084 regnum -= gdbarch_num_regs (gdbarch);
2085
2086 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2087 return q_name[regnum - AARCH64_Q0_REGNUM];
2088
2089 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2090 return d_name[regnum - AARCH64_D0_REGNUM];
2091
2092 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2093 return s_name[regnum - AARCH64_S0_REGNUM];
2094
2095 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2096 return h_name[regnum - AARCH64_H0_REGNUM];
2097
2098 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2099 return b_name[regnum - AARCH64_B0_REGNUM];
2100
2101 internal_error (__FILE__, __LINE__,
2102 _("aarch64_pseudo_register_name: bad register number %d"),
2103 regnum);
2104}
2105
2106/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2107
2108static struct type *
2109aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2110{
2111 regnum -= gdbarch_num_regs (gdbarch);
2112
2113 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2114 return aarch64_vnq_type (gdbarch);
2115
2116 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2117 return aarch64_vnd_type (gdbarch);
2118
2119 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2120 return aarch64_vns_type (gdbarch);
2121
2122 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2123 return aarch64_vnh_type (gdbarch);
2124
2125 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2126 return aarch64_vnb_type (gdbarch);
2127
2128 internal_error (__FILE__, __LINE__,
2129 _("aarch64_pseudo_register_type: bad register number %d"),
2130 regnum);
2131}
2132
2133/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2134
2135static int
2136aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2137 struct reggroup *group)
2138{
2139 regnum -= gdbarch_num_regs (gdbarch);
2140
2141 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2142 return group == all_reggroup || group == vector_reggroup;
2143 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2144 return (group == all_reggroup || group == vector_reggroup
2145 || group == float_reggroup);
2146 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2147 return (group == all_reggroup || group == vector_reggroup
2148 || group == float_reggroup);
2149 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2150 return group == all_reggroup || group == vector_reggroup;
2151 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2152 return group == all_reggroup || group == vector_reggroup;
2153
2154 return group == all_reggroup;
2155}
2156
2157/* Implement the "pseudo_register_read_value" gdbarch method. */
2158
2159static struct value *
2160aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2161 struct regcache *regcache,
2162 int regnum)
2163{
2164 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2165 struct value *result_value;
2166 gdb_byte *buf;
2167
2168 result_value = allocate_value (register_type (gdbarch, regnum));
2169 VALUE_LVAL (result_value) = lval_register;
2170 VALUE_REGNUM (result_value) = regnum;
2171 buf = value_contents_raw (result_value);
2172
2173 regnum -= gdbarch_num_regs (gdbarch);
2174
2175 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2176 {
2177 enum register_status status;
2178 unsigned v_regnum;
2179
2180 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2181 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2182 if (status != REG_VALID)
2183 mark_value_bytes_unavailable (result_value, 0,
2184 TYPE_LENGTH (value_type (result_value)));
2185 else
2186 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2187 return result_value;
2188 }
2189
2190 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2191 {
2192 enum register_status status;
2193 unsigned v_regnum;
2194
2195 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2196 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2197 if (status != REG_VALID)
2198 mark_value_bytes_unavailable (result_value, 0,
2199 TYPE_LENGTH (value_type (result_value)));
2200 else
2201 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2202 return result_value;
2203 }
2204
2205 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2206 {
2207 enum register_status status;
2208 unsigned v_regnum;
2209
2210 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2211 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2212 if (status != REG_VALID)
2213 mark_value_bytes_unavailable (result_value, 0,
2214 TYPE_LENGTH (value_type (result_value)));
2215 else
2216 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2217 return result_value;
2218 }
2219
2220 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2221 {
2222 enum register_status status;
2223 unsigned v_regnum;
2224
2225 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2226 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2227 if (status != REG_VALID)
2228 mark_value_bytes_unavailable (result_value, 0,
2229 TYPE_LENGTH (value_type (result_value)));
2230 else
2231 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2232 return result_value;
2233 }
2234
2235 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2236 {
2237 enum register_status status;
2238 unsigned v_regnum;
2239
2240 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2241 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2242 if (status != REG_VALID)
2243 mark_value_bytes_unavailable (result_value, 0,
2244 TYPE_LENGTH (value_type (result_value)));
2245 else
2246 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2247 return result_value;
2248 }
2249
2250 gdb_assert_not_reached ("regnum out of bounds");
2251}
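/* All of the B/H/S/D/Q pseudo registers above alias the low bytes of
   the corresponding V register: reading D5, say, copies the low
   D_REGISTER_SIZE (8) bytes of the raw V5 contents, and reading S5
   the low 4 bytes. The pseudos have no storage of their own. */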
2252
2253/* Implement the "pseudo_register_write" gdbarch method. */
2254
2255static void
2256aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2257 int regnum, const gdb_byte *buf)
2258{
2259 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2260
2261 /* Ensure the register buffer is zero; we want gdb writes of the
2262 various 'scalar' pseudo registers to behave like architectural
2263 writes: register width bytes are written, the remainder are set
2264 to zero. */
2265 memset (reg_buf, 0, sizeof (reg_buf));
2266
2267 regnum -= gdbarch_num_regs (gdbarch);
2268
2269 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2270 {
2271 /* pseudo Q registers */
2272 unsigned v_regnum;
2273
2274 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2275 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2276 regcache_raw_write (regcache, v_regnum, reg_buf);
2277 return;
2278 }
2279
2280 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2281 {
2282 /* pseudo D registers */
2283 unsigned v_regnum;
2284
2285 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2286 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2287 regcache_raw_write (regcache, v_regnum, reg_buf);
2288 return;
2289 }
2290
2291 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2292 {
2293 unsigned v_regnum;
2294
2295 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2296 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2297 regcache_raw_write (regcache, v_regnum, reg_buf);
2298 return;
2299 }
2300
2301 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2302 {
2303 /* pseudo H registers */
2304 unsigned v_regnum;
2305
2306 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2307 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2308 regcache_raw_write (regcache, v_regnum, reg_buf);
2309 return;
2310 }
2311
2312 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2313 {
2314 /* pseudo B registers */
2315 unsigned v_regnum;
2316
2317 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2318 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2319 regcache_raw_write (regcache, v_regnum, reg_buf);
2320 return;
2321 }
2322
2323 gdb_assert_not_reached ("regnum out of bounds");
2324}
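/* Because REG_BUF is zeroed first, these pseudo writes behave like
   architectural writes to the narrow views: writing four bytes to S3,
   for example, stores them to the low part of V3 and clears the
   remaining twelve bytes, much as an FP instruction targeting S3
   would. */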
2325
2326/* Callback function for user_reg_add. */
2327
2328static struct value *
2329value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2330{
2331 const int *reg_p = (const int *) baton;
2332
2333 return value_of_register (*reg_p, frame);
2334}
2335\f
2336
2337/* Implement the "software_single_step" gdbarch method, needed to
2338 single step through atomic sequences on AArch64. */
2339
2340 static VEC (CORE_ADDR) *
2341 aarch64_software_single_step (struct regcache *regcache)
2342 {
2343 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2344 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2345 const int insn_size = 4;
2346 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2347 CORE_ADDR pc = regcache_read_pc (regcache);
2348 CORE_ADDR breaks[2] = { -1, -1 };
2349 CORE_ADDR loc = pc;
2350 CORE_ADDR closing_insn = 0;
2351 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2352 byte_order_for_code);
2353 int index;
2354 int insn_count;
2355 int bc_insn_count = 0; /* Conditional branch instruction count. */
2356 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2357 aarch64_inst inst;
2358 VEC (CORE_ADDR) *next_pcs = NULL;
2359
2360 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2361 return NULL;
2362
2363 /* Look for a Load Exclusive instruction which begins the sequence. */
2364 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2365 return NULL;
2366
2367 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2368 {
2369 loc += insn_size;
2370 insn = read_memory_unsigned_integer (loc, insn_size,
2371 byte_order_for_code);
2372
2373 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2374 return NULL;
2375 /* Check if the instruction is a conditional branch. */
2376 if (inst.opcode->iclass == condbranch)
2377 {
2378 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2379
2380 if (bc_insn_count >= 1)
2381 return NULL;
2382
2383 /* It is, so we'll try to set a breakpoint at the destination. */
2384 breaks[1] = loc + inst.operands[0].imm.value;
2385
2386 bc_insn_count++;
2387 last_breakpoint++;
2388 }
2389
2390 /* Look for the Store Exclusive which closes the atomic sequence. */
2391 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2392 {
2393 closing_insn = loc;
2394 break;
2395 }
2396 }
2397
2398 /* We didn't find a closing Store Exclusive instruction, fall back. */
2399 if (!closing_insn)
2400 return NULL;
2401
2402 /* Insert breakpoint after the end of the atomic sequence. */
2403 breaks[0] = loc + insn_size;
2404
2405 /* Check for duplicated breakpoints, and also check that the second
2406 breakpoint is not within the atomic sequence. */
2407 if (last_breakpoint
2408 && (breaks[1] == breaks[0]
2409 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2410 last_breakpoint = 0;
2411
2412 /* Insert the breakpoint at the end of the sequence, and one at the
2413 destination of the conditional branch, if it exists. */
2414 for (index = 0; index <= last_breakpoint; index++)
2415 VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);
2416
2417 return next_pcs;
2418}
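/* A typical sequence this recognizes (a sketch; compilers emit
   variations of this for atomic operations):

     retry:
       LDAXR   w0, [x1]       ; load exclusive opens the sequence
       CMP     w0, w2
       B.NE    out            ; branch target OUT becomes breaks[1]
       STLXR   w3, w4, [x1]   ; store exclusive closes the sequence
       CBNZ    w3, retry      ; breaks[0] is placed here
     out:

   Stopping inside the sequence would clear the exclusive monitor and
   make the store exclusive fail on every resumption, so both
   breakpoints are placed outside it. */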
2419
2420struct displaced_step_closure
2421{
2422 /* True if a conditional instruction, such as B.COND, TBZ, etc.,
2423 is being displaced stepped. */
2424 int cond;
2425
2426 /* PC adjustment offset after displaced stepping. */
2427 int32_t pc_adjust;
2428};
2429
2430/* Data when visiting instructions for displaced stepping. */
2431
2432struct aarch64_displaced_step_data
2433{
2434 struct aarch64_insn_data base;
2435
2436 /* The address at which the instruction will be executed. */
2437 CORE_ADDR new_addr;
2438 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2439 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2440 /* Number of instructions in INSN_BUF. */
2441 unsigned insn_count;
2442 /* Registers when doing displaced stepping. */
2443 struct regcache *regs;
2444
2445 struct displaced_step_closure *dsc;
2446};
2447
2448/* Implementation of aarch64_insn_visitor method "b". */
2449
2450static void
2451aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2452 struct aarch64_insn_data *data)
2453{
2454 struct aarch64_displaced_step_data *dsd
2455 = (struct aarch64_displaced_step_data *) data;
2456 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2457
2458 if (can_encode_int32 (new_offset, 28))
2459 {
2460 /* Emit B rather than BL, because executing BL on a new address
2461 will get the wrong address into LR. In order to avoid this,
2462 we emit B, and update LR if the instruction is BL. */
2463 emit_b (dsd->insn_buf, 0, new_offset);
2464 dsd->insn_count++;
2465 }
2466 else
2467 {
2468 /* Write NOP. */
2469 emit_nop (dsd->insn_buf);
2470 dsd->insn_count++;
2471 dsd->dsc->pc_adjust = offset;
2472 }
2473
2474 if (is_bl)
2475 {
2476 /* Update LR. */
2477 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2478 data->insn_addr + 4);
2479 }
2480}
2481
2482/* Implementation of aarch64_insn_visitor method "b_cond". */
2483
2484static void
2485aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2486 struct aarch64_insn_data *data)
2487{
2488 struct aarch64_displaced_step_data *dsd
2489 = (struct aarch64_displaced_step_data *) data;
2490
2491 /* GDB has to fix up the PC after displaced stepping this instruction
2492 differently according to whether the condition is true or false.
2493 Instead of checking COND against the condition flags, we can emit
2494 the following instructions, and then GDB can tell how to fix up the
2495 PC from the resulting PC value.
2496
2497 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2498 INSN1 ;
2499 TAKEN:
2500 INSN2
2501 */
2502
2503 emit_bcond (dsd->insn_buf, cond, 8);
2504 dsd->dsc->cond = 1;
2505 dsd->dsc->pc_adjust = offset;
2506 dsd->insn_count = 1;
2507}
2508
2509 /* Construct an aarch64_register operand. If the register is known
2510 statically, we should make it a global as above instead of using
2511 this helper function. */
2512
2513static struct aarch64_register
2514aarch64_register (unsigned num, int is64)
2515{
2516 return (struct aarch64_register) { num, is64 };
2517}
2518
2519/* Implementation of aarch64_insn_visitor method "cb". */
2520
2521static void
2522aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2523 const unsigned rn, int is64,
2524 struct aarch64_insn_data *data)
2525{
2526 struct aarch64_displaced_step_data *dsd
2527 = (struct aarch64_displaced_step_data *) data;
2528
2529 /* The offset is out of range for a compare and branch
2530 instruction. We can use the following instructions instead:
2531
2532 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2533 INSN1 ;
2534 TAKEN:
2535 INSN2
2536 */
2537 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2538 dsd->insn_count = 1;
2539 dsd->dsc->cond = 1;
2540 dsd->dsc->pc_adjust = offset;
2541}
2542
2543/* Implementation of aarch64_insn_visitor method "tb". */
2544
2545static void
2546aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2547 const unsigned rt, unsigned bit,
2548 struct aarch64_insn_data *data)
2549{
2550 struct aarch64_displaced_step_data *dsd
2551 = (struct aarch64_displaced_step_data *) data;
2552
2553 /* The offset is out of range for a test bit and branch
2554 instruction. We can use the following instructions instead:
2555
2556 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2557 INSN1 ;
2558 TAKEN:
2559 INSN2
2560
2561 */
2562 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2563 dsd->insn_count = 1;
2564 dsd->dsc->cond = 1;
2565 dsd->dsc->pc_adjust = offset;
2566}
2567
2568/* Implementation of aarch64_insn_visitor method "adr". */
2569
2570static void
2571aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2572 const int is_adrp, struct aarch64_insn_data *data)
2573{
2574 struct aarch64_displaced_step_data *dsd
2575 = (struct aarch64_displaced_step_data *) data;
2576 /* We know exactly the address the ADR{P,} instruction will compute.
2577 We can just write it to the destination register. */
2578 CORE_ADDR address = data->insn_addr + offset;
2579
2580 if (is_adrp)
2581 {
2582 /* Clear the lower 12 bits of the offset to get the 4K page. */
2583 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2584 address & ~0xfff);
2585 }
2586 else
2587 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2588 address);
2589
2590 dsd->dsc->pc_adjust = 4;
2591 emit_nop (dsd->insn_buf);
2592 dsd->insn_count = 1;
2593}
2594
2595/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2596
2597static void
2598aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2599 const unsigned rt, const int is64,
2600 struct aarch64_insn_data *data)
2601{
2602 struct aarch64_displaced_step_data *dsd
2603 = (struct aarch64_displaced_step_data *) data;
2604 CORE_ADDR address = data->insn_addr + offset;
2605 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2606
2607 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2608 address);
2609
2610 if (is_sw)
2611 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2612 aarch64_register (rt, 1), zero);
2613 else
2614 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2615 aarch64_register (rt, 1), zero);
2616
2617 dsd->dsc->pc_adjust = 4;
2618}
2619
2620/* Implementation of aarch64_insn_visitor method "others". */
2621
2622static void
2623aarch64_displaced_step_others (const uint32_t insn,
2624 struct aarch64_insn_data *data)
2625{
2626 struct aarch64_displaced_step_data *dsd
2627 = (struct aarch64_displaced_step_data *) data;
2628
2629 aarch64_emit_insn (dsd->insn_buf, insn);
2630 dsd->insn_count = 1;
2631
2632 if ((insn & 0xfffffc1f) == 0xd65f0000)
2633 {
2634 /* RET */
2635 dsd->dsc->pc_adjust = 0;
2636 }
2637 else
2638 dsd->dsc->pc_adjust = 4;
2639}
2640
2641static const struct aarch64_insn_visitor visitor =
2642{
2643 aarch64_displaced_step_b,
2644 aarch64_displaced_step_b_cond,
2645 aarch64_displaced_step_cb,
2646 aarch64_displaced_step_tb,
2647 aarch64_displaced_step_adr,
2648 aarch64_displaced_step_ldr_literal,
2649 aarch64_displaced_step_others,
2650};
2651
2652/* Implement the "displaced_step_copy_insn" gdbarch method. */
2653
2654struct displaced_step_closure *
2655aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2656 CORE_ADDR from, CORE_ADDR to,
2657 struct regcache *regs)
2658{
2659 struct displaced_step_closure *dsc = NULL;
2660 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2661 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2662 struct aarch64_displaced_step_data dsd;
2663 aarch64_inst inst;
2664
2665 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2666 return NULL;
2667
2668 /* Look for a Load Exclusive instruction which begins the sequence. */
2669 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2670 {
2671 /* We can't displaced step atomic sequences. */
2672 return NULL;
2673 }
2674
2675 dsc = XCNEW (struct displaced_step_closure);
2676 dsd.base.insn_addr = from;
2677 dsd.new_addr = to;
2678 dsd.regs = regs;
2679 dsd.dsc = dsc;
2680 dsd.insn_count = 0;
2681 aarch64_relocate_instruction (insn, &visitor,
2682 (struct aarch64_insn_data *) &dsd);
2683 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2684
2685 if (dsd.insn_count != 0)
2686 {
2687 int i;
2688
2689 /* Instruction can be relocated to scratch pad. Copy
2690 relocated instruction(s) there. */
2691 for (i = 0; i < dsd.insn_count; i++)
2692 {
2693 if (debug_displaced)
2694 {
2695 debug_printf ("displaced: writing insn ");
2696 debug_printf ("%.8x", dsd.insn_buf[i]);
2697 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2698 }
2699 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2700 (ULONGEST) dsd.insn_buf[i]);
2701 }
2702 }
2703 else
2704 {
2705 xfree (dsc);
2706 dsc = NULL;
2707 }
2708
2709 return dsc;
2710}
2711
2712/* Implement the "displaced_step_fixup" gdbarch method. */
2713
2714void
2715aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2716 struct displaced_step_closure *dsc,
2717 CORE_ADDR from, CORE_ADDR to,
2718 struct regcache *regs)
2719{
2720 if (dsc->cond)
2721 {
2722 ULONGEST pc;
2723
2724 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2725 if (pc - to == 8)
2726 {
2727 /* Condition is true. */
2728 }
2729 else if (pc - to == 4)
2730 {
2731 /* Condition is false. */
2732 dsc->pc_adjust = 4;
2733 }
2734 else
2735 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2736 }
2737
2738 if (dsc->pc_adjust != 0)
2739 {
2740 if (debug_displaced)
2741 {
2742 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2743 paddress (gdbarch, from), dsc->pc_adjust);
2744 }
2745 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2746 from + dsc->pc_adjust);
2747 }
2748}
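/* Worked example for a displaced-stepped B.EQ: the scratch pad at TO
   holds a single "B.EQ +8", and PC_ADJUST holds the original branch
   offset. If the condition held, the inferior stops at TO + 8 and
   the PC is rewritten to FROM + PC_ADJUST, the branch target; if it
   did not, the inferior stops at TO + 4, PC_ADJUST is forced to 4,
   and the PC becomes FROM + 4, the next sequential instruction. */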
2749
2750/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2751
2752int
2753aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2754 struct displaced_step_closure *closure)
2755{
2756 return 1;
2757}
2758
2759/* Initialize the current architecture based on INFO. If possible,
2760 re-use an architecture from ARCHES, which is a list of
2761 architectures already created during this debugging session.
2762
2763 Called e.g. at program startup, when reading a core file, and when
2764 reading a binary file. */
2765
2766static struct gdbarch *
2767aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2768{
2769 struct gdbarch_tdep *tdep;
2770 struct gdbarch *gdbarch;
2771 struct gdbarch_list *best_arch;
2772 struct tdesc_arch_data *tdesc_data = NULL;
2773 const struct target_desc *tdesc = info.target_desc;
2774 int i;
2775 int valid_p = 1;
2776 const struct tdesc_feature *feature;
2777 int num_regs = 0;
2778 int num_pseudo_regs = 0;
2779
2780 /* Ensure we always have a target descriptor. */
2781 if (!tdesc_has_registers (tdesc))
2782 tdesc = tdesc_aarch64;
2783
2784 gdb_assert (tdesc);
2785
2786 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2787
2788 if (feature == NULL)
2789 return NULL;
2790
2791 tdesc_data = tdesc_data_alloc ();
2792
2793 /* Validate the descriptor provides the mandatory core R registers
2794 and allocate their numbers. */
2795 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2796 valid_p &=
2797 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2798 aarch64_r_register_names[i]);
2799
2800 num_regs = AARCH64_X0_REGNUM + i;
2801
2802 /* Look for the V registers. */
2803 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2804 if (feature)
2805 {
2806 /* Validate the descriptor provides the mandatory V registers
2807 and allocate their numbers. */
2808 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2809 valid_p &=
2810 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2811 aarch64_v_register_names[i]);
2812
2813 num_regs = AARCH64_V0_REGNUM + i;
2814
2815 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2816 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2817 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2818 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2819 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2820 }
2821
2822 if (!valid_p)
2823 {
2824 tdesc_data_cleanup (tdesc_data);
2825 return NULL;
2826 }
2827
2828 /* AArch64 code is always little-endian. */
2829 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2830
2831 /* If there is already a candidate, use it. */
2832 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2833 best_arch != NULL;
2834 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2835 {
2836 /* Found a match. */
2837 break;
2838 }
2839
2840 if (best_arch != NULL)
2841 {
2842 if (tdesc_data != NULL)
2843 tdesc_data_cleanup (tdesc_data);
2844 return best_arch->gdbarch;
2845 }
2846
2847 tdep = XCNEW (struct gdbarch_tdep);
2848 gdbarch = gdbarch_alloc (&info, tdep);
2849
2850 /* This should be low enough for everything. */
2851 tdep->lowest_pc = 0x20;
2852 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2853 tdep->jb_elt_size = 8;
2854
2855 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2856 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2857
2858 /* Frame handling. */
2859 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2860 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2861 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2862
2863 /* Advance PC across function entry code. */
2864 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2865
2866 /* The stack grows downward. */
2867 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2868
2869 /* Breakpoint manipulation. */
2870 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2871 aarch64_breakpoint::kind_from_pc);
2872 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2873 aarch64_breakpoint::bp_from_kind);
2874 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2875 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2876
2877 /* Information about registers, etc. */
2878 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2879 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2880 set_gdbarch_num_regs (gdbarch, num_regs);
2881
2882 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2883 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2884 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2885 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2886 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2887 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2888 aarch64_pseudo_register_reggroup_p);
2889
2890 /* ABI */
2891 set_gdbarch_short_bit (gdbarch, 16);
2892 set_gdbarch_int_bit (gdbarch, 32);
2893 set_gdbarch_float_bit (gdbarch, 32);
2894 set_gdbarch_double_bit (gdbarch, 64);
2895 set_gdbarch_long_double_bit (gdbarch, 128);
2896 set_gdbarch_long_bit (gdbarch, 64);
2897 set_gdbarch_long_long_bit (gdbarch, 64);
2898 set_gdbarch_ptr_bit (gdbarch, 64);
2899 set_gdbarch_char_signed (gdbarch, 0);
2900 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2901 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2902 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2903
2904 /* Internal <-> external register number maps. */
2905 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2906
2907 /* Returning results. */
2908 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2909
2910 /* Disassembly. */
2911 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2912
2913 /* Virtual tables. */
2914 set_gdbarch_vbit_in_delta (gdbarch, 1);
2915
2916 /* Hook in the ABI-specific overrides, if they have been registered. */
2917 info.target_desc = tdesc;
2918 info.tdep_info = (void *) tdesc_data;
2919 gdbarch_init_osabi (info, gdbarch);
2920
2921 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2922
2923 /* Add some default predicates. */
2924 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2925 dwarf2_append_unwinders (gdbarch);
2926 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2927
2928 frame_base_set_default (gdbarch, &aarch64_normal_base);
2929
2930 /* Now we have tuned the configuration, set a few final things,
2931 based on what the OS ABI has told us. */
2932
2933 if (tdep->jb_pc >= 0)
2934 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2935
2936 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2937
2938 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2939
2940 /* Add standard register aliases. */
2941 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2942 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2943 value_of_aarch64_user_reg,
2944 &aarch64_register_aliases[i].regnum);
2945
2946 return gdbarch;
2947}
2948
2949static void
2950aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2951{
2952 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2953
2954 if (tdep == NULL)
2955 return;
2956
2957 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2958 paddress (gdbarch, tdep->lowest_pc));
2959}
2960
2961/* Suppress warning from -Wmissing-prototypes. */
2962extern initialize_file_ftype _initialize_aarch64_tdep;
2963
2964void
2965_initialize_aarch64_tdep (void)
2966{
2967 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2968 aarch64_dump_tdep);
2969
2970 initialize_tdesc_aarch64 ();
2971
2972 /* Debug this file's internals. */
2973 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2974Set AArch64 debugging."), _("\
2975Show AArch64 debugging."), _("\
2976When on, AArch64 specific debugging is enabled."),
2977 NULL,
2978 show_aarch64_debug,
2979 &setdebuglist, &showdebuglist);
2980
2981#if GDB_SELF_TEST
2982 register_self_test (selftests::aarch64_analyze_prologue_test);
2983#endif
2984}
2985
2986/* AArch64 process record-replay related structures, defines etc. */
2987
2988#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2989 do \
2990 { \
2991 unsigned int reg_len = LENGTH; \
2992 if (reg_len) \
2993 { \
2994 REGS = XNEWVEC (uint32_t, reg_len); \
2995 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2996 } \
2997 } \
2998 while (0)
2999
3000#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3001 do \
3002 { \
3003 unsigned int mem_len = LENGTH; \
3004 if (mem_len) \
3005 { \
3006 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3007 memcpy(&MEMS->len, &RECORD_BUF[0], \
3008 sizeof(struct aarch64_mem_r) * LENGTH); \
3009 } \
3010 } \
3011 while (0)
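/* The record handlers below all use these macros the same way: fill
   a local buffer, set the count in the insn_decode_record, then hand
   both over, e.g.

     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   MEM_ALLOC expects RECORD_BUF to hold <length, address> pairs, two
   64-bit entries per memory record. */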
3012
3013/* AArch64 record/replay structures and enumerations. */
3014
3015struct aarch64_mem_r
3016{
3017 uint64_t len; /* Record length. */
3018 uint64_t addr; /* Memory address. */
3019};
3020
3021enum aarch64_record_result
3022{
3023 AARCH64_RECORD_SUCCESS,
3024 AARCH64_RECORD_FAILURE,
3025 AARCH64_RECORD_UNSUPPORTED,
3026 AARCH64_RECORD_UNKNOWN
3027};
3028
3029typedef struct insn_decode_record_t
3030{
3031 struct gdbarch *gdbarch;
3032 struct regcache *regcache;
3033 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3034 uint32_t aarch64_insn; /* Insn to be recorded. */
3035 uint32_t mem_rec_count; /* Count of memory records. */
3036 uint32_t reg_rec_count; /* Count of register records. */
3037 uint32_t *aarch64_regs; /* Registers to be recorded. */
3038 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3039} insn_decode_record;
3040
3041/* Record handler for data processing - register instructions. */
3042
3043static unsigned int
3044aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3045{
3046 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3047 uint32_t record_buf[4];
3048
3049 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3050 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3051 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3052
3053 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3054 {
3055 uint8_t setflags;
3056
3057 /* Logical (shifted register). */
3058 if (insn_bits24_27 == 0x0a)
3059 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3060 /* Add/subtract. */
3061 else if (insn_bits24_27 == 0x0b)
3062 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3063 else
3064 return AARCH64_RECORD_UNKNOWN;
3065
3066 record_buf[0] = reg_rd;
3067 aarch64_insn_r->reg_rec_count = 1;
3068 if (setflags)
3069 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3070 }
3071 else
3072 {
3073 if (insn_bits24_27 == 0x0b)
3074 {
3075 /* Data-processing (3 source). */
3076 record_buf[0] = reg_rd;
3077 aarch64_insn_r->reg_rec_count = 1;
3078 }
3079 else if (insn_bits24_27 == 0x0a)
3080 {
3081 if (insn_bits21_23 == 0x00)
3082 {
3083 /* Add/subtract (with carry). */
3084 record_buf[0] = reg_rd;
3085 aarch64_insn_r->reg_rec_count = 1;
3086 if (bit (aarch64_insn_r->aarch64_insn, 29))
3087 {
3088 record_buf[1] = AARCH64_CPSR_REGNUM;
3089 aarch64_insn_r->reg_rec_count = 2;
3090 }
3091 }
3092 else if (insn_bits21_23 == 0x02)
3093 {
3094 /* Conditional compare (register) and conditional compare
3095 (immediate) instructions. */
3096 record_buf[0] = AARCH64_CPSR_REGNUM;
3097 aarch64_insn_r->reg_rec_count = 1;
3098 }
3099 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3100 {
3101 /* Conditional select. */
3102 /* Data-processing (2 source). */
3103 /* Data-processing (1 source). */
3104 record_buf[0] = reg_rd;
3105 aarch64_insn_r->reg_rec_count = 1;
3106 }
3107 else
3108 return AARCH64_RECORD_UNKNOWN;
3109 }
3110 }
3111
3112 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3113 record_buf);
3114 return AARCH64_RECORD_SUCCESS;
3115}
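/* For example, "ADDS x0, x1, x2" (add/subtract with the S bit set)
   is recorded above as clobbering both X0 and the CPSR, since the
   condition flags are rewritten; a plain "ADD x0, x1, x2" records
   only X0. */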
3116
3117/* Record handler for data processing - immediate instructions. */
3118
3119static unsigned int
3120aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3121{
3122 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3123 uint32_t record_buf[4];
3124
3125 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3126 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3127 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3128
3129 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3130 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3131 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3132 {
3133 record_buf[0] = reg_rd;
3134 aarch64_insn_r->reg_rec_count = 1;
3135 }
3136 else if (insn_bits24_27 == 0x01)
3137 {
3138 /* Add/Subtract (immediate). */
3139 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3140 record_buf[0] = reg_rd;
3141 aarch64_insn_r->reg_rec_count = 1;
3142 if (setflags)
3143 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3144 }
3145 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3146 {
3147 /* Logical (immediate). */
3148 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3149 record_buf[0] = reg_rd;
3150 aarch64_insn_r->reg_rec_count = 1;
3151 if (setflags)
3152 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3153 }
3154 else
3155 return AARCH64_RECORD_UNKNOWN;
3156
3157 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3158 record_buf);
3159 return AARCH64_RECORD_SUCCESS;
3160}
3161
3162/* Record handler for branch, exception generation and system instructions. */
3163
3164static unsigned int
3165aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3166{
3167 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3168 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3169 uint32_t record_buf[4];
3170
3171 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3172 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3173 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3174
3175 if (insn_bits28_31 == 0x0d)
3176 {
3177 /* Exception generation instructions. */
3178 if (insn_bits24_27 == 0x04)
3179 {
3180 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3181 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3182 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3183 {
3184 ULONGEST svc_number;
3185
3186 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3187 &svc_number);
3188 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3189 svc_number);
3190 }
3191 else
3192 return AARCH64_RECORD_UNSUPPORTED;
3193 }
3194 /* System instructions. */
3195 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3196 {
3197 uint32_t reg_rt, reg_crn;
3198
3199 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3200 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3201
3202 /* Record rt in case of sysl and mrs instructions. */
3203 if (bit (aarch64_insn_r->aarch64_insn, 21))
3204 {
3205 record_buf[0] = reg_rt;
3206 aarch64_insn_r->reg_rec_count = 1;
3207 }
3208 /* Record cpsr for hint and msr(immediate) instructions. */
3209 else if (reg_crn == 0x02 || reg_crn == 0x04)
3210 {
3211 record_buf[0] = AARCH64_CPSR_REGNUM;
3212 aarch64_insn_r->reg_rec_count = 1;
3213 }
3214 }
3215 /* Unconditional branch (register). */
3216 else if ((insn_bits24_27 & 0x0e) == 0x06)
3217 {
3218 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3219 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3220 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3221 }
3222 else
3223 return AARCH64_RECORD_UNKNOWN;
3224 }
3225 /* Unconditional branch (immediate). */
3226 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3227 {
3228 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3229 if (bit (aarch64_insn_r->aarch64_insn, 31))
3230 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3231 }
3232 else
3233 /* Compare & branch (immediate), Test & branch (immediate) and
3234 Conditional branch (immediate). */
3235 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3236
3237 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3238 record_buf);
3239 return AARCH64_RECORD_SUCCESS;
3240}
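/* Two quick examples of the classification above: "BL label"
   (unconditional branch, immediate, bit 31 set) records both the PC
   and the LR it clobbers, while a conditional branch such as
   "B.NE label" falls through to the final case and records only the
   PC. */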
3241
3242/* Record handler for advanced SIMD load and store instructions. */
3243
3244static unsigned int
3245aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3246{
3247 CORE_ADDR address;
3248 uint64_t addr_offset = 0;
3249 uint32_t record_buf[24];
3250 uint64_t record_buf_mem[24];
3251 uint32_t reg_rn, reg_rt;
3252 uint32_t reg_index = 0, mem_index = 0;
3253 uint8_t opcode_bits, size_bits;
3254
3255 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3256 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3257 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3258 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3259 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3260
3261 if (record_debug)
3262 debug_printf ("Process record: Advanced SIMD load/store\n");
3263
3264 /* Load/store single structure. */
3265 if (bit (aarch64_insn_r->aarch64_insn, 24))
3266 {
3267 uint8_t sindex, scale, selem, esize, replicate = 0;
3268 scale = opcode_bits >> 2;
3269 selem = ((opcode_bits & 0x02) |
3270 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3271 switch (scale)
3272 {
3273 case 1:
3274 if (size_bits & 0x01)
3275 return AARCH64_RECORD_UNKNOWN;
3276 break;
3277 case 2:
3278 if ((size_bits >> 1) & 0x01)
3279 return AARCH64_RECORD_UNKNOWN;
3280 if (size_bits & 0x01)
3281 {
3282 if (!((opcode_bits >> 1) & 0x01))
3283 scale = 3;
3284 else
3285 return AARCH64_RECORD_UNKNOWN;
3286 }
3287 break;
3288 case 3:
3289 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3290 {
3291 scale = size_bits;
3292 replicate = 1;
3293 break;
3294 }
3295 else
3296 return AARCH64_RECORD_UNKNOWN;
3297 default:
3298 break;
3299 }
3300 esize = 8 << scale;
3301 if (replicate)
3302 for (sindex = 0; sindex < selem; sindex++)
3303 {
3304 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3305 reg_rt = (reg_rt + 1) % 32;
3306 }
3307 else
3308 {
3309 for (sindex = 0; sindex < selem; sindex++)
3310 {
3311 if (bit (aarch64_insn_r->aarch64_insn, 22))
3312 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3313 else
3314 {
3315 record_buf_mem[mem_index++] = esize / 8;
3316 record_buf_mem[mem_index++] = address + addr_offset;
3317 }
3318 addr_offset = addr_offset + (esize / 8);
3319 reg_rt = (reg_rt + 1) % 32;
3320 }
3321 }
3322 }
3323 /* Load/store multiple structure. */
3324 else
3325 {
3326 uint8_t selem, esize, rpt, elements;
3327 uint8_t eindex, rindex;
3328
3329 esize = 8 << size_bits;
3330 if (bit (aarch64_insn_r->aarch64_insn, 30))
3331 elements = 128 / esize;
3332 else
3333 elements = 64 / esize;
3334
3335 switch (opcode_bits)
3336 {
3337 /* LD/ST4 (4 Registers). */
3338 case 0:
3339 rpt = 1;
3340 selem = 4;
3341 break;
3342 /* LD/ST1 (4 Registers). */
3343 case 2:
3344 rpt = 4;
3345 selem = 1;
3346 break;
3347 /* LD/ST3 (3 Registers). */
3348 case 4:
3349 rpt = 1;
3350 selem = 3;
3351 break;
3352 /* LD/ST1 (3 Registers). */
3353 case 6:
3354 rpt = 3;
3355 selem = 1;
3356 break;
3357 /* LD/ST1 (1 Register). */
3358 case 7:
3359 rpt = 1;
3360 selem = 1;
3361 break;
3362 /* LD/ST2 (2 Registers). */
3363 case 8:
3364 rpt = 1;
3365 selem = 2;
3366 break;
3367 /* LD/ST1 (2 Registers). */
3368 case 10:
3369 rpt = 2;
3370 selem = 1;
3371 break;
3372 default:
3373 return AARCH64_RECORD_UNSUPPORTED;
3374 break;
3375 }
3376 for (rindex = 0; rindex < rpt; rindex++)
3377 for (eindex = 0; eindex < elements; eindex++)
3378 {
3379 uint8_t reg_tt, sindex;
3380 reg_tt = (reg_rt + rindex) % 32;
3381 for (sindex = 0; sindex < selem; sindex++)
3382 {
3383 if (bit (aarch64_insn_r->aarch64_insn, 22))
3384 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3385 else
3386 {
3387 record_buf_mem[mem_index++] = esize / 8;
3388 record_buf_mem[mem_index++] = address + addr_offset;
3389 }
3390 addr_offset = addr_offset + (esize / 8);
3391 reg_tt = (reg_tt + 1) % 32;
3392 }
3393 }
3394 }
3395
3396 if (bit (aarch64_insn_r->aarch64_insn, 23))
3397 record_buf[reg_index++] = reg_rn;
3398
3399 aarch64_insn_r->reg_rec_count = reg_index;
3400 aarch64_insn_r->mem_rec_count = mem_index / 2;
3401 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3402 record_buf_mem);
3403 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3404 record_buf);
3405 return AARCH64_RECORD_SUCCESS;
3406}
3407
3408/* Record handler for load and store instructions. */
3409
3410static unsigned int
3411aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3412{
3413 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3414 uint8_t insn_bit23, insn_bit21;
3415 uint8_t opc, size_bits, ld_flag, vector_flag;
3416 uint32_t reg_rn, reg_rt, reg_rt2;
3417 uint64_t datasize, offset;
3418 uint32_t record_buf[8];
3419 uint64_t record_buf_mem[8];
3420 CORE_ADDR address;
3421
3422 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3423 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3424 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3425 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3426 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3427 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3428 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3429 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3430 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3431 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3432 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3433
3434 /* Load/store exclusive. */
3435 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3436 {
3437 if (record_debug)
3438 debug_printf ("Process record: load/store exclusive\n");
3439
3440 if (ld_flag)
3441 {
3442 record_buf[0] = reg_rt;
3443 aarch64_insn_r->reg_rec_count = 1;
3444 if (insn_bit21)
3445 {
3446 record_buf[1] = reg_rt2;
3447 aarch64_insn_r->reg_rec_count = 2;
3448 }
3449 }
3450 else
3451 {
3452 if (insn_bit21)
3453 datasize = (8 << size_bits) * 2;
3454 else
3455 datasize = (8 << size_bits);
3456 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3457 &address);
3458 record_buf_mem[0] = datasize / 8;
3459 record_buf_mem[1] = address;
3460 aarch64_insn_r->mem_rec_count = 1;
3461 if (!insn_bit23)
3462 {
3463 /* Save register rs. */
3464 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3465 aarch64_insn_r->reg_rec_count = 1;
3466 }
3467 }
3468 }
3469 /* Load register (literal) instructions decoding. */
3470 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3471 {
3472 if (record_debug)
3473 debug_printf ("Process record: load register (literal)\n");
3474 if (vector_flag)
3475 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3476 else
3477 record_buf[0] = reg_rt;
3478 aarch64_insn_r->reg_rec_count = 1;
3479 }
3480 /* All types of load/store pair instructions decoding. */
3481 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3482 {
3483 if (record_debug)
3484 debug_printf ("Process record: load/store pair\n");
3485
3486 if (ld_flag)
3487 {
3488 if (vector_flag)
3489 {
3490 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3491 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3492 }
3493 else
3494 {
3495 record_buf[0] = reg_rt;
3496 record_buf[1] = reg_rt2;
3497 }
3498 aarch64_insn_r->reg_rec_count = 2;
3499 }
3500 else
3501 {
3502 uint16_t imm7_off;
3503 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3504 if (!vector_flag)
3505 size_bits = size_bits >> 1;
3506 datasize = 8 << (2 + size_bits);
3507 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3508 offset = offset << (2 + size_bits);
3509 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3510 &address);
3511 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3512 {
3513 if (imm7_off & 0x40)
3514 address = address - offset;
3515 else
3516 address = address + offset;
3517 }
3518
3519 record_buf_mem[0] = datasize / 8;
3520 record_buf_mem[1] = address;
3521 record_buf_mem[2] = datasize / 8;
3522 record_buf_mem[3] = address + (datasize / 8);
3523 aarch64_insn_r->mem_rec_count = 2;
3524 }
3525 if (bit (aarch64_insn_r->aarch64_insn, 23))
3526 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3527 }
3528 /* Load/store register (unsigned immediate) instructions. */
3529 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3530 {
3531 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3532 if (!(opc >> 1))
3533 if (opc & 0x01)
3534 ld_flag = 0x01;
3535 else
3536 ld_flag = 0x0;
3537 else
3538 if (size_bits != 0x03)
3539 ld_flag = 0x01;
3540 else
3541 return AARCH64_RECORD_UNKNOWN;
3542
3543 if (record_debug)
3544 {
3545 debug_printf ("Process record: load/store (unsigned immediate):"
3546 " size %x V %d opc %x\n", size_bits, vector_flag,
3547 opc);
3548 }
3549
3550 if (!ld_flag)
3551 {
3552 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3553 datasize = 8 << size_bits;
3554 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3555 &address);
3556 offset = offset << size_bits;
3557 address = address + offset;
3558
3559 record_buf_mem[0] = datasize >> 3;
3560 record_buf_mem[1] = address;
3561 aarch64_insn_r->mem_rec_count = 1;
3562 }
3563 else
3564 {
3565 if (vector_flag)
3566 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3567 else
3568 record_buf[0] = reg_rt;
3569 aarch64_insn_r->reg_rec_count = 1;
3570 }
3571 }
3572 /* Load/store register (register offset) instructions. */
3573 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3574 && insn_bits10_11 == 0x02 && insn_bit21)
3575 {
3576 if (record_debug)
3577 debug_printf ("Process record: load/store (register offset)\n");
3578 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3579 if (!(opc >> 1))
3580 if (opc & 0x01)
3581 ld_flag = 0x01;
3582 else
3583 ld_flag = 0x0;
3584 else
3585 if (size_bits != 0x03)
3586 ld_flag = 0x01;
3587 else
3588 return AARCH64_RECORD_UNKNOWN;
3589
3590 if (!ld_flag)
3591 {
3592 ULONGEST reg_rm_val;
3593
3594 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3595 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3596 if (bit (aarch64_insn_r->aarch64_insn, 12))
3597 offset = reg_rm_val << size_bits;
3598 else
3599 offset = reg_rm_val;
3600 datasize = 8 << size_bits;
3601 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3602 &address);
3603 address = address + offset;
3604 record_buf_mem[0] = datasize >> 3;
3605 record_buf_mem[1] = address;
3606 aarch64_insn_r->mem_rec_count = 1;
3607 }
3608 else
3609 {
3610 if (vector_flag)
3611 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3612 else
3613 record_buf[0] = reg_rt;
3614 aarch64_insn_r->reg_rec_count = 1;
3615 }
3616 }
3617 /* Load/store register (immediate and unprivileged) instructions. */
3618 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3619 && !insn_bit21)
3620 {
3621 if (record_debug)
3622 {
3623 debug_printf ("Process record: load/store "
3624 "(immediate and unprivileged)\n");
3625 }
3626 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3627 if (!(opc >> 1))
3628 if (opc & 0x01)
3629 ld_flag = 0x01;
3630 else
3631 ld_flag = 0x0;
3632 else
3633 if (size_bits != 0x03)
3634 ld_flag = 0x01;
3635 else
3636 return AARCH64_RECORD_UNKNOWN;
3637
3638 if (!ld_flag)
3639 {
3640 uint16_t imm9_off;
3641 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3642 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3643 datasize = 8 << size_bits;
3644 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3645 &address);
3646 if (insn_bits10_11 != 0x01)
3647 {
3648 if (imm9_off & 0x0100)
3649 address = address - offset;
3650 else
3651 address = address + offset;
3652 }
3653 record_buf_mem[0] = datasize >> 3;
3654 record_buf_mem[1] = address;
3655 aarch64_insn_r->mem_rec_count = 1;
3656 }
3657 else
3658 {
3659 if (vector_flag)
3660 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3661 else
3662 record_buf[0] = reg_rt;
3663 aarch64_insn_r->reg_rec_count = 1;
3664 }
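/* Pre- and post-indexed forms (bits 10-11 of 0b11 or 0b01) write the
   updated address back to the base register, so Rn must be recorded
   too.  */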
3665 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3666 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3667 }
3668 /* Advanced SIMD load/store instructions. */
3669 else
3670 return aarch64_record_asimd_load_store (aarch64_insn_r);
3671
3672 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3673 record_buf_mem);
3674 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3675 record_buf);
3676 return AARCH64_RECORD_SUCCESS;
3677}
3678
3679/* Record handler for data processing SIMD and floating point instructions. */
3680
3681static unsigned int
3682aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3683{
3684 uint8_t insn_bit21, opcode, rmode, reg_rd;
3685 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3686 uint8_t insn_bits11_14;
3687 uint32_t record_buf[2];
3688
3689 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3690 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3691 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3692 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3693 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3694 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3695 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3696 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3697 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
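/* This handler records at most one destination register; the gdb_assert
   at the end of the function relies on that invariant.  */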
3698
3699 if (record_debug)
3700 debug_printf ("Process record: data processing SIMD/FP: ");
3701
3702 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3703 {
3704 /* Floating point - fixed point conversion instructions. */
3705 if (!insn_bit21)
3706 {
3707 if (record_debug)
3708 debug_printf ("FP - fixed point conversion");
3709
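/* Editor's note: opcode{2:1} == 0 with rmode 0b11 is FCVTZS/FCVTZU,
   which writes a general register; the SCVTF/UCVTF forms write a
   SIMD&FP register instead.  */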
3710 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3711 record_buf[0] = reg_rd;
3712 else
3713 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3714 }
3715 /* Floating point - conditional compare instructions. */
3716 else if (insn_bits10_11 == 0x01)
3717 {
3718 if (record_debug)
3719 debug_printf ("FP - conditional compare");
3720
3721 record_buf[0] = AARCH64_CPSR_REGNUM;
3722 }
3723 /* Floating point - data processing (2-source) and
3724 conditional select instructions. */
3725 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3726 {
3727 if (record_debug)
3728 debug_printf ("FP - DP (2-source)");
3729
3730 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3731 }
3732 else if (insn_bits10_11 == 0x00)
3733 {
3734 /* Floating point - immediate instructions. */
3735 if ((insn_bits12_15 & 0x01) == 0x01
3736 || (insn_bits12_15 & 0x07) == 0x04)
3737 {
3738 if (record_debug)
3739 debug_printf ("FP - immediate");
3740 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3741 }
3742 /* Floating point - compare instructions. */
3743 else if ((insn_bits12_15 & 0x03) == 0x02)
3744 {
3745 if (record_debug)
3746 debug_printf ("FP - compare");
3747 record_buf[0] = AARCH64_CPSR_REGNUM;
3748 }
3749 /* Floating point - integer conversions instructions. */
3750 else if (insn_bits12_15 == 0x00)
3751 {
3752 /* Convert float to integer instruction. */
3753 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3754 {
3755 if (record_debug)
3756 debug_printf ("float to int conversion");
3757
3758 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3759 }
3760 /* Convert integer to float instruction. */
3761 else if ((opcode >> 1) == 0x01 && !rmode)
3762 {
3763 if (record_debug)
3764 debug_printf ("int to float conversion");
3765
3766 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3767 }
3768 /* Move float to integer instruction. */
3769 else if ((opcode >> 1) == 0x03)
3770 {
3771 if (record_debug)
3772 debug_printf ("move float to int");
3773
3774 if (!(opcode & 0x01))
3775 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3776 else
3777 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3778 }
3779 else
3780 return AARCH64_RECORD_UNKNOWN;
3781 }
3782 else
3783 return AARCH64_RECORD_UNKNOWN;
3784 }
3785 else
3786 return AARCH64_RECORD_UNKNOWN;
3787 }
3788 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3789 {
3790 if (record_debug)
3791 debug_printf ("SIMD copy");
3792
3793 /* Advanced SIMD copy instructions. */
3794 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3795 && !bit (aarch64_insn_r->aarch64_insn, 15)
3796 && bit (aarch64_insn_r->aarch64_insn, 10))
3797 {
3798 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3799 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3800 else
3801 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3802 }
3803 else
3804 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3805 }
3806 /* All remaining floating point or advanced SIMD instructions. */
3807 else
3808 {
3809 if (record_debug)
3810 debug_printf ("all remain");
3811
3812 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3813 }
3814
3815 if (record_debug)
3816 debug_printf ("\n");
3817
3818 aarch64_insn_r->reg_rec_count++;
3819 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3820 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3821 record_buf);
3822 return AARCH64_RECORD_SUCCESS;
3823}
3824
3825/* Decode the instruction's type and invoke its record handler.  */
3826
3827static unsigned int
3828aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3829{
3830 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3831
3832 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3833 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3834 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3835 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3836
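/* The checks below mirror the top-level instruction groups of the Arm
   ARM encoding table, keyed on bits 25-28 (editor's note).  */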
3837 /* Data processing - immediate instructions. */
3838 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3839 return aarch64_record_data_proc_imm (aarch64_insn_r);
3840
3841 /* Branch, exception generation and system instructions. */
3842 if (ins_bit26 && !ins_bit27 && ins_bit28)
3843 return aarch64_record_branch_except_sys (aarch64_insn_r);
3844
3845 /* Load and store instructions. */
3846 if (!ins_bit25 && ins_bit27)
3847 return aarch64_record_load_store (aarch64_insn_r);
3848
3849 /* Data processing - register instructions. */
3850 if (ins_bit25 && !ins_bit26 && ins_bit27)
3851 return aarch64_record_data_proc_reg (aarch64_insn_r);
3852
3853 /* Data processing - SIMD and floating point instructions. */
3854 if (ins_bit25 && ins_bit26 && ins_bit27)
3855 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3856
3857 return AARCH64_RECORD_UNSUPPORTED;
3858}
3859
3860/* Free the buffers allocated for the register and memory records.  */
3861
3862static void
3863deallocate_reg_mem (insn_decode_record *record)
3864{
3865 xfree (record->aarch64_regs);
3866 xfree (record->aarch64_mems);
3867}
3868
3869/* Parse the current instruction and record the values of the registers and
3870 memory that will be changed by the current instruction to
3871 record_arch_list.  Return -1 if something is wrong. */
3872
3873int
3874aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3875 CORE_ADDR insn_addr)
3876{
3877 uint32_t rec_no = 0;
3878 uint8_t insn_size = 4;
3879 uint32_t ret = 0;
3880 gdb_byte buf[insn_size];
3881 insn_decode_record aarch64_record;
3882
3883 memset (&buf[0], 0, insn_size);
3884 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3885 target_read_memory (insn_addr, &buf[0], insn_size);
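/* Editor's note: the result of target_read_memory is not checked; if
   the read fails, BUF keeps the zeroes from the memset above and the
   instruction decodes as all-zero bits.  */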
3886 aarch64_record.aarch64_insn
3887 = (uint32_t) extract_unsigned_integer (&buf[0],
3888 insn_size,
3889 gdbarch_byte_order (gdbarch));
3890 aarch64_record.regcache = regcache;
3891 aarch64_record.this_addr = insn_addr;
3892 aarch64_record.gdbarch = gdbarch;
3893
3894 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3895 if (ret == AARCH64_RECORD_UNSUPPORTED)
3896 {
3897 printf_unfiltered (_("Process record does not support instruction "
3898 "0x%0x at address %s.\n"),
3899 aarch64_record.aarch64_insn,
3900 paddress (gdbarch, insn_addr));
3901 ret = -1;
3902 }
3903
3904 if (0 == ret)
3905 {
3906 /* Record registers. */
3907 record_full_arch_list_add_reg (aarch64_record.regcache,
3908 AARCH64_PC_REGNUM);
3909 /* Always record register CPSR. */
3910 record_full_arch_list_add_reg (aarch64_record.regcache,
3911 AARCH64_CPSR_REGNUM);
3912 if (aarch64_record.aarch64_regs)
3913 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3914 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3915 aarch64_record.aarch64_regs[rec_no]))
3916 ret = -1;
3917
3918 /* Record memories. */
3919 if (aarch64_record.aarch64_mems)
3920 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3921 if (record_full_arch_list_add_mem
3922 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3923 aarch64_record.aarch64_mems[rec_no].len))
3924 ret = -1;
3925
3926 if (record_full_arch_list_add_end ())
3927 ret = -1;
3928 }
3929
3930 deallocate_reg_mem (&aarch64_record);
3931 return ret;
3932}