/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

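/* Instruction-word field helpers: submask (X) builds a mask of bits
   0..X, bit (OBJ, ST) extracts bit ST of OBJ, and bits (OBJ, ST, FN)
   extracts bits ST..FN inclusive.  */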
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* Specials: ip0 and ip1 are the architectural names for the
     intra-procedure-call scratch registers x16 and x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

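  /* Seed the symbolic evaluator: each tracked register starts out
     holding its own entry value (REG + 0), and STACK models stores
     the prologue makes relative to the incoming stack pointer.  */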
  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

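          /* A pre-indexed STP also writes the updated address back into
             the base register; mirror that in the tracked value.  */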
          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }
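  /* The D registers are pseudo registers, numbered after the raw
     registers, hence the gdbarch_num_regs offset below.  */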
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
                            &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

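      /* No function boundaries are available, so fall back on the
         frame pointer: assume a minimal 16-byte frame holding the
         saved x29/x30 pair.  */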
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory
   are not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

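  /* Prologue analysis reads target registers and memory, either of
     which may be unavailable (e.g. in a trimmed core file), so treat
     NOT_AVAILABLE_ERROR as non-fatal and leave the cache marked
     unavailable.  */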
  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
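    /* On AArch64 the CFA is defined as the stack pointer at the call
       site, so the previous SP is simply the CFA.  */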
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as
             for scalar types), but cap it at 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

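                /* Every member must have the same type code and length
                   as the first for the aggregate to be homogeneous.  */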
                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
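          /* A complex value needs two consecutive V registers (real
             and imaginary parts), so at least two must be left,
             i.e. NSRN <= 6.  */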
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates.  */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straight-forward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}

/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in r0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with r0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regcache_cooked_write (regs, regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte tmpbuf[MAX_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("write HFA or HVA return value element %d to %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }

          memcpy (tmpbuf, valbuf, len);
          regcache_cooked_write (regs, regno, tmpbuf);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regcache_cooked_write (regs, regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}
1839
1840/* Implement the "return_value" gdbarch method. */
1841
1842static enum return_value_convention
1843aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1844 struct type *valtype, struct regcache *regcache,
1845 gdb_byte *readbuf, const gdb_byte *writebuf)
1846{
1847
1848 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1849 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1850 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1851 {
1852 if (aarch64_return_in_memory (gdbarch, valtype))
1853 {
1854 if (aarch64_debug)
b277c936 1855 debug_printf ("return value in memory\n");
1856 return RETURN_VALUE_STRUCT_CONVENTION;
1857 }
1858 }
1859
1860 if (writebuf)
1861 aarch64_store_return_value (valtype, regcache, writebuf);
1862
1863 if (readbuf)
1864 aarch64_extract_return_value (valtype, regcache, readbuf);
1865
1866 if (aarch64_debug)
b277c936 1867 debug_printf ("return value in registers\n");
1868
1869 return RETURN_VALUE_REGISTER_CONVENTION;
1870}
1871
1872/* Implement the "get_longjmp_target" gdbarch method. */
1873
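/* X0 holds the jmp_buf pointer at the point of the longjmp call. The
 jmp_buf layout is OS-specific; the OS ABI layer supplies it through
 tdep->jb_pc and tdep->jb_elt_size. As a purely illustrative example,
 with jb_pc == 11 and jb_elt_size == 8 the saved PC would be read
 from the 8-byte slot at jb_addr + 88. */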
1874static int
1875aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1876{
1877 CORE_ADDR jb_addr;
1878 gdb_byte buf[X_REGISTER_SIZE];
1879 struct gdbarch *gdbarch = get_frame_arch (frame);
1880 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1881 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1882
1883 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1884
1885 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1886 X_REGISTER_SIZE))
1887 return 0;
1888
1889 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1890 return 1;
1891}
1892
1893/* Implement the "gen_return_address" gdbarch method. */
1894
1895static void
1896aarch64_gen_return_address (struct gdbarch *gdbarch,
1897 struct agent_expr *ax, struct axs_value *value,
1898 CORE_ADDR scope)
1899{
1900 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1901 value->kind = axs_lvalue_register;
1902 value->u.reg = AARCH64_LR_REGNUM;
1903}
1904\f
1905
1906/* Return the pseudo register name corresponding to register regnum. */
1907
1908static const char *
1909aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1910{
1911 static const char *const q_name[] =
1912 {
1913 "q0", "q1", "q2", "q3",
1914 "q4", "q5", "q6", "q7",
1915 "q8", "q9", "q10", "q11",
1916 "q12", "q13", "q14", "q15",
1917 "q16", "q17", "q18", "q19",
1918 "q20", "q21", "q22", "q23",
1919 "q24", "q25", "q26", "q27",
1920 "q28", "q29", "q30", "q31",
1921 };
1922
1923 static const char *const d_name[] =
1924 {
1925 "d0", "d1", "d2", "d3",
1926 "d4", "d5", "d6", "d7",
1927 "d8", "d9", "d10", "d11",
1928 "d12", "d13", "d14", "d15",
1929 "d16", "d17", "d18", "d19",
1930 "d20", "d21", "d22", "d23",
1931 "d24", "d25", "d26", "d27",
1932 "d28", "d29", "d30", "d31",
1933 };
1934
1935 static const char *const s_name[] =
1936 {
1937 "s0", "s1", "s2", "s3",
1938 "s4", "s5", "s6", "s7",
1939 "s8", "s9", "s10", "s11",
1940 "s12", "s13", "s14", "s15",
1941 "s16", "s17", "s18", "s19",
1942 "s20", "s21", "s22", "s23",
1943 "s24", "s25", "s26", "s27",
1944 "s28", "s29", "s30", "s31",
1945 };
1946
1947 static const char *const h_name[] =
1948 {
1949 "h0", "h1", "h2", "h3",
1950 "h4", "h5", "h6", "h7",
1951 "h8", "h9", "h10", "h11",
1952 "h12", "h13", "h14", "h15",
1953 "h16", "h17", "h18", "h19",
1954 "h20", "h21", "h22", "h23",
1955 "h24", "h25", "h26", "h27",
1956 "h28", "h29", "h30", "h31",
1957 };
1958
1959 static const char *const b_name[] =
1960 {
1961 "b0", "b1", "b2", "b3",
1962 "b4", "b5", "b6", "b7",
1963 "b8", "b9", "b10", "b11",
1964 "b12", "b13", "b14", "b15",
1965 "b16", "b17", "b18", "b19",
1966 "b20", "b21", "b22", "b23",
1967 "b24", "b25", "b26", "b27",
1968 "b28", "b29", "b30", "b31",
1969 };
1970
1971 regnum -= gdbarch_num_regs (gdbarch);
1972
1973 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1974 return q_name[regnum - AARCH64_Q0_REGNUM];
1975
1976 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1977 return d_name[regnum - AARCH64_D0_REGNUM];
1978
1979 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1980 return s_name[regnum - AARCH64_S0_REGNUM];
1981
1982 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1983 return h_name[regnum - AARCH64_H0_REGNUM];
1984
1985 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1986 return b_name[regnum - AARCH64_B0_REGNUM];
1987
1988 internal_error (__FILE__, __LINE__,
1989 _("aarch64_pseudo_register_name: bad register number %d"),
1990 regnum);
1991}
1992
1993/* Implement the "pseudo_register_type" tdesc_arch_data method. */
1994
1995static struct type *
1996aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1997{
1998 regnum -= gdbarch_num_regs (gdbarch);
1999
2000 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2001 return aarch64_vnq_type (gdbarch);
2002
2003 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2004 return aarch64_vnd_type (gdbarch);
2005
2006 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2007 return aarch64_vns_type (gdbarch);
2008
2009 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2010 return aarch64_vnh_type (gdbarch);
2011
2012 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2013 return aarch64_vnb_type (gdbarch);
2014
2015 internal_error (__FILE__, __LINE__,
2016 _("aarch64_pseudo_register_type: bad register number %d"),
2017 regnum);
2018}
2019
2020/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2021
2022static int
2023aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2024 struct reggroup *group)
2025{
2026 regnum -= gdbarch_num_regs (gdbarch);
2027
2028 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2029 return group == all_reggroup || group == vector_reggroup;
2030 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2031 return (group == all_reggroup || group == vector_reggroup
2032 || group == float_reggroup);
2033 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2034 return (group == all_reggroup || group == vector_reggroup
2035 || group == float_reggroup);
2036 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2037 return group == all_reggroup || group == vector_reggroup;
2038 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2039 return group == all_reggroup || group == vector_reggroup;
2040
2041 return group == all_reggroup;
2042}
2043
2044/* Implement the "pseudo_register_read_value" gdbarch method. */
2045
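/* Each Qn/Dn/Sn/Hn/Bn pseudo register is a view of the low bytes of
 the corresponding raw Vn register; for example, reading pseudo
 register "s5" reads raw register v5 and returns its low
 S_REGISTER_SIZE (4) bytes. */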
2046static struct value *
2047aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2048 struct regcache *regcache,
2049 int regnum)
2050{
2051 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2052 struct value *result_value;
2053 gdb_byte *buf;
2054
2055 result_value = allocate_value (register_type (gdbarch, regnum));
2056 VALUE_LVAL (result_value) = lval_register;
2057 VALUE_REGNUM (result_value) = regnum;
2058 buf = value_contents_raw (result_value);
2059
2060 regnum -= gdbarch_num_regs (gdbarch);
2061
2062 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2063 {
2064 enum register_status status;
2065 unsigned v_regnum;
2066
2067 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2068 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2069 if (status != REG_VALID)
2070 mark_value_bytes_unavailable (result_value, 0,
2071 TYPE_LENGTH (value_type (result_value)));
2072 else
2073 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2074 return result_value;
2075 }
2076
2077 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2078 {
2079 enum register_status status;
2080 unsigned v_regnum;
2081
2082 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2083 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2084 if (status != REG_VALID)
2085 mark_value_bytes_unavailable (result_value, 0,
2086 TYPE_LENGTH (value_type (result_value)));
2087 else
2088 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2089 return result_value;
2090 }
2091
2092 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2093 {
2094 enum register_status status;
2095 unsigned v_regnum;
2096
2097 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2098 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2099 if (status != REG_VALID)
2100 mark_value_bytes_unavailable (result_value, 0,
2101 TYPE_LENGTH (value_type (result_value)));
2102 else
2103 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2104 return result_value;
2105 }
2106
2107 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2108 {
2109 enum register_status status;
2110 unsigned v_regnum;
2111
2112 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2113 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2114 if (status != REG_VALID)
2115 mark_value_bytes_unavailable (result_value, 0,
2116 TYPE_LENGTH (value_type (result_value)));
2117 else
2118 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2119 return result_value;
2120 }
2121
2122 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2123 {
2124 enum register_status status;
2125 unsigned v_regnum;
2126
2127 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2128 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2129 if (status != REG_VALID)
2130 mark_value_bytes_unavailable (result_value, 0,
2131 TYPE_LENGTH (value_type (result_value)));
2132 else
2133 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2134 return result_value;
2135 }
2136
 2137 gdb_assert_not_reached ("regnum out of bounds");
2138}
2139
2140/* Implement the "pseudo_register_write" gdbarch method. */
2141
2142static void
2143aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2144 int regnum, const gdb_byte *buf)
2145{
2146 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2147
 2148 /* Ensure the register buffer is zero; we want GDB writes of the
 2149 various 'scalar' pseudo registers to behave like architectural
 2150 writes: register-width bytes are written and the remainder is
 2151 set to zero. */
2152 memset (reg_buf, 0, sizeof (reg_buf));
2153
2154 regnum -= gdbarch_num_regs (gdbarch);
2155
2156 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2157 {
 2158 /* Pseudo Q registers. */
2159 unsigned v_regnum;
2160
2161 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2162 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2163 regcache_raw_write (regcache, v_regnum, reg_buf);
2164 return;
2165 }
2166
2167 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2168 {
 2169 /* Pseudo D registers. */
2170 unsigned v_regnum;
2171
2172 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2173 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2174 regcache_raw_write (regcache, v_regnum, reg_buf);
2175 return;
2176 }
2177
2178 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2179 {
2180 unsigned v_regnum;
2181
2182 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2183 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2184 regcache_raw_write (regcache, v_regnum, reg_buf);
2185 return;
2186 }
2187
2188 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2189 {
 2190 /* Pseudo H registers. */
2191 unsigned v_regnum;
2192
2193 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2194 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2195 regcache_raw_write (regcache, v_regnum, reg_buf);
2196 return;
2197 }
2198
2199 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2200 {
 2201 /* Pseudo B registers. */
2202 unsigned v_regnum;
2203
2204 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2205 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2206 regcache_raw_write (regcache, v_regnum, reg_buf);
2207 return;
2208 }
2209
 2210 gdb_assert_not_reached ("regnum out of bounds");
2211}
2212
2213/* Callback function for user_reg_add. */
2214
2215static struct value *
2216value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2217{
9a3c8263 2218 const int *reg_p = (const int *) baton;
2219
2220 return value_of_register (*reg_p, frame);
2221}
2222\f
2223
2224/* Implement the "software_single_step" gdbarch method, needed to
2225 single step through atomic sequences on AArch64. */
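/* As an illustration, a typical atomic increment compiled for AArch64
 looks like this (sketch, not taken from this file):

 retry:
 ldxr w1, [x0] ; Load Exclusive opens the sequence.
 add w1, w1, #1
 stxr w2, w1, [x0] ; Store Exclusive closes it.
 cbnz w2, retry ; Retry if exclusivity was lost.

 A breakpoint inside the sequence would clear the exclusive monitor
 and the sequence could never complete, so breakpoints are placed
 past the Store Exclusive and at the destination of any conditional
 (B.cond) branch found within the sequence. */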
2226
93f9a11f 2227static VEC (CORE_ADDR) *
2228aarch64_software_single_step (struct frame_info *frame)
2229{
2230 struct regcache *regcache = get_current_regcache ();
2231 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2232 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2233 const int insn_size = 4;
2234 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2235 CORE_ADDR pc = regcache_read_pc (regcache);
2236 CORE_ADDR breaks[2] = { -1, -1 };
2237 CORE_ADDR loc = pc;
2238 CORE_ADDR closing_insn = 0;
2239 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2240 byte_order_for_code);
2241 int index;
2242 int insn_count;
2243 int bc_insn_count = 0; /* Conditional branch instruction count. */
2244 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
f77ee802 2245 aarch64_inst inst;
93f9a11f 2246 VEC (CORE_ADDR) *next_pcs = NULL;
f77ee802 2247
43cdf5ae 2248 if (aarch64_decode_insn (insn, &inst, 1) != 0)
93f9a11f 2249 return NULL;
2250
2251 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2252 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
93f9a11f 2253 return NULL;
2254
2255 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2256 {
2257 loc += insn_size;
2258 insn = read_memory_unsigned_integer (loc, insn_size,
2259 byte_order_for_code);
2260
43cdf5ae 2261 if (aarch64_decode_insn (insn, &inst, 1) != 0)
93f9a11f 2262 return NULL;
9404b58f 2263 /* Check if the instruction is a conditional branch. */
f77ee802 2264 if (inst.opcode->iclass == condbranch)
9404b58f 2265 {
2266 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2267
9404b58f 2268 if (bc_insn_count >= 1)
93f9a11f 2269 return NULL;
2270
2271 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2272 breaks[1] = loc + inst.operands[0].imm.value;
2273
2274 bc_insn_count++;
2275 last_breakpoint++;
2276 }
2277
2278 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2279 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2280 {
2281 closing_insn = loc;
2282 break;
2283 }
2284 }
2285
2286 /* We didn't find a closing Store Exclusive instruction, fall back. */
2287 if (!closing_insn)
93f9a11f 2288 return NULL;
2289
2290 /* Insert breakpoint after the end of the atomic sequence. */
2291 breaks[0] = loc + insn_size;
2292
2293 /* Check for duplicated breakpoints, and also check that the second
2294 breakpoint is not within the atomic sequence. */
2295 if (last_breakpoint
2296 && (breaks[1] == breaks[0]
2297 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2298 last_breakpoint = 0;
2299
2300 /* Insert the breakpoint at the end of the sequence, and one at the
2301 destination of the conditional branch, if it exists. */
2302 for (index = 0; index <= last_breakpoint; index++)
93f9a11f 2303 VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);
9404b58f 2304
93f9a11f 2305 return next_pcs;
2306}
2307
2308struct displaced_step_closure
2309{
 2310 /* It is true when a conditional instruction, such as B.COND or
 2311 TBZ, is being displaced stepped. */
2312 int cond;
2313
2314 /* PC adjustment offset after displaced stepping. */
2315 int32_t pc_adjust;
2316};
2317
2318/* Data when visiting instructions for displaced stepping. */
2319
2320struct aarch64_displaced_step_data
2321{
2322 struct aarch64_insn_data base;
2323
 2324 /* The address at which the instruction will be executed. */
2325 CORE_ADDR new_addr;
2326 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2327 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2328 /* Number of instructions in INSN_BUF. */
2329 unsigned insn_count;
2330 /* Registers when doing displaced stepping. */
2331 struct regcache *regs;
2332
2333 struct displaced_step_closure *dsc;
2334};
2335
2336/* Implementation of aarch64_insn_visitor method "b". */
2337
2338static void
2339aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2340 struct aarch64_insn_data *data)
2341{
2342 struct aarch64_displaced_step_data *dsd
2343 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2344 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2345
2346 if (can_encode_int32 (new_offset, 28))
2347 {
2348 /* Emit B rather than BL, because executing BL on a new address
2349 will get the wrong address into LR. In order to avoid this,
2350 we emit B, and update LR if the instruction is BL. */
2351 emit_b (dsd->insn_buf, 0, new_offset);
2352 dsd->insn_count++;
2353 }
2354 else
2355 {
2356 /* Write NOP. */
2357 emit_nop (dsd->insn_buf);
2358 dsd->insn_count++;
2359 dsd->dsc->pc_adjust = offset;
2360 }
2361
2362 if (is_bl)
2363 {
2364 /* Update LR. */
2365 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2366 data->insn_addr + 4);
2367 }
2368}
2369
2370/* Implementation of aarch64_insn_visitor method "b_cond". */
2371
2372static void
2373aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2374 struct aarch64_insn_data *data)
2375{
2376 struct aarch64_displaced_step_data *dsd
2377 = (struct aarch64_displaced_step_data *) data;
2378
 2379 /* GDB has to fix up the PC after displaced stepping this instruction
 2380 differently depending on whether the condition is true or false.
 2381 Instead of checking COND against the condition flags, we can emit
 2382 the following instructions, and GDB can tell how to fix up the PC
 2383 from the resulting PC value.
2384
2385 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2386 INSN1 ;
2387 TAKEN:
2388 INSN2
2389 */
2390
2391 emit_bcond (dsd->insn_buf, cond, 8);
2392 dsd->dsc->cond = 1;
2393 dsd->dsc->pc_adjust = offset;
2394 dsd->insn_count = 1;
2395}
2396
2397/* Build an aarch64_register operand dynamically. If the register is
2398 known statically, it should be made a global instead of using this
2399 helper function. */
2400
2401static struct aarch64_register
2402aarch64_register (unsigned num, int is64)
2403{
2404 return (struct aarch64_register) { num, is64 };
2405}
2406
2407/* Implementation of aarch64_insn_visitor method "cb". */
2408
2409static void
2410aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2411 const unsigned rn, int is64,
2412 struct aarch64_insn_data *data)
2413{
2414 struct aarch64_displaced_step_data *dsd
2415 = (struct aarch64_displaced_step_data *) data;
2416
2417 /* The offset is out of range for a compare and branch
2418 instruction. We can use the following instructions instead:
2419
2420 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2421 INSN1 ;
2422 TAKEN:
2423 INSN2
2424 */
2425 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2426 dsd->insn_count = 1;
2427 dsd->dsc->cond = 1;
2428 dsd->dsc->pc_adjust = offset;
2429}
2430
2431/* Implementation of aarch64_insn_visitor method "tb". */
2432
2433static void
2434aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2435 const unsigned rt, unsigned bit,
2436 struct aarch64_insn_data *data)
2437{
2438 struct aarch64_displaced_step_data *dsd
2439 = (struct aarch64_displaced_step_data *) data;
2440
2441 /* The offset is out of range for a test bit and branch
 2442 instruction. We can use the following instructions instead:
2443
2444 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2445 INSN1 ;
2446 TAKEN:
2447 INSN2
2448
2449 */
2450 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2451 dsd->insn_count = 1;
2452 dsd->dsc->cond = 1;
2453 dsd->dsc->pc_adjust = offset;
2454}
2455
2456/* Implementation of aarch64_insn_visitor method "adr". */
2457
2458static void
2459aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2460 const int is_adrp, struct aarch64_insn_data *data)
2461{
2462 struct aarch64_displaced_step_data *dsd
2463 = (struct aarch64_displaced_step_data *) data;
2464 /* We know exactly the address the ADR{P,} instruction will compute.
2465 We can just write it to the destination register. */
2466 CORE_ADDR address = data->insn_addr + offset;
2467
2468 if (is_adrp)
2469 {
 2470 /* Clear the lower 12 bits of the address to get its 4K page. */
2471 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2472 address & ~0xfff);
2473 }
2474 else
2475 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2476 address);
2477
2478 dsd->dsc->pc_adjust = 4;
2479 emit_nop (dsd->insn_buf);
2480 dsd->insn_count = 1;
2481}
2482
2483/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2484
2485static void
2486aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2487 const unsigned rt, const int is64,
2488 struct aarch64_insn_data *data)
2489{
2490 struct aarch64_displaced_step_data *dsd
2491 = (struct aarch64_displaced_step_data *) data;
2492 CORE_ADDR address = data->insn_addr + offset;
2493 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2494
2495 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2496 address);
2497
2498 if (is_sw)
2499 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2500 aarch64_register (rt, 1), zero);
2501 else
2502 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2503 aarch64_register (rt, 1), zero);
2504
2505 dsd->dsc->pc_adjust = 4;
2506}
2507
2508/* Implementation of aarch64_insn_visitor method "others". */
2509
2510static void
2511aarch64_displaced_step_others (const uint32_t insn,
2512 struct aarch64_insn_data *data)
2513{
2514 struct aarch64_displaced_step_data *dsd
2515 = (struct aarch64_displaced_step_data *) data;
2516
e1c587c3 2517 aarch64_emit_insn (dsd->insn_buf, insn);
2518 dsd->insn_count = 1;
2519
2520 if ((insn & 0xfffffc1f) == 0xd65f0000)
2521 {
2522 /* RET */
2523 dsd->dsc->pc_adjust = 0;
2524 }
2525 else
2526 dsd->dsc->pc_adjust = 4;
2527}
2528
2529static const struct aarch64_insn_visitor visitor =
2530{
2531 aarch64_displaced_step_b,
2532 aarch64_displaced_step_b_cond,
2533 aarch64_displaced_step_cb,
2534 aarch64_displaced_step_tb,
2535 aarch64_displaced_step_adr,
2536 aarch64_displaced_step_ldr_literal,
2537 aarch64_displaced_step_others,
2538};
2539
2540/* Implement the "displaced_step_copy_insn" gdbarch method. */
2541
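/* Sketch of the displaced-stepping flow: GDB copies a possibly
 rewritten form of the instruction at FROM into the scratch pad at
 TO, single-steps it there, and then calls
 aarch64_displaced_step_fixup to patch up state. For example, a B
 whose target cannot be reached by a branch from the scratch pad is
 rewritten as a NOP with dsc->pc_adjust set to the branch offset;
 the fixup stage then sets the PC to FROM + pc_adjust. */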
2542struct displaced_step_closure *
2543aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2544 CORE_ADDR from, CORE_ADDR to,
2545 struct regcache *regs)
2546{
2547 struct displaced_step_closure *dsc = NULL;
2548 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2549 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2550 struct aarch64_displaced_step_data dsd;
2551 aarch64_inst inst;
2552
2553 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2554 return NULL;
2555
2556 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 2557 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2558 {
2559 /* We can't displaced step atomic sequences. */
2560 return NULL;
2561 }
2562
2563 dsc = XCNEW (struct displaced_step_closure);
2564 dsd.base.insn_addr = from;
2565 dsd.new_addr = to;
2566 dsd.regs = regs;
2567 dsd.dsc = dsc;
034f1a81 2568 dsd.insn_count = 0;
2569 aarch64_relocate_instruction (insn, &visitor,
2570 (struct aarch64_insn_data *) &dsd);
2571 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2572
2573 if (dsd.insn_count != 0)
2574 {
2575 int i;
2576
 2577 /* The instruction can be relocated to the scratch pad. Copy
 2578 the relocated instruction(s) there. */
2579 for (i = 0; i < dsd.insn_count; i++)
2580 {
2581 if (debug_displaced)
2582 {
2583 debug_printf ("displaced: writing insn ");
2584 debug_printf ("%.8x", dsd.insn_buf[i]);
2585 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2586 }
2587 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2588 (ULONGEST) dsd.insn_buf[i]);
2589 }
2590 }
2591 else
2592 {
2593 xfree (dsc);
2594 dsc = NULL;
2595 }
2596
2597 return dsc;
2598}
2599
2600/* Implement the "displaced_step_fixup" gdbarch method. */
2601
2602void
2603aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2604 struct displaced_step_closure *dsc,
2605 CORE_ADDR from, CORE_ADDR to,
2606 struct regcache *regs)
2607{
2608 if (dsc->cond)
2609 {
2610 ULONGEST pc;
2611
2612 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2613 if (pc - to == 8)
2614 {
2615 /* Condition is true. */
2616 }
2617 else if (pc - to == 4)
2618 {
2619 /* Condition is false. */
2620 dsc->pc_adjust = 4;
2621 }
2622 else
2623 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2624 }
2625
2626 if (dsc->pc_adjust != 0)
2627 {
2628 if (debug_displaced)
2629 {
2630 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2631 paddress (gdbarch, from), dsc->pc_adjust);
2632 }
2633 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2634 from + dsc->pc_adjust);
2635 }
2636}
2637
2638/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2639
2640int
2641aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2642 struct displaced_step_closure *closure)
2643{
2644 return 1;
2645}
2646
2647/* Initialize the current architecture based on INFO. If possible,
2648 re-use an architecture from ARCHES, which is a list of
2649 architectures already created during this debugging session.
2650
2651 Called e.g. at program startup, when reading a core file, and when
2652 reading a binary file. */
2653
2654static struct gdbarch *
2655aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2656{
2657 struct gdbarch_tdep *tdep;
2658 struct gdbarch *gdbarch;
2659 struct gdbarch_list *best_arch;
2660 struct tdesc_arch_data *tdesc_data = NULL;
2661 const struct target_desc *tdesc = info.target_desc;
2662 int i;
2663 int valid_p = 1;
2664 const struct tdesc_feature *feature;
2665 int num_regs = 0;
2666 int num_pseudo_regs = 0;
2667
2668 /* Ensure we always have a target descriptor. */
2669 if (!tdesc_has_registers (tdesc))
2670 tdesc = tdesc_aarch64;
2671
2672 gdb_assert (tdesc);
2673
2674 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2675
2676 if (feature == NULL)
2677 return NULL;
2678
2679 tdesc_data = tdesc_data_alloc ();
2680
2681 /* Validate the descriptor provides the mandatory core R registers
2682 and allocate their numbers. */
2683 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2684 valid_p &=
2685 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2686 aarch64_r_register_names[i]);
2687
2688 num_regs = AARCH64_X0_REGNUM + i;
2689
2690 /* Look for the V registers. */
2691 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2692 if (feature)
2693 {
2694 /* Validate the descriptor provides the mandatory V registers
2695 and allocate their numbers. */
2696 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2697 valid_p &=
2698 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2699 aarch64_v_register_names[i]);
2700
2701 num_regs = AARCH64_V0_REGNUM + i;
2702
2703 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2704 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2705 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2706 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2707 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2708 }
2709
2710 if (!valid_p)
2711 {
2712 tdesc_data_cleanup (tdesc_data);
2713 return NULL;
2714 }
2715
2716 /* AArch64 code is always little-endian. */
2717 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2718
2719 /* If there is already a candidate, use it. */
2720 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2721 best_arch != NULL;
2722 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2723 {
2724 /* Found a match. */
2725 break;
2726 }
2727
2728 if (best_arch != NULL)
2729 {
2730 if (tdesc_data != NULL)
2731 tdesc_data_cleanup (tdesc_data);
2732 return best_arch->gdbarch;
2733 }
2734
8d749320 2735 tdep = XCNEW (struct gdbarch_tdep);
2736 gdbarch = gdbarch_alloc (&info, tdep);
2737
2738 /* This should be low enough for everything. */
2739 tdep->lowest_pc = 0x20;
2740 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2741 tdep->jb_elt_size = 8;
2742
2743 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2744 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2745
2746 /* Frame handling. */
2747 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2748 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2749 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2750
2751 /* Advance PC across function entry code. */
2752 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2753
2754 /* The stack grows downward. */
2755 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2756
2757 /* Breakpoint manipulation. */
2758 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2759 aarch64_breakpoint::kind_from_pc);
2760 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2761 aarch64_breakpoint::bp_from_kind);
07b287a0 2762 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 2763 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2764
2765 /* Information about registers, etc. */
2766 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2767 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2768 set_gdbarch_num_regs (gdbarch, num_regs);
2769
2770 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2771 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2772 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2773 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2774 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2775 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2776 aarch64_pseudo_register_reggroup_p);
2777
2778 /* ABI */
2779 set_gdbarch_short_bit (gdbarch, 16);
2780 set_gdbarch_int_bit (gdbarch, 32);
2781 set_gdbarch_float_bit (gdbarch, 32);
2782 set_gdbarch_double_bit (gdbarch, 64);
2783 set_gdbarch_long_double_bit (gdbarch, 128);
2784 set_gdbarch_long_bit (gdbarch, 64);
2785 set_gdbarch_long_long_bit (gdbarch, 64);
2786 set_gdbarch_ptr_bit (gdbarch, 64);
2787 set_gdbarch_char_signed (gdbarch, 0);
2788 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2789 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2790 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2791
2792 /* Internal <-> external register number maps. */
2793 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2794
2795 /* Returning results. */
2796 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2797
2798 /* Disassembly. */
2799 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2800
2801 /* Virtual tables. */
2802 set_gdbarch_vbit_in_delta (gdbarch, 1);
2803
2804 /* Hook in the ABI-specific overrides, if they have been registered. */
2805 info.target_desc = tdesc;
2806 info.tdep_info = (void *) tdesc_data;
2807 gdbarch_init_osabi (info, gdbarch);
2808
2809 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2810
2811 /* Add some default predicates. */
2812 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2813 dwarf2_append_unwinders (gdbarch);
2814 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2815
2816 frame_base_set_default (gdbarch, &aarch64_normal_base);
2817
 2818 /* Now that we have tuned the configuration, set a few final
 2819 things based on what the OS ABI has told us. */
2820
2821 if (tdep->jb_pc >= 0)
2822 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2823
2824 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2825
2826 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2827
2828 /* Add standard register aliases. */
2829 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2830 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2831 value_of_aarch64_user_reg,
2832 &aarch64_register_aliases[i].regnum);
2833
2834 return gdbarch;
2835}
2836
2837static void
2838aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2839{
2840 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2841
2842 if (tdep == NULL)
2843 return;
2844
2845 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2846 paddress (gdbarch, tdep->lowest_pc));
2847}
2848
2849/* Suppress warning from -Wmissing-prototypes. */
2850extern initialize_file_ftype _initialize_aarch64_tdep;
2851
2852void
2853_initialize_aarch64_tdep (void)
2854{
2855 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2856 aarch64_dump_tdep);
2857
2858 initialize_tdesc_aarch64 ();
2859
2860 /* Debug this file's internals. */
2861 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2862Set AArch64 debugging."), _("\
2863Show AArch64 debugging."), _("\
2864When on, AArch64 specific debugging is enabled."),
2865 NULL,
2866 show_aarch64_debug,
2867 &setdebuglist, &showdebuglist);
2868}
2869
2870/* AArch64 process record-replay related structures, defines etc. */
2871
2872#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2873 do \
2874 { \
2875 unsigned int reg_len = LENGTH; \
2876 if (reg_len) \
2877 { \
2878 REGS = XNEWVEC (uint32_t, reg_len); \
 2879 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
2880 } \
2881 } \
2882 while (0)
2883
2884#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2885 do \
2886 { \
2887 unsigned int mem_len = LENGTH; \
2888 if (mem_len) \
2889 { \
2890 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
 2891 memcpy (MEMS, &RECORD_BUF[0], \
 2892 sizeof (struct aarch64_mem_r) * LENGTH); \
2893 } \
2894 } \
2895 while (0)
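/* Usage sketch: a record handler collects the registers and memory
 ranges an instruction will clobber into local buffers, then calls
 e.g. REG_ALLOC (r->aarch64_regs, r->reg_rec_count, record_buf) to
 heap-allocate the arrays consumed later by the record target. */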
2896
2897/* AArch64 record/replay structures and enumerations. */
2898
2899struct aarch64_mem_r
2900{
2901 uint64_t len; /* Record length. */
2902 uint64_t addr; /* Memory address. */
2903};
2904
2905enum aarch64_record_result
2906{
2907 AARCH64_RECORD_SUCCESS,
2908 AARCH64_RECORD_FAILURE,
2909 AARCH64_RECORD_UNSUPPORTED,
2910 AARCH64_RECORD_UNKNOWN
2911};
2912
2913typedef struct insn_decode_record_t
2914{
2915 struct gdbarch *gdbarch;
2916 struct regcache *regcache;
2917 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2918 uint32_t aarch64_insn; /* Insn to be recorded. */
2919 uint32_t mem_rec_count; /* Count of memory records. */
2920 uint32_t reg_rec_count; /* Count of register records. */
2921 uint32_t *aarch64_regs; /* Registers to be recorded. */
2922 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2923} insn_decode_record;
2924
2925/* Record handler for data processing - register instructions. */
2926
2927static unsigned int
2928aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2929{
2930 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2931 uint32_t record_buf[4];
2932
2933 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2934 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2935 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2936
2937 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2938 {
2939 uint8_t setflags;
2940
2941 /* Logical (shifted register). */
2942 if (insn_bits24_27 == 0x0a)
2943 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2944 /* Add/subtract. */
2945 else if (insn_bits24_27 == 0x0b)
2946 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2947 else
2948 return AARCH64_RECORD_UNKNOWN;
2949
2950 record_buf[0] = reg_rd;
2951 aarch64_insn_r->reg_rec_count = 1;
2952 if (setflags)
2953 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2954 }
2955 else
2956 {
2957 if (insn_bits24_27 == 0x0b)
2958 {
2959 /* Data-processing (3 source). */
2960 record_buf[0] = reg_rd;
2961 aarch64_insn_r->reg_rec_count = 1;
2962 }
2963 else if (insn_bits24_27 == 0x0a)
2964 {
2965 if (insn_bits21_23 == 0x00)
2966 {
2967 /* Add/subtract (with carry). */
2968 record_buf[0] = reg_rd;
2969 aarch64_insn_r->reg_rec_count = 1;
2970 if (bit (aarch64_insn_r->aarch64_insn, 29))
2971 {
2972 record_buf[1] = AARCH64_CPSR_REGNUM;
2973 aarch64_insn_r->reg_rec_count = 2;
2974 }
2975 }
2976 else if (insn_bits21_23 == 0x02)
2977 {
2978 /* Conditional compare (register) and conditional compare
2979 (immediate) instructions. */
2980 record_buf[0] = AARCH64_CPSR_REGNUM;
2981 aarch64_insn_r->reg_rec_count = 1;
2982 }
2983 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2984 {
 2985 /* Conditional select. */
2986 /* Data-processing (2 source). */
2987 /* Data-processing (1 source). */
2988 record_buf[0] = reg_rd;
2989 aarch64_insn_r->reg_rec_count = 1;
2990 }
2991 else
2992 return AARCH64_RECORD_UNKNOWN;
2993 }
2994 }
2995
2996 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2997 record_buf);
2998 return AARCH64_RECORD_SUCCESS;
2999}
3000
3001/* Record handler for data processing - immediate instructions. */
3002
3003static unsigned int
3004aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3005{
78cc6c2d 3006 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3007 uint32_t record_buf[4];
3008
3009 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3010 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3011 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3012
3013 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3014 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3015 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3016 {
3017 record_buf[0] = reg_rd;
3018 aarch64_insn_r->reg_rec_count = 1;
3019 }
3020 else if (insn_bits24_27 == 0x01)
3021 {
3022 /* Add/Subtract (immediate). */
3023 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3024 record_buf[0] = reg_rd;
3025 aarch64_insn_r->reg_rec_count = 1;
3026 if (setflags)
3027 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3028 }
3029 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3030 {
3031 /* Logical (immediate). */
3032 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3033 record_buf[0] = reg_rd;
3034 aarch64_insn_r->reg_rec_count = 1;
3035 if (setflags)
3036 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3037 }
3038 else
3039 return AARCH64_RECORD_UNKNOWN;
3040
3041 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3042 record_buf);
3043 return AARCH64_RECORD_SUCCESS;
3044}
3045
3046/* Record handler for branch, exception generation and system instructions. */
3047
3048static unsigned int
3049aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3050{
3051 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3052 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3053 uint32_t record_buf[4];
3054
3055 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3056 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3057 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3058
3059 if (insn_bits28_31 == 0x0d)
3060 {
3061 /* Exception generation instructions. */
3062 if (insn_bits24_27 == 0x04)
3063 {
3064 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3065 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3066 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3067 {
3068 ULONGEST svc_number;
3069
3070 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3071 &svc_number);
3072 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3073 svc_number);
3074 }
3075 else
3076 return AARCH64_RECORD_UNSUPPORTED;
3077 }
3078 /* System instructions. */
3079 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3080 {
3081 uint32_t reg_rt, reg_crn;
3082
3083 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3084 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3085
3086 /* Record rt in case of sysl and mrs instructions. */
3087 if (bit (aarch64_insn_r->aarch64_insn, 21))
3088 {
3089 record_buf[0] = reg_rt;
3090 aarch64_insn_r->reg_rec_count = 1;
3091 }
3092 /* Record cpsr for hint and msr(immediate) instructions. */
3093 else if (reg_crn == 0x02 || reg_crn == 0x04)
3094 {
3095 record_buf[0] = AARCH64_CPSR_REGNUM;
3096 aarch64_insn_r->reg_rec_count = 1;
3097 }
3098 }
3099 /* Unconditional branch (register). */
 3100 else if ((insn_bits24_27 & 0x0e) == 0x06)
3101 {
3102 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3103 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3104 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3105 }
3106 else
3107 return AARCH64_RECORD_UNKNOWN;
3108 }
3109 /* Unconditional branch (immediate). */
3110 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3111 {
3112 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3113 if (bit (aarch64_insn_r->aarch64_insn, 31))
3114 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3115 }
3116 else
3117 /* Compare & branch (immediate), Test & branch (immediate) and
3118 Conditional branch (immediate). */
3119 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3120
3121 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3122 record_buf);
3123 return AARCH64_RECORD_SUCCESS;
3124}
3125
3126/* Record handler for advanced SIMD load and store instructions. */
3127
3128static unsigned int
3129aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3130{
3131 CORE_ADDR address;
3132 uint64_t addr_offset = 0;
3133 uint32_t record_buf[24];
3134 uint64_t record_buf_mem[24];
3135 uint32_t reg_rn, reg_rt;
3136 uint32_t reg_index = 0, mem_index = 0;
3137 uint8_t opcode_bits, size_bits;
3138
3139 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3140 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3141 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3142 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3143 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3144
3145 if (record_debug)
b277c936 3146 debug_printf ("Process record: Advanced SIMD load/store\n");
3147
3148 /* Load/store single structure. */
3149 if (bit (aarch64_insn_r->aarch64_insn, 24))
3150 {
3151 uint8_t sindex, scale, selem, esize, replicate = 0;
3152 scale = opcode_bits >> 2;
3153 selem = ((opcode_bits & 0x02) |
3154 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3155 switch (scale)
3156 {
3157 case 1:
3158 if (size_bits & 0x01)
3159 return AARCH64_RECORD_UNKNOWN;
3160 break;
3161 case 2:
3162 if ((size_bits >> 1) & 0x01)
3163 return AARCH64_RECORD_UNKNOWN;
3164 if (size_bits & 0x01)
3165 {
3166 if (!((opcode_bits >> 1) & 0x01))
3167 scale = 3;
3168 else
3169 return AARCH64_RECORD_UNKNOWN;
3170 }
3171 break;
3172 case 3:
3173 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3174 {
3175 scale = size_bits;
3176 replicate = 1;
3177 break;
3178 }
3179 else
3180 return AARCH64_RECORD_UNKNOWN;
3181 default:
3182 break;
3183 }
3184 esize = 8 << scale;
3185 if (replicate)
3186 for (sindex = 0; sindex < selem; sindex++)
3187 {
3188 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3189 reg_rt = (reg_rt + 1) % 32;
3190 }
3191 else
3192 {
3193 for (sindex = 0; sindex < selem; sindex++)
3194 {
3195 if (bit (aarch64_insn_r->aarch64_insn, 22))
3196 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3197 else
3198 {
3199 record_buf_mem[mem_index++] = esize / 8;
3200 record_buf_mem[mem_index++] = address + addr_offset;
3201 }
3202 addr_offset = addr_offset + (esize / 8);
3203 reg_rt = (reg_rt + 1) % 32;
3204 }
3205 }
3206 }
3207 /* Load/store multiple structure. */
3208 else
3209 {
3210 uint8_t selem, esize, rpt, elements;
3211 uint8_t eindex, rindex;
3212
3213 esize = 8 << size_bits;
3214 if (bit (aarch64_insn_r->aarch64_insn, 30))
3215 elements = 128 / esize;
3216 else
3217 elements = 64 / esize;
3218
3219 switch (opcode_bits)
3220 {
 3221 /* LD/ST4 (4 Registers). */
3222 case 0:
3223 rpt = 1;
3224 selem = 4;
3225 break;
 3226 /* LD/ST1 (4 Registers). */
3227 case 2:
3228 rpt = 4;
3229 selem = 1;
3230 break;
 3231 /* LD/ST3 (3 Registers). */
3232 case 4:
3233 rpt = 1;
3234 selem = 3;
3235 break;
 3236 /* LD/ST1 (3 Registers). */
3237 case 6:
3238 rpt = 3;
3239 selem = 1;
3240 break;
 3241 /* LD/ST1 (1 Register). */
3242 case 7:
3243 rpt = 1;
3244 selem = 1;
3245 break;
 3246 /* LD/ST2 (2 Registers). */
3247 case 8:
3248 rpt = 1;
3249 selem = 2;
3250 break;
 3251 /* LD/ST1 (2 Registers). */
3252 case 10:
3253 rpt = 2;
3254 selem = 1;
3255 break;
3256 default:
3257 return AARCH64_RECORD_UNSUPPORTED;
3258 break;
3259 }
3260 for (rindex = 0; rindex < rpt; rindex++)
3261 for (eindex = 0; eindex < elements; eindex++)
3262 {
3263 uint8_t reg_tt, sindex;
3264 reg_tt = (reg_rt + rindex) % 32;
3265 for (sindex = 0; sindex < selem; sindex++)
3266 {
3267 if (bit (aarch64_insn_r->aarch64_insn, 22))
3268 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3269 else
3270 {
3271 record_buf_mem[mem_index++] = esize / 8;
3272 record_buf_mem[mem_index++] = address + addr_offset;
3273 }
3274 addr_offset = addr_offset + (esize / 8);
3275 reg_tt = (reg_tt + 1) % 32;
3276 }
3277 }
3278 }
3279
3280 if (bit (aarch64_insn_r->aarch64_insn, 23))
3281 record_buf[reg_index++] = reg_rn;
3282
3283 aarch64_insn_r->reg_rec_count = reg_index;
3284 aarch64_insn_r->mem_rec_count = mem_index / 2;
3285 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3286 record_buf_mem);
3287 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3288 record_buf);
3289 return AARCH64_RECORD_SUCCESS;
3290}
3291
3292/* Record handler for load and store instructions. */
3293
3294static unsigned int
3295aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3296{
3297 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3298 uint8_t insn_bit23, insn_bit21;
3299 uint8_t opc, size_bits, ld_flag, vector_flag;
3300 uint32_t reg_rn, reg_rt, reg_rt2;
3301 uint64_t datasize, offset;
3302 uint32_t record_buf[8];
3303 uint64_t record_buf_mem[8];
3304 CORE_ADDR address;
3305
3306 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3307 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3308 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3309 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3310 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3311 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3312 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3313 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3314 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3315 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3316 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3317
3318 /* Load/store exclusive. */
3319 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3320 {
3321 if (record_debug)
b277c936 3322 debug_printf ("Process record: load/store exclusive\n");
3323
3324 if (ld_flag)
3325 {
3326 record_buf[0] = reg_rt;
3327 aarch64_insn_r->reg_rec_count = 1;
3328 if (insn_bit21)
3329 {
3330 record_buf[1] = reg_rt2;
3331 aarch64_insn_r->reg_rec_count = 2;
3332 }
3333 }
3334 else
3335 {
3336 if (insn_bit21)
3337 datasize = (8 << size_bits) * 2;
3338 else
3339 datasize = (8 << size_bits);
3340 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3341 &address);
3342 record_buf_mem[0] = datasize / 8;
3343 record_buf_mem[1] = address;
3344 aarch64_insn_r->mem_rec_count = 1;
3345 if (!insn_bit23)
3346 {
3347 /* Save register rs. */
3348 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3349 aarch64_insn_r->reg_rec_count = 1;
3350 }
3351 }
3352 }
 3353 /* Decode load register (literal) instructions. */
3354 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3355 {
3356 if (record_debug)
b277c936 3357 debug_printf ("Process record: load register (literal)\n");
3358 if (vector_flag)
3359 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3360 else
3361 record_buf[0] = reg_rt;
3362 aarch64_insn_r->reg_rec_count = 1;
3363 }
 3364 /* Decode all types of load/store pair instructions. */
3365 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3366 {
3367 if (record_debug)
b277c936 3368 debug_printf ("Process record: load/store pair\n");
3369
3370 if (ld_flag)
3371 {
3372 if (vector_flag)
3373 {
3374 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3375 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3376 }
3377 else
3378 {
3379 record_buf[0] = reg_rt;
3380 record_buf[1] = reg_rt2;
3381 }
3382 aarch64_insn_r->reg_rec_count = 2;
3383 }
3384 else
3385 {
3386 uint16_t imm7_off;
3387 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3388 if (!vector_flag)
3389 size_bits = size_bits >> 1;
3390 datasize = 8 << (2 + size_bits);
3391 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3392 offset = offset << (2 + size_bits);
3393 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3394 &address);
3395 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3396 {
3397 if (imm7_off & 0x40)
3398 address = address - offset;
3399 else
3400 address = address + offset;
3401 }
3402
3403 record_buf_mem[0] = datasize / 8;
3404 record_buf_mem[1] = address;
3405 record_buf_mem[2] = datasize / 8;
3406 record_buf_mem[3] = address + (datasize / 8);
3407 aarch64_insn_r->mem_rec_count = 2;
3408 }
3409 if (bit (aarch64_insn_r->aarch64_insn, 23))
3410 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3411 }
3412 /* Load/store register (unsigned immediate) instructions. */
3413 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3414 {
3415 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3416 if (!(opc >> 1))
3417 if (opc & 0x01)
3418 ld_flag = 0x01;
3419 else
3420 ld_flag = 0x0;
3421 else
3422 if (size_bits != 0x03)
3423 ld_flag = 0x01;
3424 else
3425 return AARCH64_RECORD_UNKNOWN;
3426
3427 if (record_debug)
3428 {
3429 debug_printf ("Process record: load/store (unsigned immediate):"
3430 " size %x V %d opc %x\n", size_bits, vector_flag,
3431 opc);
3432 }
3433
3434 if (!ld_flag)
3435 {
3436 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3437 datasize = 8 << size_bits;
3438 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3439 &address);
3440 offset = offset << size_bits;
3441 address = address + offset;
3442
3443 record_buf_mem[0] = datasize >> 3;
3444 record_buf_mem[1] = address;
3445 aarch64_insn_r->mem_rec_count = 1;
3446 }
3447 else
3448 {
3449 if (vector_flag)
3450 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3451 else
3452 record_buf[0] = reg_rt;
3453 aarch64_insn_r->reg_rec_count = 1;
3454 }
3455 }
3456 /* Load/store register (register offset) instructions. */
3457 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3458 && insn_bits10_11 == 0x02 && insn_bit21)
3459 {
3460 if (record_debug)
b277c936 3461 debug_printf ("Process record: load/store (register offset)\n");
3462 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3463 if (!(opc >> 1))
3464 if (opc & 0x01)
3465 ld_flag = 0x01;
3466 else
3467 ld_flag = 0x0;
3468 else
3469 if (size_bits != 0x03)
3470 ld_flag = 0x01;
3471 else
3472 return AARCH64_RECORD_UNKNOWN;
3473
3474 if (!ld_flag)
3475 {
3476 ULONGEST reg_rm_val;
3477
3478 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3479 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3480 if (bit (aarch64_insn_r->aarch64_insn, 12))
3481 offset = reg_rm_val << size_bits;
3482 else
3483 offset = reg_rm_val;
3484 datasize = 8 << size_bits;
3485 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3486 &address);
3487 address = address + offset;
3488 record_buf_mem[0] = datasize >> 3;
3489 record_buf_mem[1] = address;
3490 aarch64_insn_r->mem_rec_count = 1;
3491 }
3492 else
3493 {
3494 if (vector_flag)
3495 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3496 else
3497 record_buf[0] = reg_rt;
3498 aarch64_insn_r->reg_rec_count = 1;
3499 }
3500 }
3501 /* Load/store register (immediate and unprivileged) instructions. */
3502 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3503 && !insn_bit21)
3504 {
3505 if (record_debug)
3506 {
3507 debug_printf ("Process record: load/store "
3508 "(immediate and unprivileged)\n");
3509 }
3510 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3511 if (!(opc >> 1))
3512 if (opc & 0x01)
3513 ld_flag = 0x01;
3514 else
3515 ld_flag = 0x0;
3516 else
3517 if (size_bits != 0x03)
3518 ld_flag = 0x01;
3519 else
3520 return AARCH64_RECORD_UNKNOWN;
3521
3522 if (!ld_flag)
3523 {
3524 uint16_t imm9_off;
3525 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3526 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3527 datasize = 8 << size_bits;
3528 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3529 &address);
3530 if (insn_bits10_11 != 0x01)
3531 {
3532 if (imm9_off & 0x0100)
3533 address = address - offset;
3534 else
3535 address = address + offset;
3536 }
3537 record_buf_mem[0] = datasize >> 3;
3538 record_buf_mem[1] = address;
3539 aarch64_insn_r->mem_rec_count = 1;
3540 }
3541 else
3542 {
3543 if (vector_flag)
3544 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3545 else
3546 record_buf[0] = reg_rt;
3547 aarch64_insn_r->reg_rec_count = 1;
3548 }
3549 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3550 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3551 }
3552 /* Advanced SIMD load/store instructions. */
3553 else
3554 return aarch64_record_asimd_load_store (aarch64_insn_r);
3555
3556 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3557 record_buf_mem);
3558 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3559 record_buf);
3560 return AARCH64_RECORD_SUCCESS;
3561}
3562
3563/* Record handler for data processing SIMD and floating point instructions. */
3564
3565static unsigned int
3566aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3567{
3568 uint8_t insn_bit21, opcode, rmode, reg_rd;
3569 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3570 uint8_t insn_bits11_14;
3571 uint32_t record_buf[2];
3572
3573 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3574 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3575 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3576 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3577 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3578 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3579 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3580 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3581 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3582
3583 if (record_debug)
b277c936 3584 debug_printf ("Process record: data processing SIMD/FP: ");
3585
3586 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3587 {
3588 /* Floating point - fixed point conversion instructions. */
3589 if (!insn_bit21)
3590 {
3591 if (record_debug)
b277c936 3592 debug_printf ("FP - fixed point conversion");
3593
3594 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3595 record_buf[0] = reg_rd;
3596 else
3597 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3598 }
3599 /* Floating point - conditional compare instructions. */
3600 else if (insn_bits10_11 == 0x01)
3601 {
3602 if (record_debug)
b277c936 3603 debug_printf ("FP - conditional compare");
3604
3605 record_buf[0] = AARCH64_CPSR_REGNUM;
3606 }
3607 /* Floating point - data processing (2-source) and
3608 conditional select instructions. */
3609 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3610 {
3611 if (record_debug)
b277c936 3612 debug_printf ("FP - DP (2-source)");
3613
3614 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3615 }
3616 else if (insn_bits10_11 == 0x00)
3617 {
3618 /* Floating point - immediate instructions. */
3619 if ((insn_bits12_15 & 0x01) == 0x01
3620 || (insn_bits12_15 & 0x07) == 0x04)
3621 {
3622 if (record_debug)
b277c936 3623 debug_printf ("FP - immediate");
3624 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3625 }
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversion instructions.  */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
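          /* SMOV (imm4 == 0101) and UMOV (imm4 == 0111) copy a vector
             element into a general purpose register; every other copy
             form writes a vector register.  */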
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remaining");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

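  /* Each decode path above records exactly one destination register;
     the assertion below guards that invariant.  */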
  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decode the type of the current instruction and dispatch to the
   matching record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

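  /* Bits 25-28 select the top-level encoding group of the A64
     instruction set; test them to route the instruction to the
     matching record handler.  */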
  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}

/* Free the register and memory lists allocated while recording
   an instruction.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

/* Parse the current instruction and record the values of the registers
   and memory that will be changed by it to record_arch_list.  Return -1
   if something goes wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  if (target_read_memory (insn_addr, &buf[0], insn_size))
    return -1;
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
                                           insn_size,
                                           gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

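  /* Decode the instruction and collect the registers and memory
     locations it will modify.  */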
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
                           "0x%08x at address %s.\n"),
                         aarch64_record.aarch64_insn,
                         paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (ret == 0)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}