]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/aarch64-tdep.c
New regcache_raw_get_signed
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
618f726f 3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
ea873d8e
PL
45#include "ax.h"
46#include "ax-gdb.h"
07b287a0
MS
47
48#include "aarch64-tdep.h"
49
50#include "elf-bfd.h"
51#include "elf/aarch64.h"
52
07b287a0
MS
53#include "vec.h"
54
99afc88b
OJ
55#include "record.h"
56#include "record-full.h"
57
07b287a0 58#include "features/aarch64.c"
07b287a0 59
787749ea
PL
60#include "arch/aarch64-insn.h"
61
f77ee802 62#include "opcode/aarch64.h"
325fac50 63#include <algorithm>
f77ee802
YQ
64
/* Instruction bitfield helpers.  submask (x) is a mask covering bits
   [0, x]; bit (obj, st) extracts the single bit ST; bits (obj, st, fn)
   extracts the inclusive bit range [ST, FN].  Use an unsigned 64-bit
   constant so the shift is well defined for field widths up to 63 even
   on hosts where 'long' is 32-bit (1L << 32 would be undefined
   behavior there).  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
68
07b287a0
MS
/* Pseudo register base numbers.  Pseudo registers expose the V
   registers at narrower widths: Q (128-bit), D (64-bit), S (32-bit),
   H (16-bit) and B (8-bit) views, 32 of each, allocated in
   consecutive banks starting at AARCH64_Q0_REGNUM.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
75
/* The standard register names, and all the valid aliases for them.
   Mapped onto raw register numbers so user input like "w3" or "fp"
   resolves to the right register.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  Each wN aliases the low half of xN.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials: the AAPCS64 intra-procedure-call scratch registers.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
125
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
141
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  /* FP status and control registers follow the vector bank.  */
  "fpsr",
  "fpcr"
};
158
/* AArch64 prologue cache structure.  Filled in by prologue analysis
   (aarch64_analyze_prologue and friends) and consumed by the frame
   unwinders below.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  Cleared when registers or
     memory were unavailable while building the cache.  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame;
     -1 when no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
190
07b287a0
MS
/* Implement the "show" callback for the "set/show debug aarch64"
   command: report the current state of the AArch64 debug flag.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
197
07b287a0
MS
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Instructions from START up to (but not including) LIMIT are decoded
   symbolically: register contents are tracked as prologue values
   (pv_t) and stack stores are recorded in a pv_area.  Returns the
   address of the first instruction not recognized as prologue.  If
   CACHE is non-NULL it is filled in with the frame register, frame
   size and saved-register offsets discovered.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  D registers are
     tracked in the slots following the X registers.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  /* ADD/SUB immediate: e.g. "sub sp, sp, #frame_size".  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  /* ADRP loads a page-aligned PC-relative address; result is
	     not tracked symbolically.  */
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  /* ORR with zero shift is a register move ("mov fp, sp");
	     anything fancier ends the analysis.  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  pv_area_store (stack, pv_add_constant (regs[rn],
						 inst.operands[1].addr.offset.imm),
			 is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record the stack offset at which each X register was saved.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  /* Likewise for the D registers; their saved_regs slots live past
     the raw register numbers, at regnum + AARCH64_D0_REGNUM.  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
			    &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
438
439/* Implement the "skip_prologue" gdbarch method. */
440
441static CORE_ADDR
442aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
443{
07b287a0 444 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
445
446 /* See if we can determine the end of the prologue via the symbol
447 table. If so, then return either PC, or the PC after the
448 prologue, whichever is greater. */
449 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
450 {
451 CORE_ADDR post_prologue_pc
452 = skip_prologue_using_sal (gdbarch, func_addr);
453
454 if (post_prologue_pc != 0)
325fac50 455 return std::max (pc, post_prologue_pc);
07b287a0
MS
456 }
457
458 /* Can't determine prologue from the symbol table, need to examine
459 instructions. */
460
461 /* Find an upper limit on the function prologue using the debug
462 information. If the debug information could not be used to
463 provide that bound, then use an arbitrary large number as the
464 upper bound. */
465 limit_pc = skip_prologue_using_sal (gdbarch, pc);
466 if (limit_pc == 0)
467 limit_pc = pc + 128; /* Magic. */
468
469 /* Try disassembling prologue. */
470 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
471}
472
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      /* Never scan past the point we have actually reached.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      /* No symbol information: assume the conventional frame layout
	 with FP (x29) and LR (x30) saved at the frame pointer.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}
525
7dfa3edc
PL
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* framereg == -1 means no frame was identified; leave the cache in
     its "no frame" state.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
558
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Unavailable registers/memory simply leave available_p clear;
	 any other error is re-thrown.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
589
7dfa3edc
PL
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
612
07b287a0
MS
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}
628
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else comes from the saved-register table built by the
     prologue analysis.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
671
/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_prologue_this_id,			/* this_id */
  aarch64_prologue_prev_register,		/* prev_register */
  NULL,						/* unwind_data */
  default_frame_sniffer				/* sniffer */
};
682
8b61f75d
PL
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  Unlike aarch64_make_prologue_cache, no prologue scan
   is performed; only the current SP and PC are recorded.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Unavailable registers/memory leave available_p clear; other
	 errors are re-thrown.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
716
02a2a705
PL
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
731
07b287a0
MS
/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}
746
/* Implement the "sniffer" frame_unwind method.  Accept frames whose PC
   is in a PLT stub, or whose code memory cannot be read.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub winder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}
766
/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_stub_this_id,				/* this_id */
  aarch64_prologue_prev_register,		/* prev_register */
  NULL,						/* unwind_data */
  aarch64_stub_unwind_sniffer			/* sniffer */
};
777
778/* Return the frame base address of *THIS_FRAME. */
779
780static CORE_ADDR
781aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
782{
7c8edfae
PL
783 struct aarch64_prologue_cache *cache
784 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
785
786 return cache->prev_sp - cache->framesize;
787}
788
/* AArch64 default frame base information.  All three base methods
   (this_base, this_locals, this_args) use the same address.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
797
798/* Assuming THIS_FRAME is a dummy, return the frame ID of that
799 dummy frame. The frame ID's base needs to match the TOS value
800 saved by save_dummy_frame_tos () and returned from
801 aarch64_push_dummy_call, and the PC needs to match the dummy
802 frame's breakpoint. */
803
804static struct frame_id
805aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
806{
807 return frame_id_build (get_frame_register_unsigned (this_frame,
808 AARCH64_SP_REGNUM),
809 get_frame_pc (this_frame));
810}
811
812/* Implement the "unwind_pc" gdbarch method. */
813
814static CORE_ADDR
815aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
816{
817 CORE_ADDR pc
818 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
819
820 return pc;
821}
822
/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}
830
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  Only the PC is handled here; it is registered for this
   callback by aarch64_dwarf2_frame_init_reg.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The previous PC is the value LR held at the call site.  */
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}
851
/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The PC unwinds via LR; see aarch64_dwarf2_prev_register.  */
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      /* The previous SP is the CFA.  */
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}
870
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
885
/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      /* Scalars are naturally aligned to their size.  */
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	{
	  /* Use the natural alignment for vector types (the same for
	     scalar type), but the maximum alignment is 128-bit.  */
	  if (TYPE_LENGTH (t) > 16)
	    return 16;
	  else
	    return TYPE_LENGTH (t);
	}
      else
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* An aggregate aligns to its most strictly aligned member.  */
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
942
cd635f74
YQ
/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);

	/* A short vector itself is not an HFA/HVA.  */
	if (TYPE_VECTOR (ty))
	  return 0;

	/* NOTE(review): this compares the array's byte length, not its
	   element count, against 4 — verify against the AAPCS64 rule
	   that an HFA/HVA has at most four members.  */
	if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
	    && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
		|| (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
		    && TYPE_VECTOR (target_ty))))
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	/* HFA or HVA has at most four members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
		|| (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
		    && TYPE_VECTOR (member0_type)))
	      {
		int i;

		/* All members must match the first member's type code
		   and length for the aggregate to be homogeneous.  */
		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
1004
/* AArch64 function call information structure.  Tracks the state of
   the AAPCS64 argument-marshalling algorithm during push_dummy_call.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};
1026
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  /* Copy the value 8 bytes at a time into successive X registers.  */
  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
1068
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  LEN must not
   exceed V_REGISTER_SIZE.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      /* Zero the register first so the unused high bytes are defined.  */
      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  /* Out of V registers: per the PCS, once any SIMD/FP argument goes
     to the stack, no further ones may use registers.  */
  info->nsrn = 8;
  return 0;
}
1104
/* Marshall an argument onto the stack.  The bytes are queued in
   INFO->si (with any required alignment padding) and written to
   memory later, once the final SP is known.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  A NULL data pointer marks the
	 item as padding; no bytes are written for it.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
1151
1152/* Marshall an argument into a sequence of one or more consecutive X
1153 registers or, if insufficient X registers are available then onto
1154 the stack. */
1155
1156static void
1157pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1158 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1159 struct value *arg)
07b287a0
MS
1160{
1161 int len = TYPE_LENGTH (type);
1162 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1163
1164 /* PCS C.13 - Pass in registers if we have enough spare */
1165 if (info->ngrn + nregs <= 8)
1166 {
8e80f9d1 1167 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1168 info->ngrn += nregs;
1169 }
1170 else
1171 {
1172 info->ngrn = 8;
8e80f9d1 1173 pass_on_stack (info, type, arg);
07b287a0
MS
1174 }
1175}
1176
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
		    struct type *type,
		    struct value *arg)
{
  int in_reg = pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
			  value_contents (arg));

  if (!in_reg)
    pass_on_stack (info, type, arg);
}
1191
1192/* Implement the "push_dummy_call" gdbarch method. */
1193
1194static CORE_ADDR
1195aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1196 struct regcache *regcache, CORE_ADDR bp_addr,
1197 int nargs,
1198 struct value **args, CORE_ADDR sp, int struct_return,
1199 CORE_ADDR struct_addr)
1200{
07b287a0 1201 int argnum;
07b287a0
MS
1202 struct aarch64_call_info info;
1203 struct type *func_type;
1204 struct type *return_type;
1205 int lang_struct_return;
1206
1207 memset (&info, 0, sizeof (info));
1208
1209 /* We need to know what the type of the called function is in order
1210 to determine the number of named/anonymous arguments for the
1211 actual argument placement, and the return type in order to handle
1212 return value correctly.
1213
1214 The generic code above us views the decision of return in memory
1215 or return in registers as a two stage processes. The language
1216 handler is consulted first and may decide to return in memory (eg
1217 class with copy constructor returned by value), this will cause
1218 the generic code to allocate space AND insert an initial leading
1219 argument.
1220
1221 If the language code does not decide to pass in memory then the
1222 target code is consulted.
1223
1224 If the language code decides to pass in memory we want to move
1225 the pointer inserted as the initial argument from the argument
1226 list and into X8, the conventional AArch64 struct return pointer
1227 register.
1228
1229 This is slightly awkward, ideally the flag "lang_struct_return"
1230 would be passed to the targets implementation of push_dummy_call.
1231 Rather that change the target interface we call the language code
1232 directly ourselves. */
1233
1234 func_type = check_typedef (value_type (function));
1235
1236 /* Dereference function pointer types. */
1237 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1238 func_type = TYPE_TARGET_TYPE (func_type);
1239
1240 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1241 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1242
1243 /* If language_pass_by_reference () returned true we will have been
1244 given an additional initial argument, a hidden pointer to the
1245 return slot in memory. */
1246 return_type = TYPE_TARGET_TYPE (func_type);
1247 lang_struct_return = language_pass_by_reference (return_type);
1248
1249 /* Set the return address. For the AArch64, the return breakpoint
1250 is always at BP_ADDR. */
1251 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1252
1253 /* If we were given an initial argument for the return slot because
1254 lang_struct_return was true, lose it. */
1255 if (lang_struct_return)
1256 {
1257 args++;
1258 nargs--;
1259 }
1260
1261 /* The struct_return pointer occupies X8. */
1262 if (struct_return || lang_struct_return)
1263 {
1264 if (aarch64_debug)
b277c936
PL
1265 {
1266 debug_printf ("struct return in %s = 0x%s\n",
1267 gdbarch_register_name (gdbarch,
1268 AARCH64_STRUCT_RETURN_REGNUM),
1269 paddress (gdbarch, struct_addr));
1270 }
07b287a0
MS
1271 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1272 struct_addr);
1273 }
1274
1275 for (argnum = 0; argnum < nargs; argnum++)
1276 {
1277 struct value *arg = args[argnum];
1278 struct type *arg_type;
1279 int len;
1280
1281 arg_type = check_typedef (value_type (arg));
1282 len = TYPE_LENGTH (arg_type);
1283
1284 switch (TYPE_CODE (arg_type))
1285 {
1286 case TYPE_CODE_INT:
1287 case TYPE_CODE_BOOL:
1288 case TYPE_CODE_CHAR:
1289 case TYPE_CODE_RANGE:
1290 case TYPE_CODE_ENUM:
1291 if (len < 4)
1292 {
1293 /* Promote to 32 bit integer. */
1294 if (TYPE_UNSIGNED (arg_type))
1295 arg_type = builtin_type (gdbarch)->builtin_uint32;
1296 else
1297 arg_type = builtin_type (gdbarch)->builtin_int32;
1298 arg = value_cast (arg_type, arg);
1299 }
8e80f9d1 1300 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1301 break;
1302
1303 case TYPE_CODE_COMPLEX:
1304 if (info.nsrn <= 6)
1305 {
1306 const bfd_byte *buf = value_contents (arg);
1307 struct type *target_type =
1308 check_typedef (TYPE_TARGET_TYPE (arg_type));
1309
07b287a0 1310 pass_in_v (gdbarch, regcache, &info,
0735fddd
YQ
1311 TYPE_LENGTH (target_type), buf);
1312 pass_in_v (gdbarch, regcache, &info,
1313 TYPE_LENGTH (target_type),
07b287a0
MS
1314 buf + TYPE_LENGTH (target_type));
1315 }
1316 else
1317 {
1318 info.nsrn = 8;
8e80f9d1 1319 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1320 }
1321 break;
1322 case TYPE_CODE_FLT:
8e80f9d1 1323 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1324 break;
1325
1326 case TYPE_CODE_STRUCT:
1327 case TYPE_CODE_ARRAY:
1328 case TYPE_CODE_UNION:
cd635f74 1329 if (is_hfa_or_hva (arg_type))
07b287a0
MS
1330 {
1331 int elements = TYPE_NFIELDS (arg_type);
1332
1333 /* Homogeneous Aggregates */
1334 if (info.nsrn + elements < 8)
1335 {
1336 int i;
1337
1338 for (i = 0; i < elements; i++)
1339 {
1340 /* We know that we have sufficient registers
1341 available therefore this will never fallback
1342 to the stack. */
1343 struct value *field =
1344 value_primitive_field (arg, 0, i, arg_type);
1345 struct type *field_type =
1346 check_typedef (value_type (field));
1347
8e80f9d1
YQ
1348 pass_in_v_or_stack (gdbarch, regcache, &info,
1349 field_type, field);
07b287a0
MS
1350 }
1351 }
1352 else
1353 {
1354 info.nsrn = 8;
8e80f9d1 1355 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1356 }
1357 }
238f2452
YQ
1358 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1359 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1360 {
1361 /* Short vector types are passed in V registers. */
1362 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1363 }
07b287a0
MS
1364 else if (len > 16)
1365 {
1366 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1367 invisible reference. */
1368
1369 /* Allocate aligned storage. */
1370 sp = align_down (sp - len, 16);
1371
1372 /* Write the real data into the stack. */
1373 write_memory (sp, value_contents (arg), len);
1374
1375 /* Construct the indirection. */
1376 arg_type = lookup_pointer_type (arg_type);
1377 arg = value_from_pointer (arg_type, sp);
8e80f9d1 1378 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1379 }
1380 else
1381 /* PCS C.15 / C.18 multiple values pass. */
8e80f9d1 1382 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1383 break;
1384
1385 default:
8e80f9d1 1386 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1387 break;
1388 }
1389 }
1390
1391 /* Make sure stack retains 16 byte alignment. */
1392 if (info.nsaa & 15)
1393 sp -= 16 - (info.nsaa & 15);
1394
1395 while (!VEC_empty (stack_item_t, info.si))
1396 {
1397 stack_item_t *si = VEC_last (stack_item_t, info.si);
1398
1399 sp -= si->len;
c3c87445
YQ
1400 if (si->data != NULL)
1401 write_memory (sp, si->data, si->len);
07b287a0
MS
1402 VEC_pop (stack_item_t, info.si);
1403 }
1404
1405 VEC_free (stack_item_t, info.si);
1406
1407 /* Finally, update the SP register. */
1408 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1409
1410 return sp;
1411}
1412
1413/* Implement the "frame_align" gdbarch method. */
1414
1415static CORE_ADDR
1416aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1417{
1418 /* Align the stack to sixteen bytes. */
1419 return sp & ~(CORE_ADDR) 15;
1420}
1421
1422/* Return the type for an AdvSISD Q register. */
1423
1424static struct type *
1425aarch64_vnq_type (struct gdbarch *gdbarch)
1426{
1427 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1428
1429 if (tdep->vnq_type == NULL)
1430 {
1431 struct type *t;
1432 struct type *elem;
1433
1434 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1435 TYPE_CODE_UNION);
1436
1437 elem = builtin_type (gdbarch)->builtin_uint128;
1438 append_composite_type_field (t, "u", elem);
1439
1440 elem = builtin_type (gdbarch)->builtin_int128;
1441 append_composite_type_field (t, "s", elem);
1442
1443 tdep->vnq_type = t;
1444 }
1445
1446 return tdep->vnq_type;
1447}
1448
1449/* Return the type for an AdvSISD D register. */
1450
1451static struct type *
1452aarch64_vnd_type (struct gdbarch *gdbarch)
1453{
1454 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1455
1456 if (tdep->vnd_type == NULL)
1457 {
1458 struct type *t;
1459 struct type *elem;
1460
1461 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1462 TYPE_CODE_UNION);
1463
1464 elem = builtin_type (gdbarch)->builtin_double;
1465 append_composite_type_field (t, "f", elem);
1466
1467 elem = builtin_type (gdbarch)->builtin_uint64;
1468 append_composite_type_field (t, "u", elem);
1469
1470 elem = builtin_type (gdbarch)->builtin_int64;
1471 append_composite_type_field (t, "s", elem);
1472
1473 tdep->vnd_type = t;
1474 }
1475
1476 return tdep->vnd_type;
1477}
1478
1479/* Return the type for an AdvSISD S register. */
1480
1481static struct type *
1482aarch64_vns_type (struct gdbarch *gdbarch)
1483{
1484 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1485
1486 if (tdep->vns_type == NULL)
1487 {
1488 struct type *t;
1489 struct type *elem;
1490
1491 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1492 TYPE_CODE_UNION);
1493
1494 elem = builtin_type (gdbarch)->builtin_float;
1495 append_composite_type_field (t, "f", elem);
1496
1497 elem = builtin_type (gdbarch)->builtin_uint32;
1498 append_composite_type_field (t, "u", elem);
1499
1500 elem = builtin_type (gdbarch)->builtin_int32;
1501 append_composite_type_field (t, "s", elem);
1502
1503 tdep->vns_type = t;
1504 }
1505
1506 return tdep->vns_type;
1507}
1508
1509/* Return the type for an AdvSISD H register. */
1510
1511static struct type *
1512aarch64_vnh_type (struct gdbarch *gdbarch)
1513{
1514 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1515
1516 if (tdep->vnh_type == NULL)
1517 {
1518 struct type *t;
1519 struct type *elem;
1520
1521 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1522 TYPE_CODE_UNION);
1523
1524 elem = builtin_type (gdbarch)->builtin_uint16;
1525 append_composite_type_field (t, "u", elem);
1526
1527 elem = builtin_type (gdbarch)->builtin_int16;
1528 append_composite_type_field (t, "s", elem);
1529
1530 tdep->vnh_type = t;
1531 }
1532
1533 return tdep->vnh_type;
1534}
1535
1536/* Return the type for an AdvSISD B register. */
1537
1538static struct type *
1539aarch64_vnb_type (struct gdbarch *gdbarch)
1540{
1541 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1542
1543 if (tdep->vnb_type == NULL)
1544 {
1545 struct type *t;
1546 struct type *elem;
1547
1548 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1549 TYPE_CODE_UNION);
1550
1551 elem = builtin_type (gdbarch)->builtin_uint8;
1552 append_composite_type_field (t, "u", elem);
1553
1554 elem = builtin_type (gdbarch)->builtin_int8;
1555 append_composite_type_field (t, "s", elem);
1556
1557 tdep->vnb_type = t;
1558 }
1559
1560 return tdep->vnb_type;
1561}
1562
1563/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1564
1565static int
1566aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1567{
1568 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1569 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1570
1571 if (reg == AARCH64_DWARF_SP)
1572 return AARCH64_SP_REGNUM;
1573
1574 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1575 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1576
1577 return -1;
1578}
1579\f
1580
/* Implement the "print_insn" gdbarch method.  Disassemble one AArch64
   instruction at MEMADDR.  Clearing INFO->symbols stops opcodes from
   printing its own symbolization.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1589
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Instantiate the standard breakpoint kind/sequence helpers for the
   fixed 4-byte BRK encoding above.  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1596
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  The register selection mirrors the AAPCS64
   return rules handled in aarch64_store_return_value.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar float: returned in v0; copy out the low LEN bytes.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      /* Complex: real part in v0, imaginary part in v1.  */
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      /* Homogeneous floating-point/vector aggregate: one member per
	 V register, starting at v0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[V_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
1707
1708
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  /* v0-v7 are used to return values and one register is allocated
     for one member.  However, HFA or HVA has at most four members,
     so such an aggregate always fits in registers.  */
  if (is_hfa_or_hva (type))
    return 0;

  /* PCS B.6 Aggregates larger than 16 bytes are passed by invisible
     reference.  Everything else fits in x0/x1.  */
  return TYPE_LENGTH (type) > 16 ? 1 : 0;
}
1735
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  The register selection mirrors the
   AAPCS64 return rules handled in aarch64_extract_return_value.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar float goes in the low bytes of v0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa_or_hva (type))
    {
      /* Homogeneous floating-point/vector aggregate: one member per
	 V register, starting at v0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA or HVA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
	   && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  Returned in v0.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
1839
1840/* Implement the "return_value" gdbarch method. */
1841
1842static enum return_value_convention
1843aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1844 struct type *valtype, struct regcache *regcache,
1845 gdb_byte *readbuf, const gdb_byte *writebuf)
1846{
07b287a0
MS
1847
1848 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1849 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1850 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1851 {
1852 if (aarch64_return_in_memory (gdbarch, valtype))
1853 {
1854 if (aarch64_debug)
b277c936 1855 debug_printf ("return value in memory\n");
07b287a0
MS
1856 return RETURN_VALUE_STRUCT_CONVENTION;
1857 }
1858 }
1859
1860 if (writebuf)
1861 aarch64_store_return_value (valtype, regcache, writebuf);
1862
1863 if (readbuf)
1864 aarch64_extract_return_value (valtype, regcache, readbuf);
1865
1866 if (aarch64_debug)
b277c936 1867 debug_printf ("return value in registers\n");
07b287a0
MS
1868
1869 return RETURN_VALUE_REGISTER_CONVENTION;
1870}
1871
/* Implement the "get_longjmp_target" gdbarch method.  Extract the
   longjmp resume PC from the jmp_buf whose address is in x0, using
   the per-arch jb_pc/jb_elt_size layout.  Return 1 on success and
   store the PC in *PC; return 0 if the jmp_buf cannot be read.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* x0 holds the jmp_buf pointer on entry to longjmp.  */
  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
ea873d8e
PL
1892
/* Implement the "gen_return_address" gdbarch method.  Compile an
   agent expression that yields the return address: on AArch64 this
   is simply the LR register (x30).  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
			    struct agent_expr *ax, struct axs_value *value,
			    CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
07b287a0
MS
1904\f
1905
/* Return the pseudo register name corresponding to register regnum.
   The pseudo registers are the Q/D/S/H/B views of the V registers;
   REGNUM is an absolute GDB register number and is rebased past the
   raw registers before indexing the name tables.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Rebase to a pseudo register index.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  regnum);
}
1992
1993/* Implement the "pseudo_register_type" tdesc_arch_data method. */
1994
1995static struct type *
1996aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1997{
1998 regnum -= gdbarch_num_regs (gdbarch);
1999
2000 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2001 return aarch64_vnq_type (gdbarch);
2002
2003 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2004 return aarch64_vnd_type (gdbarch);
2005
2006 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2007 return aarch64_vns_type (gdbarch);
2008
2009 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2010 return aarch64_vnh_type (gdbarch);
2011
2012 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2013 return aarch64_vnb_type (gdbarch);
2014
2015 internal_error (__FILE__, __LINE__,
2016 _("aarch64_pseudo_register_type: bad register number %d"),
2017 regnum);
2018}
2019
2020/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2021
2022static int
2023aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2024 struct reggroup *group)
2025{
2026 regnum -= gdbarch_num_regs (gdbarch);
2027
2028 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2029 return group == all_reggroup || group == vector_reggroup;
2030 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2031 return (group == all_reggroup || group == vector_reggroup
2032 || group == float_reggroup);
2033 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2034 return (group == all_reggroup || group == vector_reggroup
2035 || group == float_reggroup);
2036 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2037 return group == all_reggroup || group == vector_reggroup;
2038 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2039 return group == all_reggroup || group == vector_reggroup;
2040
2041 return group == all_reggroup;
2042}
2043
2044/* Implement the "pseudo_register_read_value" gdbarch method. */
2045
2046static struct value *
2047aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2048 struct regcache *regcache,
2049 int regnum)
2050{
2051 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2052 struct value *result_value;
2053 gdb_byte *buf;
2054
2055 result_value = allocate_value (register_type (gdbarch, regnum));
2056 VALUE_LVAL (result_value) = lval_register;
2057 VALUE_REGNUM (result_value) = regnum;
2058 buf = value_contents_raw (result_value);
2059
2060 regnum -= gdbarch_num_regs (gdbarch);
2061
2062 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2063 {
2064 enum register_status status;
2065 unsigned v_regnum;
2066
2067 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2068 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2069 if (status != REG_VALID)
2070 mark_value_bytes_unavailable (result_value, 0,
2071 TYPE_LENGTH (value_type (result_value)));
2072 else
2073 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2074 return result_value;
2075 }
2076
2077 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2078 {
2079 enum register_status status;
2080 unsigned v_regnum;
2081
2082 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2083 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2084 if (status != REG_VALID)
2085 mark_value_bytes_unavailable (result_value, 0,
2086 TYPE_LENGTH (value_type (result_value)));
2087 else
2088 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2089 return result_value;
2090 }
2091
2092 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2093 {
2094 enum register_status status;
2095 unsigned v_regnum;
2096
2097 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2098 status = regcache_raw_read (regcache, v_regnum, reg_buf);
4bcddace
PL
2099 if (status != REG_VALID)
2100 mark_value_bytes_unavailable (result_value, 0,
2101 TYPE_LENGTH (value_type (result_value)));
2102 else
2103 memcpy (buf, reg_buf, S_REGISTER_SIZE);
07b287a0
MS
2104 return result_value;
2105 }
2106
2107 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2108 {
2109 enum register_status status;
2110 unsigned v_regnum;
2111
2112 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2113 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2114 if (status != REG_VALID)
2115 mark_value_bytes_unavailable (result_value, 0,
2116 TYPE_LENGTH (value_type (result_value)));
2117 else
2118 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2119 return result_value;
2120 }
2121
2122 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2123 {
2124 enum register_status status;
2125 unsigned v_regnum;
2126
2127 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2128 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2129 if (status != REG_VALID)
2130 mark_value_bytes_unavailable (result_value, 0,
2131 TYPE_LENGTH (value_type (result_value)));
2132 else
2133 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2134 return result_value;
2135 }
2136
2137 gdb_assert_not_reached ("regnum out of bound");
2138}
2139
2140/* Implement the "pseudo_register_write" gdbarch method. */
2141
2142static void
2143aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2144 int regnum, const gdb_byte *buf)
2145{
2146 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2147
2148 /* Ensure the register buffer is zero, we want gdb writes of the
2149 various 'scalar' pseudo registers to behavior like architectural
2150 writes, register width bytes are written the remainder are set to
2151 zero. */
2152 memset (reg_buf, 0, sizeof (reg_buf));
2153
2154 regnum -= gdbarch_num_regs (gdbarch);
2155
2156 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2157 {
2158 /* pseudo Q registers */
2159 unsigned v_regnum;
2160
2161 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2162 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2163 regcache_raw_write (regcache, v_regnum, reg_buf);
2164 return;
2165 }
2166
2167 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2168 {
2169 /* pseudo D registers */
2170 unsigned v_regnum;
2171
2172 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2173 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2174 regcache_raw_write (regcache, v_regnum, reg_buf);
2175 return;
2176 }
2177
2178 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2179 {
2180 unsigned v_regnum;
2181
2182 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2183 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2184 regcache_raw_write (regcache, v_regnum, reg_buf);
2185 return;
2186 }
2187
2188 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2189 {
2190 /* pseudo H registers */
2191 unsigned v_regnum;
2192
2193 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2194 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2195 regcache_raw_write (regcache, v_regnum, reg_buf);
2196 return;
2197 }
2198
2199 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2200 {
2201 /* pseudo B registers */
2202 unsigned v_regnum;
2203
2204 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2205 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2206 regcache_raw_write (regcache, v_regnum, reg_buf);
2207 return;
2208 }
2209
2210 gdb_assert_not_reached ("regnum out of bound");
2211}
2212
07b287a0
MS
2213/* Callback function for user_reg_add. */
2214
2215static struct value *
2216value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2217{
9a3c8263 2218 const int *reg_p = (const int *) baton;
07b287a0
MS
2219
2220 return value_of_register (*reg_p, frame);
2221}
2222\f
2223
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Scan forward from the current PC for a load-exclusive /
   store-exclusive pair; return the addresses at which execution
   should resume after the sequence, or NULL when the PC is not at
   the start of such a sequence, so that no breakpoint is ever placed
   inside it (which would break the exclusive monitor).  */

static VEC (CORE_ADDR) *
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;
  VEC (CORE_ADDR) *next_pcs = NULL;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence;
     bit 22 distinguishes the load form from the store form.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return NULL;

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
	return NULL;
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* At most one conditional branch is handled in the
	     sequence.  */
	  if (bc_insn_count >= 1)
	    return NULL;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return NULL;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    VEC_safe_push (CORE_ADDR, next_pcs, breaks[index]);

  return next_pcs;
}
2306
b6542f81
YQ
/* Closure describing how to fix up the PC after displaced stepping a
   single instruction; produced by aarch64_displaced_step_copy_insn
   and consumed by aarch64_displaced_step_fixup.  */

struct displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  int cond;

  /* PC adjustment offset after displaced stepping.  */
  int32_t pc_adjust;
};
2316
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure filled in by the visitor methods with the PC fix-up
     decisions for aarch64_displaced_step_fixup.  */
  struct displaced_step_closure *dsc;
};
2334
2335/* Implementation of aarch64_insn_visitor method "b". */
2336
2337static void
2338aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2339 struct aarch64_insn_data *data)
2340{
2341 struct aarch64_displaced_step_data *dsd
2342 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2343 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
2344
2345 if (can_encode_int32 (new_offset, 28))
2346 {
2347 /* Emit B rather than BL, because executing BL on a new address
2348 will get the wrong address into LR. In order to avoid this,
2349 we emit B, and update LR if the instruction is BL. */
2350 emit_b (dsd->insn_buf, 0, new_offset);
2351 dsd->insn_count++;
2352 }
2353 else
2354 {
2355 /* Write NOP. */
2356 emit_nop (dsd->insn_buf);
2357 dsd->insn_count++;
2358 dsd->dsc->pc_adjust = offset;
2359 }
2360
2361 if (is_bl)
2362 {
2363 /* Update LR. */
2364 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2365 data->insn_addr + 4);
2366 }
2367}
2368
2369/* Implementation of aarch64_insn_visitor method "b_cond". */
2370
2371static void
2372aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2373 struct aarch64_insn_data *data)
2374{
2375 struct aarch64_displaced_step_data *dsd
2376 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2377
2378 /* GDB has to fix up PC after displaced step this instruction
2379 differently according to the condition is true or false. Instead
2380 of checking COND against conditional flags, we can use
2381 the following instructions, and GDB can tell how to fix up PC
2382 according to the PC value.
2383
2384 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2385 INSN1 ;
2386 TAKEN:
2387 INSN2
2388 */
2389
2390 emit_bcond (dsd->insn_buf, cond, 8);
2391 dsd->dsc->cond = 1;
2392 dsd->dsc->pc_adjust = offset;
2393 dsd->insn_count = 1;
2394}
2395
2396/* Dynamically allocate a new register. If we know the register
2397 statically, we should make it a global as above instead of using this
2398 helper function. */
2399
2400static struct aarch64_register
2401aarch64_register (unsigned num, int is64)
2402{
2403 return (struct aarch64_register) { num, is64 };
2404}
2405
2406/* Implementation of aarch64_insn_visitor method "cb". */
2407
2408static void
2409aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2410 const unsigned rn, int is64,
2411 struct aarch64_insn_data *data)
2412{
2413 struct aarch64_displaced_step_data *dsd
2414 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2415
2416 /* The offset is out of range for a compare and branch
2417 instruction. We can use the following instructions instead:
2418
2419 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2420 INSN1 ;
2421 TAKEN:
2422 INSN2
2423 */
2424 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2425 dsd->insn_count = 1;
2426 dsd->dsc->cond = 1;
2427 dsd->dsc->pc_adjust = offset;
2428}
2429
2430/* Implementation of aarch64_insn_visitor method "tb". */
2431
2432static void
2433aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2434 const unsigned rt, unsigned bit,
2435 struct aarch64_insn_data *data)
2436{
2437 struct aarch64_displaced_step_data *dsd
2438 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2439
2440 /* The offset is out of range for a test bit and branch
2441 instruction We can use the following instructions instead:
2442
2443 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2444 INSN1 ;
2445 TAKEN:
2446 INSN2
2447
2448 */
2449 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2450 dsd->insn_count = 1;
2451 dsd->dsc->cond = 1;
2452 dsd->dsc->pc_adjust = offset;
2453}
2454
2455/* Implementation of aarch64_insn_visitor method "adr". */
2456
2457static void
2458aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2459 const int is_adrp, struct aarch64_insn_data *data)
2460{
2461 struct aarch64_displaced_step_data *dsd
2462 = (struct aarch64_displaced_step_data *) data;
2463 /* We know exactly the address the ADR{P,} instruction will compute.
2464 We can just write it to the destination register. */
2465 CORE_ADDR address = data->insn_addr + offset;
2466
2467 if (is_adrp)
2468 {
2469 /* Clear the lower 12 bits of the offset to get the 4K page. */
2470 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2471 address & ~0xfff);
2472 }
2473 else
2474 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2475 address);
2476
2477 dsd->dsc->pc_adjust = 4;
2478 emit_nop (dsd->insn_buf);
2479 dsd->insn_count = 1;
2480}
2481
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative literal load would fetch relative to the scratch pad
   if executed there.  Instead, materialize the literal's absolute
   address in RT, then rewrite the instruction as a zero-offset
   register-based load through RT (the base is overwritten by the
   loaded value, which is harmless since the load fully defines RT).  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
2506
2507/* Implementation of aarch64_insn_visitor method "others". */
2508
2509static void
2510aarch64_displaced_step_others (const uint32_t insn,
2511 struct aarch64_insn_data *data)
2512{
2513 struct aarch64_displaced_step_data *dsd
2514 = (struct aarch64_displaced_step_data *) data;
2515
e1c587c3 2516 aarch64_emit_insn (dsd->insn_buf, insn);
b6542f81
YQ
2517 dsd->insn_count = 1;
2518
2519 if ((insn & 0xfffffc1f) == 0xd65f0000)
2520 {
2521 /* RET */
2522 dsd->dsc->pc_adjust = 0;
2523 }
2524 else
2525 dsd->dsc->pc_adjust = 4;
2526}
2527
/* Visitor dispatch table used by aarch64_relocate_instruction to
   rewrite each class of PC-relative instruction for the scratch
   pad.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
2538
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Decode the instruction at FROM, relocate it to the scratch pad at
   TO (possibly rewritten as several instructions), and return a
   closure describing how to fix up the PC afterwards.  Returns NULL
   when the instruction cannot be displaced-stepped, e.g. when it
   begins an atomic load-exclusive sequence.  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  struct displaced_step_closure *dsc = NULL;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  dsc = XCNEW (struct displaced_step_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc;
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  if (debug_displaced)
	    {
	      debug_printf ("displaced: writing insn ");
	      debug_printf ("%.8x", dsd.insn_buf[i]);
	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
	    }
	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor produced no replacement instructions; give up on
	 displaced stepping this insn.  */
      xfree (dsc);
      dsc = NULL;
    }

  return dsc;
}
2598
/* Implement the "displaced_step_fixup" gdbarch method.

   Called after the relocated instruction at TO has been executed.
   For conditional instructions the stopped PC reveals the branch
   outcome (TO + 8 means taken, TO + 4 means not taken; see the
   two-slot scratch layouts emitted by the visitor methods above),
   then the real PC is rewritten as FROM + PC_ADJUST.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_closure *dsc,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  if (dsc->cond)
    {
      ULONGEST pc;

      regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
    }

  if (dsc->pc_adjust != 0)
    {
      if (debug_displaced)
	{
	  debug_printf ("displaced: fixup: set PC to %s:%d\n",
			paddress (gdbarch, from), dsc->pc_adjust);
	}
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
2636
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   Always returns 1: the relocated instruction in the scratch pad is
   executed with hardware single-step.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2645
07b287a0
MS
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int i;
  int valid_p = 1;
  const struct tdesc_feature *feature;
  int num_regs = 0;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target descriptor.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = tdesc_aarch64;

  gdb_assert (tdesc);

  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  /* The core register feature is mandatory.  */
  if (feature == NULL)
    return NULL;

  tdesc_data = tdesc_data_alloc ();

  /* Validate the descriptor provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &=
      tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
			       aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Look for the V registers.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  if (feature)
    {
      /* Validate the descriptor provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &=
	  tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
				   aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;

      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return NULL;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = XCNEW (struct gdbarch_tdep);
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdep_info = (void *) tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  return gdbarch;
}
2835
2836static void
2837aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2838{
2839 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2840
2841 if (tdep == NULL)
2842 return;
2843
2844 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2845 paddress (gdbarch, tdep->lowest_pc));
2846}
2847
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 gdbarch constructor, the
   built-in target description, and the "set/show debug aarch64"
   maintenance commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);
}
99afc88b
OJ
2868
2869/* AArch64 process record-replay related structures, defines etc. */
2870
99afc88b
OJ
/* Allocate and fill REGS with LENGTH register numbers taken from
   RECORD_BUF; no-op when LENGTH is zero.  LENGTH is evaluated exactly
   once (the original expanded it twice, a multiple-evaluation
   hazard).  */

#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy (&REGS[0], &RECORD_BUF[0], \
                        sizeof (uint32_t) * reg_len); \
              } \
          } \
        while (0)

/* Allocate and fill MEMS with LENGTH memory records taken from
   RECORD_BUF; no-op when LENGTH is zero.  LENGTH is evaluated exactly
   once, and the memcpy destination is MEMS itself rather than the
   equivalent-but-obscure address of its first member.  */

#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy (MEMS, &RECORD_BUF[0], \
                        sizeof (struct aarch64_mem_r) * mem_len); \
              } \
          } \
        while (0)
2895
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: ADDR bytes starting at ADDR must be
   saved before the instruction is allowed to execute.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the record handlers below.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};
2911
/* State threaded through the record handlers while decoding a single
   instruction for process record/replay.  The handlers fill in the
   register and memory record lists.  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;			/* Address of insn to be recorded.  */
  uint32_t aarch64_insn;		/* Insn to be recorded.  */
  uint32_t mem_rec_count;		/* Count of memory records.  */
  uint32_t reg_rec_count;		/* Count of register records.  */
  uint32_t *aarch64_regs;		/* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;	/* Memory locations to be recorded.  */
} insn_decode_record;
2923
/* Record handler for data processing - register instructions.

   Classifies the instruction by bits 28, 24-27 and 21-23 and records
   the destination register (and CPSR when the instruction sets
   flags) into AARCH64_INSN_R->aarch64_regs.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
2999
/* Record handler for data processing - immediate instructions.

   Classifies the instruction by bits 24-27 and bit 23 and records the
   destination register (and CPSR when the instruction sets flags)
   into AARCH64_INSN_R->aarch64_regs.  */

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3044
/* Record handler for branch, exception generation and system instructions.
   Records the registers each such instruction may modify (PC, LR, CPSR,
   Rt, ...) and delegates SVC instructions to the OS-specific syscall
   record hook.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC (opc == 000, LL == 01, bits [4:2] == 0) is handled;
	     every other exception-generating encoding (HVC, SMC, BRK,
	     HLT, DCPS...) is reported as unsupported.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Register 8 (x8) is read for the syscall number here —
		 presumably following the AArch64 Linux syscall convention;
		 confirm against the OS ABI if reused elsewhere.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions. */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions. */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Other system instructions record nothing here;
	     reg_rec_count is assumed already zero (the caller memsets
	     the whole record).  */
	}
      /* Unconditional branch (register). */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR (op == 01) also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate). */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes BL (writes LR) from B.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate). */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3124
/* Record handler for advanced SIMD load and store instructions.
   For stores, memory records are emitted as (length, address) pairs in
   RECORD_BUF_MEM; for loads, the written V registers are collected in
   RECORD_BUF.  The base register is also recorded when writeback
   (bit 23) is in effect.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];   /* (len, addr) pairs — see mem_index / 2.  */
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address of the transfer, read from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure. */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* Number of structure elements: bit 1 of opcode and bit 21 (S/R)
	 combine to give 1..4.  */
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate scale against size bits, rejecting unallocated
	 encodings; scale may be widened to 3 (64-bit element) or, for
	 the replicate (LD..R) forms, taken from size_bits.  */
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;   /* Element size in bits.  */
      if (replicate)
	/* Load-and-replicate writes SELEM consecutive V registers.  */
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22 (L) set: load — record the V register; clear:
		 store — record the memory range written.  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure. */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 (Q) selects 128-bit vs 64-bit vector length.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* RPT = number of register blocks, SELEM = structure elements
	 per block; taken from the opcode per the LD/ST1..4 forms.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers). */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers). */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers). */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers). */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register). */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers). */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers). */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		/* Bit 22 (L): load records registers, store records
		   the memory written.  */
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23 set means post-index writeback: Rn is modified too.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3290
/* Record handler for load and store instructions.  Decodes the several
   load/store encoding classes and records either the destination
   registers (loads) or the (length, address) memory ranges (stores),
   plus the base register when writeback applies.  Advanced SIMD
   load/store encodings are delegated to
   aarch64_record_asimd_load_store.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];   /* (len, addr) pairs.  */
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);     /* L: load vs store.  */
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26); /* V: SIMD&FP reg.  */
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive. */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  /* Pair form (LDXP/LDAXP) also writes Rt2.  */
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  /* Pair stores write twice the element size.  */
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs. */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding. */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* Sign-extend the 7-bit immediate: bit 6 set means negative;
	     OFFSET holds the magnitude, scaled below.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Apply the offset now except for the post-index form, whose
	     effective address is the unmodified base.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Writeback forms also modify the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* opc 0x/1x decides load vs store; opc >= 2 with size == 11 is
	 rejected — presumably the PRFM-style encodings; confirm against
	 the ARMv8 ARM if extending.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* imm12 is scaled by the access size.  */
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  /* Bit 12 (S) scales the register offset by the access size.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  /* Sign-extend the 9-bit immediate (bit 8 is the sign).  */
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-index (bits 10-11 == 01) uses the unmodified base.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre/post-index forms write back into the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions. */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3561
/* Record handler for data processing SIMD and floating point instructions.
   Every path records exactly one destination register — either a general
   register (X0 base), a SIMD/FP register (V0 base), or CPSR for the
   compare forms; the trailing gdb_assert enforces this invariant.  */

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions. */
      if (!insn_bit21)
	{
	  if (record_debug)
	    debug_printf ("FP - fixed point conversion");

	  /* opcode 00x with rmode 11 converts to a general register;
	     everything else targets a SIMD/FP register.  */
	  if ((opcode >> 1) == 0x0 && rmode == 0x03)
	    record_buf[0] = reg_rd;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      /* Floating point - conditional compare instructions. */
      else if (insn_bits10_11 == 0x01)
	{
	  if (record_debug)
	    debug_printf ("FP - conditional compare");

	  record_buf[0] = AARCH64_CPSR_REGNUM;
	}
      /* Floating point - data processing (2-source) and
	 conditional select instructions. */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
	{
	  if (record_debug)
	    debug_printf ("FP - DP (2-source)");

	  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else if (insn_bits10_11 == 0x00)
	{
	  /* Floating point - immediate instructions. */
	  if ((insn_bits12_15 & 0x01) == 0x01
	      || (insn_bits12_15 & 0x07) == 0x04)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	    }
	  /* Floating point - compare instructions. */
	  else if ((insn_bits12_15 & 0x03) == 0x02)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	    }
	  /* Floating point - integer conversions instructions. */
	  else if (insn_bits12_15 == 0x00)
	    {
	      /* Convert float to integer instruction. */
	      if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
		{
		  if (record_debug)
		    debug_printf ("float to int conversion");

		  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		}
	      /* Convert integer to float instruction. */
	      else if ((opcode >> 1) == 0x01 && !rmode)
		{
		  if (record_debug)
		    debug_printf ("int to float conversion");

		  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      /* Move float to integer instruction. */
	      else if ((opcode >> 1) == 0x03)
		{
		  if (record_debug)
		    debug_printf ("move float to int");

		  /* Direction is given by opcode bit 0: to general
		     register or to SIMD/FP register.  */
		  if (!(opcode & 0x01))
		    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		  else
		    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
	debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions. */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
	  && !bit (aarch64_insn_r->aarch64_insn, 15)
	  && bit (aarch64_insn_r->aarch64_insn, 10))
	{
	  /* imm4 values 0101/0111 move to a general register (SMOV/
	     UMOV-style forms); otherwise the V register is written.  */
	  if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
	    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else
	record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions. */
  else
    {
      if (record_debug)
	debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  aarch64_insn_r->reg_rec_count++;
  /* Exactly one register must have been recorded on every path.  */
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3707
3708/* Decodes insns type and invokes its record handler. */
3709
3710static unsigned int
3711aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3712{
3713 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3714
3715 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3716 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3717 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3718 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3719
3720 /* Data processing - immediate instructions. */
3721 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3722 return aarch64_record_data_proc_imm (aarch64_insn_r);
3723
3724 /* Branch, exception generation and system instructions. */
3725 if (ins_bit26 && !ins_bit27 && ins_bit28)
3726 return aarch64_record_branch_except_sys (aarch64_insn_r);
3727
3728 /* Load and store instructions. */
3729 if (!ins_bit25 && ins_bit27)
3730 return aarch64_record_load_store (aarch64_insn_r);
3731
3732 /* Data processing - register instructions. */
3733 if (ins_bit25 && !ins_bit26 && ins_bit27)
3734 return aarch64_record_data_proc_reg (aarch64_insn_r);
3735
3736 /* Data processing - SIMD and floating point instructions. */
3737 if (ins_bit25 && ins_bit26 && ins_bit27)
3738 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3739
3740 return AARCH64_RECORD_UNSUPPORTED;
3741}
3742
3743/* Cleans up local record registers and memory allocations. */
3744
3745static void
3746deallocate_reg_mem (insn_decode_record *record)
3747{
3748 xfree (record->aarch64_regs);
3749 xfree (record->aarch64_mems);
3750}
3751
3752/* Parse the current instruction and record the values of the registers and
3753 memory that will be changed in current instruction to record_arch_list
3754 return -1 if something is wrong. */
3755
3756int
3757aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3758 CORE_ADDR insn_addr)
3759{
3760 uint32_t rec_no = 0;
3761 uint8_t insn_size = 4;
3762 uint32_t ret = 0;
99afc88b
OJ
3763 gdb_byte buf[insn_size];
3764 insn_decode_record aarch64_record;
3765
3766 memset (&buf[0], 0, insn_size);
3767 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3768 target_read_memory (insn_addr, &buf[0], insn_size);
3769 aarch64_record.aarch64_insn
3770 = (uint32_t) extract_unsigned_integer (&buf[0],
3771 insn_size,
3772 gdbarch_byte_order (gdbarch));
3773 aarch64_record.regcache = regcache;
3774 aarch64_record.this_addr = insn_addr;
3775 aarch64_record.gdbarch = gdbarch;
3776
3777 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3778 if (ret == AARCH64_RECORD_UNSUPPORTED)
3779 {
3780 printf_unfiltered (_("Process record does not support instruction "
3781 "0x%0x at address %s.\n"),
3782 aarch64_record.aarch64_insn,
3783 paddress (gdbarch, insn_addr));
3784 ret = -1;
3785 }
3786
3787 if (0 == ret)
3788 {
3789 /* Record registers. */
3790 record_full_arch_list_add_reg (aarch64_record.regcache,
3791 AARCH64_PC_REGNUM);
3792 /* Always record register CPSR. */
3793 record_full_arch_list_add_reg (aarch64_record.regcache,
3794 AARCH64_CPSR_REGNUM);
3795 if (aarch64_record.aarch64_regs)
3796 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3797 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3798 aarch64_record.aarch64_regs[rec_no]))
3799 ret = -1;
3800
3801 /* Record memories. */
3802 if (aarch64_record.aarch64_mems)
3803 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3804 if (record_full_arch_list_add_mem
3805 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3806 aarch64_record.aarch64_mems[rec_no].len))
3807 ret = -1;
3808
3809 if (record_full_arch_list_add_end ())
3810 ret = -1;
3811 }
3812
3813 deallocate_reg_mem (&aarch64_record);
3814 return ret;
3815}